path: root/include/linux/skb_array.h
author	Johannes Berg <johannes.berg@intel.com>	2017-06-08 14:14:40 +0200
committer	Johannes Berg <johannes.berg@intel.com>	2017-06-08 14:14:45 +0200
commit	a43e61842ec55baa486d60eed2a19af67ba78b9f (patch)
tree	6c3c93f19e3933273b73b9edd16edc146ab18527 /include/linux/skb_array.h
parent	5d473fedd17ae3a9f92fb35551e307d01459ea6a (diff)
parent	50dffe7fad6c156c2928e45c19ff7b86eb951f4c (diff)
Merge remote-tracking branch 'net-next/master' into mac80211-next
This brings in commit 7a7c0a6438b8 ("mac80211: fix TX aggregation start/stop callback race") to allow the follow-up cleanup.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'include/linux/skb_array.h')
-rw-r--r--	include/linux/skb_array.h	31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index f4dfade428f0..35226cd4efb0 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -97,21 +97,46 @@ static inline struct sk_buff *skb_array_consume(struct skb_array *a)
return ptr_ring_consume(&a->ring);
}
+static inline int skb_array_consume_batched(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
return ptr_ring_consume_irq(&a->ring);
}
+static inline int skb_array_consume_batched_irq(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
+}
+
static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
return ptr_ring_consume_any(&a->ring);
}
+static inline int skb_array_consume_batched_any(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
+}
+
+
static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
return ptr_ring_consume_bh(&a->ring);
}
+static inline int skb_array_consume_batched_bh(struct skb_array *a,
+ struct sk_buff **array, int n)
+{
+ return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
+}
+
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
if (likely(skb)) {
@@ -156,6 +181,12 @@ static void __skb_array_destroy_skb(void *ptr)
kfree_skb(ptr);
}
+static inline void skb_array_unconsume(struct skb_array *a,
+ struct sk_buff **skbs, int n)
+{
+ ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
+}
+
static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
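For context, the lines added above wrap the ptr_ring batch-dequeue primitives so a consumer can pull several skbs under a single lock acquisition instead of one at a time, and skb_array_unconsume() lets a caller return unprocessed entries to the ring, freeing any that no longer fit via __skb_array_destroy_skb(). Below is a minimal usage sketch; it is not part of the patch, the batch size and the process_skb() helper are purely illustrative, and it assumes an skb_array initialized elsewhere with skb_array_init().

#include <linux/skb_array.h>
#include <linux/skbuff.h>

#define DRAIN_BATCH 64	/* illustrative batch size, not from the patch */

/* Hypothetical per-packet handler; returns false to stop draining early. */
static bool process_skb(struct sk_buff *skb);

static void drain_skb_array(struct skb_array *q)
{
	struct sk_buff *skbs[DRAIN_BATCH];
	int n, i;

	/* Dequeue up to DRAIN_BATCH packets under one consumer lock. */
	n = skb_array_consume_batched(q, skbs, DRAIN_BATCH);

	for (i = 0; i < n; i++) {
		if (!process_skb(skbs[i])) {
			/*
			 * Put the unprocessed tail back on the ring; entries
			 * that can no longer be queued are freed through
			 * __skb_array_destroy_skb().
			 */
			skb_array_unconsume(q, &skbs[i], n - i);
			return;
		}
		kfree_skb(skbs[i]);
	}
}

The _irq, _bh and _any batched variants added by the patch mirror the existing single-skb consume flavors, so the same pattern applies with, e.g., skb_array_consume_batched_bh() from softirq context.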