author    David S. Miller <davem@davemloft.net>        2005-08-09 19:25:21 -0700
committer David S. Miller <davem@sunset.davemloft.net> 2005-08-29 15:31:14 -0700
commit    8728b834b226ffcf2c94a58530090e292af2a7bf (patch)
tree      2fd51ff3b7097eb3ffc41ea3a1d8b3ba04715b4c /net
parent    6869c4d8e066e21623c812c448a05f1ed931c9c6 (diff)
[NET]: Kill skb->list
Remove the "list" member of struct sk_buff, as it is entirely redundant.
All SKB list removal callers know which list the SKB is on, so storing
this in sk_buff does nothing other than taking up some space.

Two tricky bits were SCTP, which I took care of, and two ATM drivers
which Francois Romieu <romieu@fr.zoreil.com> fixed up.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
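For orientation, here is a minimal before/after sketch of the API change
this patch makes. The signatures are taken from the diff below; the struct
layout is abridged, with the field order inferred from the skb_clone() hunk.

/* Before: every queued skb carried a back-pointer to its list,
 * and the unlink/append/insert helpers used it implicitly.
 */
struct sk_buff {
	struct sk_buff		*next;
	struct sk_buff		*prev;
	struct sk_buff_head	*list;	/* removed by this patch */
	/* ... */
};
void skb_unlink(struct sk_buff *skb);
void skb_append(struct sk_buff *old, struct sk_buff *newsk);
void skb_insert(struct sk_buff *old, struct sk_buff *newsk);

/* After: callers must name the owning queue explicitly. */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);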
Diffstat (limited to 'net')
-rw-r--r--  net/atm/ipcommon.c      |  3
-rw-r--r--  net/ax25/ax25_subr.c    |  2
-rw-r--r--  net/core/skbuff.c       | 57
-rw-r--r--  net/decnet/af_decnet.c  |  2
-rw-r--r--  net/decnet/dn_nsp_out.c |  2
-rw-r--r--  net/econet/af_econet.c  |  4
-rw-r--r--  net/ipv4/tcp.c          |  2
-rw-r--r--  net/ipv4/tcp_input.c    | 29
-rw-r--r--  net/ipv4/tcp_output.c   |  6
-rw-r--r--  net/irda/irlap_frame.c  |  6
-rw-r--r--  net/lapb/lapb_subr.c    |  2
-rw-r--r--  net/llc/af_llc.c        |  2
-rw-r--r--  net/llc/llc_conn.c      |  6
-rw-r--r--  net/netrom/nr_subr.c    |  2
-rw-r--r--  net/rose/rose_subr.c    |  2
-rw-r--r--  net/sctp/socket.c       |  4
-rw-r--r--  net/sctp/ulpqueue.c     | 63
-rw-r--r--  net/unix/garbage.c      | 12
-rw-r--r--  net/x25/x25_subr.c      |  2
19 files changed, 105 insertions, 103 deletions
diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c
index 181a3002d8ad..4b1faca5013f 100644
--- a/net/atm/ipcommon.c
+++ b/net/atm/ipcommon.c
@@ -34,7 +34,6 @@
void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
{
- struct sk_buff *skb;
unsigned long flags;
struct sk_buff *skb_from = (struct sk_buff *) from;
struct sk_buff *skb_to = (struct sk_buff *) to;
@@ -47,8 +46,6 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to)
prev->next = skb_to;
to->prev->next = from->next;
to->prev = from->prev;
- for (skb = from->next; skb != skb_to; skb = skb->next)
- skb->list = to;
to->qlen += from->qlen;
spin_unlock(&to->lock);
from->prev = skb_from;
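The loop deleted above existed only to repoint each migrated skb's list
member at the destination queue; once the field is gone, the splice touches
nothing but the two head sentinels. A sketch of that splice under the same
circular-list invariants, with a hypothetical helper name and the original's
locking omitted:

#include <linux/skbuff.h>

/* Hypothetical sketch: splice all skbs from 'from' onto the tail of
 * 'to' by relinking only the sentinels -- no per-skb fixup remains
 * now that skb->list is gone.  Callers must handle locking.
 */
static void queue_splice_tail(struct sk_buff_head *from,
			      struct sk_buff_head *to)
{
	if (skb_queue_empty(from))
		return;
	from->prev->next = (struct sk_buff *)to;  /* last -> 'to' sentinel */
	from->next->prev = to->prev;              /* first <- old tail     */
	to->prev->next   = from->next;            /* old tail -> first     */
	to->prev         = from->prev;            /* sentinel <- last      */
	to->qlen        += from->qlen;

	/* Leave 'from' empty (circular: sentinel points at itself). */
	from->next = from->prev = (struct sk_buff *)from;
	from->qlen = 0;
}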
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 99694b57f6f5..eb7343c10a9f 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -76,7 +76,7 @@ void ax25_requeue_frames(ax25_cb *ax25)
if (skb_prev == NULL)
skb_queue_head(&ax25->write_queue, skb);
else
- skb_append(skb_prev, skb);
+ skb_append(skb_prev, skb, &ax25->write_queue);
skb_prev = skb;
}
}
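ax25_requeue_frames() is the first of five near-identical requeue loops in
this patch (lapb, netrom, rose, and x25 follow below). A sketch of the
shared idiom after the change, with a hypothetical function name and the
per-protocol bookkeeping omitted:

#include <linux/skbuff.h>

/* Hypothetical sketch: move every skb from 'ack_queue' back onto
 * 'write_queue' in its original order.  skb_append() now needs the
 * owning queue as its third argument.
 */
static void requeue_frames(struct sk_buff_head *ack_queue,
			   struct sk_buff_head *write_queue)
{
	struct sk_buff *skb, *skb_prev = NULL;

	while ((skb = skb_dequeue(ack_queue)) != NULL) {
		if (skb_prev == NULL)
			skb_queue_head(write_queue, skb);
		else
			skb_append(skb_prev, skb, write_queue);
		skb_prev = skb;
	}
}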
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 096991cb09d9..e6564b0a6839 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -281,8 +281,6 @@ void kfree_skbmem(struct sk_buff *skb)
void __kfree_skb(struct sk_buff *skb)
{
- BUG_ON(skb->list != NULL);
-
dst_release(skb->dst);
#ifdef CONFIG_XFRM
secpath_put(skb->sp);
@@ -333,7 +331,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
#define C(x) n->x = skb->x
n->next = n->prev = NULL;
- n->list = NULL;
n->sk = NULL;
C(stamp);
C(dev);
@@ -403,7 +400,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
*/
unsigned long offset = new->data - old->data;
- new->list = NULL;
new->sk = NULL;
new->dev = old->dev;
new->real_dev = old->real_dev;
@@ -1342,50 +1338,43 @@ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
__skb_queue_tail(list, newsk);
spin_unlock_irqrestore(&list->lock, flags);
}
+
/**
* skb_unlink - remove a buffer from a list
* @skb: buffer to remove
+ * @list: list to use
*
- * Place a packet after a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
+ * Remove a packet from a list. The list locks are taken and this
+ * function is atomic with respect to other list locked calls
*
- * Works even without knowing the list it is sitting on, which can be
- * handy at times. It also means that THE LIST MUST EXIST when you
- * unlink. Thus a list must have its contents unlinked before it is
- * destroyed.
+ * You must know what list the SKB is on.
*/
-void skb_unlink(struct sk_buff *skb)
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
- struct sk_buff_head *list = skb->list;
-
- if (list) {
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&list->lock, flags);
- if (skb->list == list)
- __skb_unlink(skb, skb->list);
- spin_unlock_irqrestore(&list->lock, flags);
- }
+ spin_lock_irqsave(&list->lock, flags);
+ __skb_unlink(skb, list);
+ spin_unlock_irqrestore(&list->lock, flags);
}
-
/**
* skb_append - append a buffer
* @old: buffer to insert after
* @newsk: buffer to insert
+ * @list: list to use
*
* Place a packet after a given packet in a list. The list locks are taken
* and this function is atomic with respect to other list locked calls.
* A buffer cannot be placed on two lists at the same time.
*/
-
-void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
unsigned long flags;
- spin_lock_irqsave(&old->list->lock, flags);
- __skb_append(old, newsk);
- spin_unlock_irqrestore(&old->list->lock, flags);
+ spin_lock_irqsave(&list->lock, flags);
+ __skb_append(old, newsk, list);
+ spin_unlock_irqrestore(&list->lock, flags);
}
@@ -1393,19 +1382,21 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
* skb_insert - insert a buffer
* @old: buffer to insert before
* @newsk: buffer to insert
+ * @list: list to use
+ *
+ * Place a packet before a given packet in a list. The list locks are
+ * taken and this function is atomic with respect to other list locked
+ * calls.
*
- * Place a packet before a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
* A buffer cannot be placed on two lists at the same time.
*/
-
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
unsigned long flags;
- spin_lock_irqsave(&old->list->lock, flags);
- __skb_insert(newsk, old->prev, old, old->list);
- spin_unlock_irqrestore(&old->list->lock, flags);
+ spin_lock_irqsave(&list->lock, flags);
+ __skb_insert(newsk, old->prev, old, list);
+ spin_unlock_irqrestore(&list->lock, flags);
}
#if 0
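Note how the rewritten skb_unlink() also loses the defensive
'if (skb->list == list)' re-check: with no back-pointer to consult, the
caller's knowledge of the queue is simply trusted. A sketch of the
resulting calling convention, with hypothetical names:

#include <linux/skbuff.h>

/* Hypothetical sketch: drop one skb from a known queue.  The locked
 * skb_unlink() takes queue->lock itself; the caller only has to know
 * which queue the skb is on.
 */
static void drop_from_queue(struct sk_buff_head *queue,
			    struct sk_buff *skb)
{
	skb_unlink(skb, queue);
	kfree_skb(skb);
}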
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index acdd18e6adb2..0c30409fe9e5 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1763,7 +1763,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
nskb = skb->next;
if (skb->len == 0) {
- skb_unlink(skb);
+ skb_unlink(skb, queue);
kfree_skb(skb);
/*
* N.B. Don't refer to skb or cb after this point
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 8cce1fdbda90..e0bebf4bbcad 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -479,7 +479,7 @@ int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff
xmit_count = cb2->xmit_count;
segnum = cb2->segnum;
/* Remove and drop ack'ed packet */
- skb_unlink(ack);
+ skb_unlink(ack, q);
kfree_skb(ack);
ack = NULL;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index de691e119e17..b807a314269e 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -869,7 +869,7 @@ static void aun_tx_ack(unsigned long seq, int result)
foundit:
tx_result(skb->sk, eb->cookie, result);
- skb_unlink(skb);
+ skb_unlink(skb, &aun_queue);
spin_unlock_irqrestore(&aun_queue_lock, flags);
kfree_skb(skb);
}
@@ -947,7 +947,7 @@ static void ab_cleanup(unsigned long h)
{
tx_result(skb->sk, eb->cookie,
ECTYPE_TRANSMIT_NOT_PRESENT);
- skb_unlink(skb);
+ skb_unlink(skb, &aun_queue);
kfree_skb(skb);
}
skb = newskb;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 69b1fcf70077..d2696af46c70 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -975,7 +975,7 @@ do_fault:
if (!skb->len) {
if (sk->sk_send_head == skb)
sk->sk_send_head = NULL;
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &sk->sk_write_queue);
sk_stream_free_skb(sk, skb);
}
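TCP always knows which of its queues an skb sits on, so each former
skb->list use becomes an explicit &sk->sk_write_queue, &sk->sk_receive_queue,
or &tp->out_of_order_queue (here and in tcp_input.c/tcp_output.c below).
A sketch of the substitution, wrapped in a hypothetical helper:

#include <net/sock.h>

/* Hypothetical sketch: unlink and free an empty skb on the write
 * queue, as the do_fault path above does.  The caller holds the
 * socket lock, so the unlocked __skb_unlink() is safe.
 */
static void tcp_drop_write_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_send_head == skb)
		sk->sk_send_head = NULL;
	__skb_unlink(skb, &sk->sk_write_queue);
	sk_stream_free_skb(sk, skb);
}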
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 53a8a5399f1e..ffa24025cd02 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2085,7 +2085,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
seq_rtt = now - scb->when;
tcp_dec_pcount_approx(&tp->fackets_out, skb);
tcp_packets_out_dec(tp, skb);
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &sk->sk_write_queue);
sk_stream_free_skb(sk, skb);
}
@@ -2853,7 +2853,7 @@ static void tcp_ofo_queue(struct sock *sk)
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
SOCK_DEBUG(sk, "ofo packet was already received \n");
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &tp->out_of_order_queue);
__kfree_skb(skb);
continue;
}
@@ -2861,7 +2861,7 @@ static void tcp_ofo_queue(struct sock *sk)
tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq);
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &tp->out_of_order_queue);
__skb_queue_tail(&sk->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if(skb->h.th->fin)
@@ -3027,7 +3027,7 @@ drop:
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (seq == TCP_SKB_CB(skb1)->end_seq) {
- __skb_append(skb1, skb);
+ __skb_append(skb1, skb, &tp->out_of_order_queue);
if (!tp->rx_opt.num_sacks ||
tp->selective_acks[0].end_seq != seq)
@@ -3071,7 +3071,7 @@ drop:
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
break;
}
- __skb_unlink(skb1, skb1->list);
+ __skb_unlink(skb1, &tp->out_of_order_queue);
tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
__kfree_skb(skb1);
}
@@ -3088,8 +3088,9 @@ add_sack:
* simplifies code)
*/
static void
-tcp_collapse(struct sock *sk, struct sk_buff *head,
- struct sk_buff *tail, u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list,
+ struct sk_buff *head, struct sk_buff *tail,
+ u32 start, u32 end)
{
struct sk_buff *skb;
@@ -3099,7 +3100,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
struct sk_buff *next = skb->next;
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
skb = next;
@@ -3145,7 +3146,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
- __skb_insert(nskb, skb->prev, skb, skb->list);
+ __skb_insert(nskb, skb->prev, skb, list);
sk_stream_set_owner_r(nskb, sk);
/* Copy data, releasing collapsed skbs. */
@@ -3164,7 +3165,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head,
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
struct sk_buff *next = skb->next;
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
skb = next;
@@ -3200,7 +3201,8 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
- tcp_collapse(sk, head, skb, start, end);
+ tcp_collapse(sk, &tp->out_of_order_queue,
+ head, skb, start, end);
head = skb;
if (skb == (struct sk_buff *)&tp->out_of_order_queue)
break;
@@ -3237,7 +3239,8 @@ static int tcp_prune_queue(struct sock *sk)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
tcp_collapse_ofo_queue(sk);
- tcp_collapse(sk, sk->sk_receive_queue.next,
+ tcp_collapse(sk, &sk->sk_receive_queue,
+ sk->sk_receive_queue.next,
(struct sk_buff*)&sk->sk_receive_queue,
tp->copied_seq, tp->rcv_nxt);
sk_stream_mem_reclaim(sk);
@@ -3462,7 +3465,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
tp->copied_seq++;
if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dd30dd137b74..a4d1eb9a0926 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -505,7 +505,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
/* Link BUFF into the send queue. */
skb_header_release(buff);
- __skb_append(skb, buff);
+ __skb_append(skb, buff, &sk->sk_write_queue);
return 0;
}
@@ -893,7 +893,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
/* Link BUFF into the send queue. */
skb_header_release(buff);
- __skb_append(skb, buff);
+ __skb_append(skb, buff, &sk->sk_write_queue);
return 0;
}
@@ -1238,7 +1238,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
tcp_skb_pcount(next_skb) != 1);
/* Ok. We will be able to collapse the packet. */
- __skb_unlink(next_skb, next_skb->list);
+ __skb_unlink(next_skb, &sk->sk_write_queue);
memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 6dafbb43b529..eb65b4925b51 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -988,9 +988,6 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command)
IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
return;
}
- /* Unlink tx_skb from list */
- tx_skb->next = tx_skb->prev = NULL;
- tx_skb->list = NULL;
/* Clear old Nr field + poll bit */
tx_skb->data[1] &= 0x0f;
@@ -1063,9 +1060,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command)
IRDA_DEBUG(0, "%s(), unable to copy\n", __FUNCTION__);
return;
}
- /* Unlink tx_skb from list */
- tx_skb->next = tx_skb->prev = NULL;
- tx_skb->list = NULL;
/* Clear old Nr field + poll bit */
tx_skb->data[1] &= 0x0f;
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c
index 5de05a0bc0ff..8b5eefd70f03 100644
--- a/net/lapb/lapb_subr.c
+++ b/net/lapb/lapb_subr.c
@@ -78,7 +78,7 @@ void lapb_requeue_frames(struct lapb_cb *lapb)
if (!skb_prev)
skb_queue_head(&lapb->write_queue, skb);
else
- skb_append(skb_prev, skb);
+ skb_append(skb_prev, skb, &lapb->write_queue);
skb_prev = skb;
}
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 20b4cfebd74c..f49b82da8264 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -714,7 +714,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
if (uaddr)
memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
msg->msg_namelen = sizeof(*uaddr);
- if (!skb->list) {
+ if (!skb->next) {
dgram_free:
kfree_skb(skb);
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index eba812a9c69c..571548619469 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -71,7 +71,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
if (!ev->ind_prim && !ev->cfm_prim) {
/* indicate or confirm not required */
- if (!skb->list)
+ /* XXX this is not very pretty, perhaps we should store
+ * XXX indicate/confirm-needed state in the llc_conn_state_ev
+ * XXX control block of the SKB instead? -DaveM
+ */
+ if (!skb->next)
goto out_kfree_skb;
goto out_skb_put;
}
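With the list pointer gone, "is this skb on a queue at all?" is answered by
skb->next instead: the queue links are circular, so a queued skb always has
a non-NULL next, while unlink and dequeue NULL the links out. A sketch of
the predicate both LLC hunks rely on:

#include <linux/skbuff.h>

/* Hypothetical sketch of the LLC idiom: non-NULL next means the skb
 * is still sitting on some sk_buff_head.
 */
static int skb_is_queued(const struct sk_buff *skb)
{
	return skb->next != NULL;
}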
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index 0627347b14b8..252c1b3ecd78 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -77,7 +77,7 @@ void nr_requeue_frames(struct sock *sk)
if (skb_prev == NULL)
skb_queue_head(&sk->sk_write_queue, skb);
else
- skb_append(skb_prev, skb);
+ skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7db7e1cedc3a..ae135e27799b 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -74,7 +74,7 @@ void rose_requeue_frames(struct sock *sk)
if (skb_prev == NULL)
skb_queue_head(&sk->sk_write_queue, skb);
else
- skb_append(skb_prev, skb);
+ skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 091a66f06a35..4454afe4727e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4892,7 +4892,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
}
}
@@ -4921,7 +4921,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- __skb_unlink(skb, skb->list);
+ __skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
}
}
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 8bbc279d6c99..ec2c857eae7f 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -50,9 +50,9 @@
/* Forward declarations for internal helpers. */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *);
+ struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
- struct sctp_ulpevent *);
+ struct sctp_ulpevent *);
/* 1st Level Abstractions */
@@ -125,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
event = sctp_ulpq_order(ulpq, event);
}
- /* Send event to the ULP. */
+ /* Send event to the ULP. 'event' is the sctp_ulpevent for
+ * very first SKB on the 'temp' list.
+ */
if (event)
sctp_ulpq_tail_event(ulpq, event);
@@ -158,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
return sctp_clear_pd(ulpq->asoc->base.sk);
}
-
-
+/* If the SKB of 'event' is on a list, it is the first such member
+ * of that list.
+ */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
struct sock *sk = ulpq->asoc->base.sk;
- struct sk_buff_head *queue;
+ struct sk_buff_head *queue, *skb_list;
+ struct sk_buff *skb = sctp_event2skb(event);
int clear_pd = 0;
+ skb_list = (struct sk_buff_head *) skb->prev;
+
/* If the socket is just going to throw this away, do not
* even try to deliver it.
*/
@@ -197,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
/* If we are harvesting multiple skbs they will be
* collected on a list.
*/
- if (sctp_event2skb(event)->list)
- sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
+ if (skb_list)
+ sctp_skb_list_tail(skb_list, queue);
else
- __skb_queue_tail(queue, sctp_event2skb(event));
+ __skb_queue_tail(queue, skb);
/* Did we just complete partial delivery and need to get
* rolling again? Move pending data to the receive
@@ -214,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
return 1;
out_free:
- if (sctp_event2skb(event)->list)
- sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
+ if (skb_list)
+ sctp_queue_purge_ulpevents(skb_list);
else
sctp_ulpevent_free(event);
+
return 0;
}
@@ -269,7 +276,7 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
* payload was fragmented on the way and ip had to reassemble them.
* We add the rest of skb's to the first skb's fraglist.
*/
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
struct sk_buff *pos;
struct sctp_ulpevent *event;
@@ -294,7 +301,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
skb_shinfo(f_frag)->frag_list = pos;
/* Remove the first fragment from the reassembly queue. */
- __skb_unlink(f_frag, f_frag->list);
+ __skb_unlink(f_frag, queue);
while (pos) {
pnext = pos->next;
@@ -304,7 +311,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
f_frag->data_len += pos->len;
/* Remove the fragment from the reassembly queue. */
- __skb_unlink(pos, pos->list);
+ __skb_unlink(pos, queue);
/* Break if we have reached the last fragment. */
if (pos == l_frag)
@@ -375,7 +382,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
done:
return retval;
found:
- retval = sctp_make_reassembled_event(first_frag, pos);
+ retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
if (retval)
retval->msg_flags |= MSG_EOR;
goto done;
@@ -435,7 +442,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
* further.
*/
done:
- retval = sctp_make_reassembled_event(first_frag, last_frag);
+ retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
if (retval && is_last)
retval->msg_flags |= MSG_EOR;
@@ -527,7 +534,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
* further.
*/
done:
- retval = sctp_make_reassembled_event(first_frag, last_frag);
+ retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
return retval;
}
@@ -537,6 +544,7 @@ done:
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
+ struct sk_buff_head *event_list;
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
struct sctp_stream *in;
@@ -547,6 +555,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
ssn = event->ssn;
in = &ulpq->asoc->ssnmap->in;
+ event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
/* We are holding the chunks by stream, by SSN. */
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (struct sctp_ulpevent *) pos->cb;
@@ -567,10 +577,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, sid);
- __skb_unlink(pos, pos->list);
+ __skb_unlink(pos, &ulpq->lobby);
/* Attach all gathered skbs to the event. */
- __skb_queue_tail(sctp_event2skb(event)->list, pos);
+ __skb_queue_tail(event_list, pos);
}
}
@@ -626,7 +636,7 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
- struct sctp_ulpevent *event)
+ struct sctp_ulpevent *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
@@ -667,7 +677,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
- struct sctp_ulpevent *event = NULL;
+ struct sctp_ulpevent *event;
struct sctp_stream *in;
struct sk_buff_head temp;
__u16 csid, cssn;
@@ -675,6 +685,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
in = &ulpq->asoc->ssnmap->in;
/* We are holding the chunks by stream, by SSN. */
+ skb_queue_head_init(&temp);
+ event = NULL;
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
@@ -686,19 +698,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, csid);
- __skb_unlink(pos, pos->list);
+ __skb_unlink(pos, &ulpq->lobby);
if (!event) {
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
- skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
} else {
/* Attach all gathered skbs to the event. */
- __skb_queue_tail(sctp_event2skb(event)->list, pos);
+ __skb_queue_tail(&temp, pos);
}
}
- /* Send event to the ULP. */
+ /* Send event to the ULP. 'event' is the sctp_ulpevent for
+ * very first SKB on the 'temp' list.
+ */
if (event)
sctp_ulpq_tail_event(ulpq, event);
}
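The SCTP "tricky bit" from the commit message is visible above: struct
sk_buff_head begins with the same next/prev pair as struct sk_buff, so the
head doubles as a sentinel node, and for the first skb on a queue skb->prev
points straight back at that head. The casts in sctp_ulpq_tail_event() and
sctp_ulpq_retrieve_ordered() recover the owning list that way. An
illustrative sketch (valid only under the first-on-list invariant the new
comments spell out):

#include <linux/skbuff.h>

/* Hypothetical sketch: for the FIRST skb on a queue,
 *   head->next == skb  and  skb->prev == (struct sk_buff *)head,
 * so the owning sk_buff_head can be recovered by casting.
 */
static struct sk_buff_head *owning_list_of_first(struct sk_buff *skb)
{
	return (struct sk_buff_head *)skb->prev;
}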
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 4bd95c8f5934..46252d2807bb 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -286,16 +286,16 @@ void unix_gc(void)
skb = skb_peek(&s->sk_receive_queue);
while (skb &&
skb != (struct sk_buff *)&s->sk_receive_queue) {
- nextsk=skb->next;
+ nextsk = skb->next;
/*
* Do we have file descriptors ?
*/
- if(UNIXCB(skb).fp)
- {
- __skb_unlink(skb, skb->list);
- __skb_queue_tail(&hitlist,skb);
+ if (UNIXCB(skb).fp) {
+ __skb_unlink(skb,
+ &s->sk_receive_queue);
+ __skb_queue_tail(&hitlist, skb);
}
- skb=nextsk;
+ skb = nextsk;
}
spin_unlock(&s->sk_receive_queue.lock);
}
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 7fd872ad0c20..e20cfadad4d9 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -80,7 +80,7 @@ void x25_requeue_frames(struct sock *sk)
if (!skb_prev)
skb_queue_head(&sk->sk_write_queue, skb);
else
- skb_append(skb_prev, skb);
+ skb_append(skb_prev, skb, &sk->sk_write_queue);
skb_prev = skb;
}
}