/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
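
/* A ulpq owns two intermediate queues: 'reasm' holds fragments sorted by
 * TSN until a complete message can be re-assembled, and 'lobby' holds
 * ordered data sorted by stream id and then SSN until its expected SSN
 * comes up.  Events that cannot be delivered while the socket is in
 * partial delivery are parked on the socket's pd_lobby instead.
 */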
/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);

	return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}
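
/* Note on the return value of sctp_ulpq_tail_data(): 1 means a complete
 * message (MSG_EOR) reached the ULP, 0 means the data is still queued for
 * reassembly or ordering, and a negative errno means the event could not
 * be allocated.  sctp_ulpq_renege() keys off this value to decide between
 * starting partial delivery and draining the reassembly queue.
 */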
/* Add a new event for propagation to the ULP.  */
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
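
/* Two flags track partial delivery: the per-socket atomic sp->pd_mode
 * counts how many associations on the socket are currently in partial
 * delivery, while ulpq->pd_mode marks whether _this_ association is the
 * one being partially delivered.
 */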
/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}
	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_ is
	 * the association causing the partial delivery.
	 */
	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		skb_queue_splice_tail_init(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */
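
/* Throughout the helpers below, the sctp_ulpevent lives in the skb's
 * control block: sctp_skb2event()/sctp_event2skb() convert between the
 * sk_buff sitting on a queue and the event metadata (tsn, ssn, stream,
 * msg_flags) stored in skb->cb.
 */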
/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;
		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = NULL;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
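
/* Besides completing messages, the queue walk below also tracks a run of
 * in-sequence fragments at the head of the queue (pd_first, pd_last,
 * pd_len) so that partial delivery can be triggered once the accumulated
 * length reaches the socket's SCTP_PARTIAL_DELIVERY_POINT (pd_point).
 */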
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible partial delivery.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
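
/* The next helper is only used while this association is already in
 * partial delivery (see sctp_ulpq_reasm()): it pulls the next run of
 * in-sequence MIDDLE/LAST fragments off the head of the reasm queue so
 * the partially delivered message can keep flowing to the user.
 */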
/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	return retval;
}
/*
 * Flush out stale fragments from the reassembly queue when processing
 * a FORWARD TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark in the stream. */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}
/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering.  */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
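
/* The expected SSN for each incoming stream is kept in the association's
 * stream structure: sctp_ssn_peek() reads it, sctp_ssn_next() advances it
 * by one, and sctp_ssn_skip() (used for FORWARD TSN processing below)
 * jumps it past abandoned data.
 */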
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered data that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}
/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine.  */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
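
/* Renege: when memory is tight, give back data that was received but not
 * yet delivered.  Starting from the tail (highest TSNs) of the given
 * queue, events are freed and their TSNs removed from the peer's TSN map
 * so the sender will retransmit them, but never below the Cumulative TSN
 * ACK Point already acknowledged to the peer.
 */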
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(struct sctp_data_chunk);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;

		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/* Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
	struct sctp_sock *sp;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}