/**
 * @file
 * Packet buffer management
 *
 * Packets are built from the pbuf data structure. It supports dynamic
 * memory allocation for packet contents or can reference externally
 * managed packet contents both in RAM and ROM. Quick allocation for
 * incoming packets is provided through pools with fixed sized pbufs.
 *
 * A packet may span over multiple pbufs, chained as a singly linked
 * list. This is called a "pbuf chain".
 *
 * Multiple packets may be queued, also using this singly linked list.
 * This is called a "packet queue".
 *
 * So, a packet queue consists of one or more pbuf chains, each of
 * which consist of one or more pbufs. Currently, queues are only
 * supported in a limited section of lwIP, this is the etharp queueing
 * code. Outside of this section no packet queues are supported yet.
 *
 * The differences between a pbuf chain and a packet queue are very
 * subtle, but both are present in this file:
 *
 * The last pbuf of a packet has a ->tot_len field that equals the
 * ->len field. It can be found by traversing the list. If the last
 * pbuf of a packet has a ->next field other than NULL, more packets
 * are on the queue.
 *
 * Therefore, looping through a pbuf of a single packet, has a
 * loop end condition (tot_len == p->len), NOT (next == NULL).
 */

/*
 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 * This file is part of the lwIP TCP/IP stack.
 *
 * Author: Adam Dunkels <adam@sics.se>
 */
#include <string.h>

#include "lwip/opt.h"
#include "lwip/def.h"
#include "lwip/mem.h"
#include "lwip/memp.h"
#include "lwip/pbuf.h"
#include "lwip/stats.h"
#include "lwip/sys.h"
#include "arch/perf.h"
78 static u8_t pbuf_pool_memory[(PBUF_POOL_SIZE * MEM_ALIGN_SIZE(PBUF_POOL_BUFSIZE + sizeof(struct pbuf)))];
80 #if !SYS_LIGHTWEIGHT_PROT
81 static volatile u8_t pbuf_pool_free_lock, pbuf_pool_alloc_lock;
82 static sys_sem_t pbuf_pool_free_sem;
85 static struct pbuf *pbuf_pool = NULL;
88 * Initializes the pbuf module.
90 * A large part of memory is allocated for holding the pool of pbufs.
91 * The size of the individual pbufs in the pool is given by the size
92 * parameter, and the number of pbufs in the pool by the num parameter.
94 * After the memory has been allocated, the pbufs are set up. The
95 * ->next pointer in each pbuf is set up to point to the next pbuf in
102 struct pbuf *p, *q = NULL;
105 pbuf_pool = (struct pbuf *)&pbuf_pool_memory[0];
106 LWIP_ASSERT("pbuf_init: pool aligned", (mem_ptr_t)pbuf_pool % MEM_ALIGNMENT == 0);
109 lwip_stats.pbuf.avail = PBUF_POOL_SIZE;
110 #endif /* PBUF_STATS */
112 /* Set up ->next pointers to link the pbufs of the pool together */
115 for(i = 0; i < PBUF_POOL_SIZE; ++i) {
116 p->next = (struct pbuf *)((u8_t *)p + PBUF_POOL_BUFSIZE + sizeof(struct pbuf));
117 p->len = p->tot_len = PBUF_POOL_BUFSIZE;
118 p->payload = MEM_ALIGN((void *)((u8_t *)p + sizeof(struct pbuf)));
119 p->flags = PBUF_FLAG_POOL;
124 /* The ->next pointer of last pbuf is NULL to indicate that there
125 are no more pbufs in the pool */
128 #if !SYS_LIGHTWEIGHT_PROT
129 pbuf_pool_alloc_lock = 0;
130 pbuf_pool_free_lock = 0;
131 pbuf_pool_free_sem = sys_sem_new(1);
136 * @internal only called from pbuf_alloc()
139 pbuf_pool_alloc(void)
141 struct pbuf *p = NULL;
143 SYS_ARCH_DECL_PROTECT(old_level);
144 SYS_ARCH_PROTECT(old_level);
146 #if !SYS_LIGHTWEIGHT_PROT
147 /* Next, check the actual pbuf pool, but if the pool is locked, we
148 pretend to be out of buffers and return NULL. */
149 if (pbuf_pool_free_lock) {
151 ++lwip_stats.pbuf.alloc_locked;
152 #endif /* PBUF_STATS */
155 pbuf_pool_alloc_lock = 1;
156 if (!pbuf_pool_free_lock) {
157 #endif /* SYS_LIGHTWEIGHT_PROT */
162 #if !SYS_LIGHTWEIGHT_PROT
165 ++lwip_stats.pbuf.alloc_locked;
166 #endif /* PBUF_STATS */
168 pbuf_pool_alloc_lock = 0;
169 #endif /* SYS_LIGHTWEIGHT_PROT */
173 ++lwip_stats.pbuf.used;
174 if (lwip_stats.pbuf.used > lwip_stats.pbuf.max) {
175 lwip_stats.pbuf.max = lwip_stats.pbuf.used;
178 #endif /* PBUF_STATS */
180 SYS_ARCH_UNPROTECT(old_level);
188 * The actual memory allocated for the pbuf is determined by the
189 * layer at which the pbuf is allocated and the requested size
190 * (from the size parameter).
192 * @param flag this parameter decides how and where the pbuf
193 * should be allocated as follows:
195 * - PBUF_RAM: buffer memory for pbuf is allocated as one large
196 * chunk. This includes protocol headers as well.
197 * - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
198 * protocol headers. Additional headers must be prepended
199 * by allocating another pbuf and chain in to the front of
200 * the ROM pbuf. It is assumed that the memory used is really
201 * similar to ROM in that it is immutable and will not be
202 * changed. Memory which is dynamic should generally not
203 * be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
204 * - PBUF_REF: no buffer memory is allocated for the pbuf, even for
205 * protocol headers. It is assumed that the pbuf is only
206 * being used in a single thread. If the pbuf gets queued,
207 * then pbuf_take should be called to copy the buffer.
208 * - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
209 * the pbuf pool that is allocated during pbuf_init().
211 * @return the allocated pbuf. If multiple pbufs where allocated, this
212 * is the first pbuf of a pbuf chain.
215 pbuf_alloc(pbuf_layer l, u16_t length, pbuf_flag flag)
217 struct pbuf *p, *q, *r;
219 s32_t rem_len; /* remaining length */
220 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_alloc(length=%u)\n", length));
222 /* determine header offset */
226 /* add room for transport (often TCP) layer header */
227 offset += PBUF_TRANSPORT_HLEN;
230 /* add room for IP layer header */
231 offset += PBUF_IP_HLEN;
234 /* add room for link layer header */
235 offset += PBUF_LINK_HLEN;
240 LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
246 /* allocate head of pbuf chain into p */
247 p = pbuf_pool_alloc();
248 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
251 ++lwip_stats.pbuf.err;
252 #endif /* PBUF_STATS */
257 /* make the payload pointer point 'offset' bytes into pbuf data memory */
258 p->payload = MEM_ALIGN((void *)((u8_t *)p + (sizeof(struct pbuf) + offset)));
259 LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
260 ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
261 /* the total length of the pbuf chain is the requested size */
263 /* set the length of the first pbuf in the chain */
264 p->len = length > PBUF_POOL_BUFSIZE - offset? PBUF_POOL_BUFSIZE - offset: length;
265 /* set reference count (needed here in case we fail) */
268 /* now allocate the tail of the pbuf chain */
270 /* remember first pbuf for linkage in next iteration */
272 /* remaining length to be allocated */
273 rem_len = length - p->len;
274 /* any remaining pbufs to be allocated? */
275 while (rem_len > 0) {
276 q = pbuf_pool_alloc();
278 LWIP_DEBUGF(PBUF_DEBUG | 2, ("pbuf_alloc: Out of pbufs in pool.\n"));
280 ++lwip_stats.pbuf.err;
281 #endif /* PBUF_STATS */
282 /* free chain so far allocated */
284 /* bail out unsuccesfully */
288 /* make previous pbuf point to this pbuf */
290 /* set total length of this pbuf and next in chain */
291 q->tot_len = rem_len;
292 /* this pbuf length is pool size, unless smaller sized tail */
293 q->len = rem_len > PBUF_POOL_BUFSIZE? PBUF_POOL_BUFSIZE: rem_len;
294 q->payload = (void *)((u8_t *)q + sizeof(struct pbuf));
295 LWIP_ASSERT("pbuf_alloc: pbuf q->payload properly aligned",
296 ((mem_ptr_t)q->payload % MEM_ALIGNMENT) == 0);
298 /* calculate remaining length to be allocated */
300 /* remember this pbuf for linkage in next iteration */
308 /* If pbuf is to be allocated in RAM, allocate memory for it. */
309 p = mem_malloc(MEM_ALIGN_SIZE(sizeof(struct pbuf) + length + offset));
313 /* Set up internal structure of the pbuf. */
314 p->payload = MEM_ALIGN((void *)((u8_t *)p + sizeof(struct pbuf) + offset));
315 p->len = p->tot_len = length;
317 p->flags = PBUF_FLAG_RAM;
319 LWIP_ASSERT("pbuf_alloc: pbuf->payload properly aligned",
320 ((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
322 /* pbuf references existing (static constant) ROM payload? */
324 /* pbuf references existing (externally allocated) RAM payload? */
326 /* only allocate memory for the pbuf structure */
327 p = memp_malloc(MEMP_PBUF);
329 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_alloc: Could not allocate MEMP_PBUF for PBUF_%s.\n", flag == PBUF_ROM?"ROM":"REF"));
332 /* caller must set this field properly, afterwards */
334 p->len = p->tot_len = length;
336 p->flags = (flag == PBUF_ROM? PBUF_FLAG_ROM: PBUF_FLAG_REF);
339 LWIP_ASSERT("pbuf_alloc: erroneous flag", 0);
342 /* set reference count */
344 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_alloc(length=%u) == %p\n", length, (void *)p));
#if PBUF_STATS
#define DEC_PBUF_STATS do { --lwip_stats.pbuf.used; } while (0)
#else /* PBUF_STATS */
#define DEC_PBUF_STATS
#endif /* PBUF_STATS */

/* Push pbuf p back on the head of the pool free-list (no locking). */
#define PBUF_POOL_FAST_FREE(p)  do {                                    \
                                  p->next = pbuf_pool;                  \
                                  pbuf_pool = p;                        \
                                  DEC_PBUF_STATS;                       \
                                } while (0)

#if SYS_LIGHTWEIGHT_PROT
/* Return a pool pbuf under interrupt-level protection. */
#define PBUF_POOL_FREE(p)  do {                                         \
                             SYS_ARCH_DECL_PROTECT(old_level);          \
                             SYS_ARCH_PROTECT(old_level);               \
                             PBUF_POOL_FAST_FREE(p);                    \
                             SYS_ARCH_UNPROTECT(old_level);             \
                           } while (0)
#else /* SYS_LIGHTWEIGHT_PROT */
/* Return a pool pbuf under semaphore protection. */
#define PBUF_POOL_FREE(p)  do {                                         \
                             sys_sem_wait(pbuf_pool_free_sem);          \
                             PBUF_POOL_FAST_FREE(p);                    \
                             sys_sem_signal(pbuf_pool_free_sem);        \
                           } while (0)
#endif /* SYS_LIGHTWEIGHT_PROT */
377 * Shrink a pbuf chain to a desired length.
379 * @param p pbuf to shrink.
380 * @param new_len desired new length of pbuf chain
382 * Depending on the desired length, the first few pbufs in a chain might
383 * be skipped and left unchanged. The new last pbuf in the chain will be
384 * resized, and any remaining pbufs will be freed.
386 * @note If the pbuf is ROM/REF, only the ->tot_len and ->len fields are adjusted.
387 * @note May not be called on a packet queue.
389 * @bug Cannot grow the size of a pbuf (chain) (yet).
392 pbuf_realloc(struct pbuf *p, u16_t new_len)
395 u16_t rem_len; /* remaining length */
398 LWIP_ASSERT("pbuf_realloc: sane p->flags", p->flags == PBUF_FLAG_POOL ||
399 p->flags == PBUF_FLAG_ROM ||
400 p->flags == PBUF_FLAG_RAM ||
401 p->flags == PBUF_FLAG_REF);
403 /* desired length larger than current length? */
404 if (new_len >= p->tot_len) {
405 /* enlarging not yet supported */
409 /* the pbuf chain grows by (new_len - p->tot_len) bytes
410 * (which may be negative in case of shrinking) */
411 grow = new_len - p->tot_len;
413 /* first, step over any pbufs that should remain in the chain */
416 /* should this pbuf be kept? */
417 while (rem_len > q->len) {
418 /* decrease remaining length by pbuf length */
420 /* decrease total length indicator */
422 /* proceed to next pbuf in chain */
425 /* we have now reached the new last pbuf (in q) */
426 /* rem_len == desired length for pbuf q */
428 /* shrink allocated memory for PBUF_RAM */
429 /* (other types merely adjust their length fields */
430 if ((q->flags == PBUF_FLAG_RAM) && (rem_len != q->len)) {
431 /* reallocate and adjust the length of the pbuf that will be split */
432 mem_realloc(q, (u8_t *)q->payload - (u8_t *)q + rem_len);
434 /* adjust length fields for new last pbuf */
438 /* any remaining pbufs in chain? */
439 if (q->next != NULL) {
440 /* free remaining pbufs in chain */
443 /* q is last packet in chain */
449 * Adjusts the payload pointer to hide or reveal headers in the payload.
451 * Adjusts the ->payload pointer so that space for a header
452 * (dis)appears in the pbuf payload.
454 * The ->payload, ->tot_len and ->len fields are adjusted.
456 * @param hdr_size Number of bytes to increment header size which
457 * increases the size of the pbuf. New space is on the front.
458 * (Using a negative value decreases the header size.)
460 * PBUF_ROM and PBUF_REF type buffers cannot have their sizes increased, so
461 * the call will fail. A check is made that the increase in header size does
462 * not move the payload pointer in front of the start of the buffer.
463 * @return 1 on failure, 0 on success.
465 * @note May not be called on a packet queue.
468 pbuf_header(struct pbuf *p, s16_t header_size)
472 /* remember current payload pointer */
473 payload = p->payload;
475 /* pbuf types containing payloads? */
476 if (p->flags == PBUF_FLAG_RAM || p->flags == PBUF_FLAG_POOL) {
477 /* set new payload pointer */
478 p->payload = (u8_t *)p->payload - header_size;
479 /* boundary check fails? */
480 if ((u8_t *)p->payload < (u8_t *)p + sizeof(struct pbuf)) {
481 LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_header: failed as %p < %p\n",
483 (u8_t *)p + sizeof(struct pbuf)) );\
484 /* restore old payload pointer */
485 p->payload = payload;
486 /* bail out unsuccesfully */
489 /* pbuf types refering to payloads? */
490 } else if (p->flags == PBUF_FLAG_REF || p->flags == PBUF_FLAG_ROM) {
491 /* hide a header in the payload? */
492 if ((header_size < 0) && (header_size - p->len <= 0)) {
493 /* increase payload pointer */
494 p->payload = (u8_t *)p->payload - header_size;
496 /* cannot expand payload to front (yet!)
497 * bail out unsuccesfully */
501 LWIP_DEBUGF( PBUF_DEBUG, ("pbuf_header: old %p new %p (%d)\n", (void *)payload, (void *)p->payload, header_size) );
502 /* modify pbuf length fields */
503 p->len += header_size;
504 p->tot_len += header_size;
510 * Dereference a pbuf chain or queue and deallocate any no-longer-used
511 * pbufs at the head of this chain or queue.
513 * Decrements the pbuf reference count. If it reaches
514 * zero, the pbuf is deallocated.
516 * For a pbuf chain, this is repeated for each pbuf in the chain,
517 * up to the first pbuf which has a non-zero reference count after
518 * decrementing. (This might de-allocate the whole chain.)
520 * @param pbuf The pbuf (chain) to be dereferenced.
522 * @return the number of pbufs that were de-allocated
523 * from the head of the chain.
525 * @note MUST NOT be called on a packet queue.
526 * @note the reference counter of a pbuf equals the number of pointers
527 * that refer to the pbuf (or into the pbuf).
529 * @internal examples:
531 * Assuming existing chains a->b->c with the following reference
532 * counts, calling pbuf_free(a) results in:
534 * 1->2->3 becomes ...1->3
535 * 3->3->3 becomes 2->3->3
536 * 1->1->2 becomes ......1
537 * 2->1->1 becomes 1->1->1
538 * 1->1->1 becomes .......
542 pbuf_free(struct pbuf *p)
546 SYS_ARCH_DECL_PROTECT(old_level);
548 LWIP_ASSERT("p != NULL", p != NULL);
549 /* if assertions are disabled, proceed with debug output */
551 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_free(p == NULL) was called.\n"));
554 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_free(%p)\n", (void *)p));
558 LWIP_ASSERT("pbuf_free: sane flags",
559 p->flags == PBUF_FLAG_RAM || p->flags == PBUF_FLAG_ROM ||
560 p->flags == PBUF_FLAG_REF || p->flags == PBUF_FLAG_POOL);
563 /* Since decrementing ref cannot be guaranteed to be a single machine operation
564 * we must protect it. Also, the later test of ref must be protected.
566 SYS_ARCH_PROTECT(old_level);
567 /* de-allocate all consecutive pbufs from the head of the chain that
568 * obtain a zero reference count after decrementing*/
570 /* all pbufs in a chain are referenced at least once */
571 LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
572 /* decrease reference count (number of pointers to pbuf) */
574 /* this pbuf is no longer referenced to? */
576 /* remember next pbuf in chain for next iteration */
578 LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: deallocating %p\n", (void *)p));
579 /* is this a pbuf from the pool? */
580 if (p->flags == PBUF_FLAG_POOL) {
581 p->len = p->tot_len = PBUF_POOL_BUFSIZE;
582 p->payload = (void *)((u8_t *)p + sizeof(struct pbuf));
584 /* a ROM or RAM referencing pbuf */
585 } else if (p->flags == PBUF_FLAG_ROM || p->flags == PBUF_FLAG_REF) {
586 memp_free(MEMP_PBUF, p);
587 /* p->flags == PBUF_FLAG_RAM */
592 /* proceed to next pbuf */
594 /* p->ref > 0, this pbuf is still referenced to */
595 /* (and so the remaining pbufs in chain as well) */
597 LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: %p has ref %u, ending here.\n", (void *)p, (unsigned int)p->ref));
598 /* stop walking through chain */
602 SYS_ARCH_UNPROTECT(old_level);
603 PERF_STOP("pbuf_free");
604 /* return number of de-allocated pbufs */
609 * Count number of pbufs in a chain
611 * @param p first pbuf of chain
612 * @return the number of pbufs in a chain
616 pbuf_clen(struct pbuf *p)
629 * Increment the reference count of the pbuf.
631 * @param p pbuf to increase reference counter of
635 pbuf_ref(struct pbuf *p)
637 SYS_ARCH_DECL_PROTECT(old_level);
640 SYS_ARCH_PROTECT(old_level);
642 SYS_ARCH_UNPROTECT(old_level);
647 * Concatenate two pbufs (each may be a pbuf chain) and take over
648 * the caller's reference of the tail pbuf.
650 * @note The caller MAY NOT reference the tail pbuf afterwards.
651 * Use pbuf_chain() for that purpose.
657 pbuf_cat(struct pbuf *h, struct pbuf *t)
661 LWIP_ASSERT("h != NULL", h != NULL);
662 LWIP_ASSERT("t != NULL", t != NULL);
663 if ((h == NULL) || (t == NULL)) return;
665 /* proceed to last pbuf of chain */
666 for (p = h; p->next != NULL; p = p->next) {
667 /* add total length of second chain to all totals of first chain */
668 p->tot_len += t->tot_len;
670 /* { p is last pbuf of first h chain, p->next == NULL } */
671 LWIP_ASSERT("p->tot_len == p->len (of last pbuf in chain)", p->tot_len == p->len);
672 /* add total length of second chain to last pbuf total of first chain */
673 p->tot_len += t->tot_len;
674 /* chain last pbuf of head (p) with first of tail (t) */
679 * Chain two pbufs (or pbuf chains) together.
681 * The caller MUST call pbuf_free(t) once it has stopped
682 * using it. Use pbuf_cat() instead if you no longer use t.
684 * @param h head pbuf (chain)
685 * @param t tail pbuf (chain)
686 * @note The pbufs MUST belong to the same packet.
687 * @note MAY NOT be called on a packet queue.
689 * The ->tot_len fields of all pbufs of the head chain are adjusted.
690 * The ->next field of the last pbuf of the head chain is adjusted.
691 * The ->ref field of the first pbuf of the tail chain is adjusted.
695 pbuf_chain(struct pbuf *h, struct pbuf *t)
698 /* t is now referenced by h */
700 LWIP_DEBUGF(PBUF_DEBUG | DBG_FRESH | 2, ("pbuf_chain: %p references %p\n", (void *)h, (void *)t));
703 /* For packet queueing. Note that queued packets must be dequeued first
704 * before calling any pbuf functions. */
707 * Add a packet to the end of a queue.
709 * @param q pointer to first packet on the queue
710 * @param n packet to be queued
714 pbuf_queue(struct pbuf *p, struct pbuf *n)
716 LWIP_ASSERT("p != NULL", p != NULL);
717 LWIP_ASSERT("n != NULL", n != NULL);
718 if ((p == NULL) || (n == NULL)) return;
720 /* iterate through all packets on queue */
721 while (p->next != NULL) {
722 /* be very picky about pbuf chain correctness */
724 /* iterate through all pbufs in packet */
725 while (p->tot_len != p->len) {
726 /* make sure invariant condition holds */
727 LWIP_ASSERT("p->len < p->tot_len", p->len < p->tot_len);
728 /* make sure each packet is complete */
729 LWIP_ASSERT("p->next != NULL", p->next != NULL);
731 /* { p->tot_len == p->len } => p is last pbuf of a packet */
734 /* { p->tot_len == p->len } => p is last pbuf of a packet */
735 /* proceed to next packet on queue */
736 if (p->next != NULL) p = p->next;
738 /* { p->tot_len == p->len and p->next == NULL } ==>
739 * { p is last pbuf of last packet on queue } */
740 /* chain last pbuf of queue with n */
742 /* n is now referenced to one more time */
744 LWIP_DEBUGF(PBUF_DEBUG | DBG_FRESH | 2, ("pbuf_queue: referencing queued packet %p\n", (void *)n));
748 * Remove a packet from the head of a queue.
750 * The caller MUST reference the remainder of the queue (as returned).
752 * @param p pointer to first packet on the queue which will be dequeued.
753 * @return first packet on the remaining queue (NULL if no further packets).
757 pbuf_dequeue(struct pbuf *p)
760 LWIP_ASSERT("p != NULL", p != NULL);
762 /* iterate through all pbufs in packet */
763 while (p->tot_len != p->len) {
764 /* make sure invariant condition holds */
765 LWIP_ASSERT("p->len < p->tot_len", p->len < p->tot_len);
766 /* make sure each packet is complete */
767 LWIP_ASSERT("p->next != NULL", p->next != NULL);
770 /* { p->tot_len == p->len } => p is the last pbuf of the first packet */
771 /* remember next packet on queue */
773 /* dequeue p from queue */
775 /* any next packet on queue? */
777 /* although q is no longer referenced by p, it MUST be referenced by
778 * the caller, who is maintaining this packet queue */
779 LWIP_DEBUGF(PBUF_DEBUG | DBG_FRESH | 2, ("pbuf_dequeue: at least one packet on queue, first %p\n", (void *)q));
781 LWIP_DEBUGF(PBUF_DEBUG | DBG_FRESH | 2, ("pbuf_dequeue: no further packets on queue\n"));
789 * Create PBUF_POOL (or PBUF_RAM) copies of PBUF_REF pbufs.
791 * Used to queue packets on behalf of the lwIP stack, such as
792 * ARP based queueing.
794 * Go through a pbuf chain and replace any PBUF_REF buffers
795 * with PBUF_POOL (or PBUF_RAM) pbufs, each taking a copy of
796 * the referenced data.
798 * @note You MUST explicitly use p = pbuf_take(p);
799 * The pbuf you give as argument, may have been replaced
802 * @note Any replaced pbufs will be freed through pbuf_free().
803 * This may deallocate them if they become no longer referenced.
805 * @param p Head of pbuf chain to process
807 * @return Pointer to head of pbuf chain
810 pbuf_take(struct pbuf *p)
812 struct pbuf *q , *prev, *head;
813 LWIP_ASSERT("pbuf_take: p != NULL\n", p != NULL);
814 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_take(%p)\n", (void*)p));
818 /* iterate through pbuf chain */
821 /* pbuf is of type PBUF_REF? */
822 if (p->flags == PBUF_FLAG_REF) {
823 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE, ("pbuf_take: encountered PBUF_REF %p\n", (void *)p));
824 /* allocate a pbuf (w/ payload) fully in RAM */
825 /* PBUF_POOL buffers are faster if we can use them */
826 if (p->len <= PBUF_POOL_BUFSIZE) {
827 q = pbuf_alloc(PBUF_RAW, p->len, PBUF_POOL);
828 if (q == NULL) LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_take: Could not allocate PBUF_POOL\n"));
830 /* no replacement pbuf yet */
832 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_take: PBUF_POOL too small to replace PBUF_REF\n"));
834 /* no (large enough) PBUF_POOL was available? retry with PBUF_RAM */
836 q = pbuf_alloc(PBUF_RAW, p->len, PBUF_RAM);
837 if (q == NULL) LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_take: Could not allocate PBUF_RAM\n"));
839 /* replacement pbuf could be allocated? */
845 /* remove linkage from original pbuf */
847 /* remove linkage to original pbuf */
849 /* prev->next == p at this point */
850 LWIP_ASSERT("prev->next == p", prev->next == p);
851 /* break chain and insert new pbuf instead */
853 /* prev == NULL, so we replaced the head pbuf of the chain */
857 /* copy pbuf payload */
858 memcpy(q->payload, p->payload, p->len);
859 q->tot_len = p->tot_len;
861 /* in case p was the first pbuf, it is no longer refered to by
862 * our caller, as the caller MUST do p = pbuf_take(p);
863 * in case p was not the first pbuf, it is no longer refered to
864 * by prev. we can safely free the pbuf here.
865 * (note that we have set p->next to NULL already so that
866 * we will not free the rest of the chain by accident.)
869 /* do not copy ref, since someone else might be using the old buffer */
870 LWIP_DEBUGF(PBUF_DEBUG, ("pbuf_take: replaced PBUF_REF %p with %p\n", (void *)p, (void *)q));
873 /* deallocate chain */
875 LWIP_DEBUGF(PBUF_DEBUG | 2, ("pbuf_take: failed to allocate replacement pbuf for %p\n", (void *)p));
878 /* p->flags != PBUF_FLAG_REF */
880 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 1, ("pbuf_take: skipping pbuf not of type PBUF_REF\n"));
882 /* remember this pbuf */
884 /* proceed to next pbuf in original chain */
887 LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 1, ("pbuf_take: end of chain reached.\n"));
893 * Dechains the first pbuf from its succeeding pbufs in the chain.
895 * Makes p->tot_len field equal to p->len.
896 * @param p pbuf to dechain
897 * @return remainder of the pbuf chain, or NULL if it was de-allocated.
898 * @note May not be called on a packet queue.
901 pbuf_dechain(struct pbuf *p)
907 /* pbuf has successor in chain? */
909 /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */
910 LWIP_ASSERT("p->tot_len == p->len + q->tot_len", q->tot_len == p->tot_len - p->len);
911 /* enforce invariant if assertion is disabled */
912 q->tot_len = p->tot_len - p->len;
913 /* decouple pbuf from remainder */
915 /* total length of pbuf p is its own length only */
917 /* q is no longer referenced by p, free it */
918 LWIP_DEBUGF(PBUF_DEBUG | DBG_STATE, ("pbuf_dechain: unreferencing %p\n", (void *)q));
919 tail_gone = pbuf_free(q);
920 if (tail_gone > 0) LWIP_DEBUGF(PBUF_DEBUG | DBG_STATE,
921 ("pbuf_dechain: deallocated %p (as it is no longer referenced)\n", (void *)q));
922 /* return remaining tail or NULL if deallocated */
924 /* assert tot_len invariant: (p->tot_len == p->len + (p->next? p->next->tot_len: 0) */
925 LWIP_ASSERT("p->tot_len == p->len", p->tot_len == p->len);
926 return (tail_gone > 0? NULL: q);