/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
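
/*
 * Usage sketch (illustrative, not part of this file): encode a
 * five-byte opaque.  The caller must have room for one length word
 * plus XDR_QUADLEN(5) == 2 data words:
 *
 *	static const u8 cookie[5] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };
 *
 *	p = xdr_encode_opaque(p, cookie, sizeof(cookie));
 *
 * This emits the length 5, the five data bytes, and three zero pad
 * bytes to reach the next 32-bit boundary.
 */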
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
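
/*
 * Usage sketch (illustrative): decode a counted string without
 * copying; "name" ends up pointing into the XDR buffer itself, so the
 * buffer must outlive the string.  NFS_MAXNAMLEN is a stand-in limit.
 *
 *	char *name;
 *	unsigned int len;
 *
 *	p = xdr_decode_string_inplace(p, &name, &len, NFS_MAXNAMLEN);
 *	if (p == NULL)
 *		return -EIO;	(on-the-wire length exceeded maxlen)
 */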
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/**
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
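
/*
 * Worked example of the page-vector addressing used above
 * (illustrative): with PAGE_CACHE_SIZE == 4096, the byte at offset 100
 * of pages[3] has linear address (3 << PAGE_CACHE_SHIFT) + 100 == 12388.
 * So a call
 *
 *	_shift_data_right_pages(pages, 12388, 8292, 1000);
 *
 * moves 1000 bytes starting at pages[2]+100 up to pages[3]+100, working
 * from the tail end of both areas so that overlap is safe.
 */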
/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}
/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
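
/*
 * Usage sketch (illustrative, not part of this file): a typical encode
 * sequence.  "req", "opcode" and "payload_len" are stand-in names.
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL);
 *	p = xdr_reserve_space(&xdr, 2 * 4);
 *	if (p == NULL)
 *		return -EMSGSIZE;
 *	*p++ = cpu_to_be32(opcode);
 *	*p++ = cpu_to_be32(payload_len);
 *
 * xdr_reserve_space() rounds the request up to a word multiple and
 * fails by returning NULL rather than overrunning the scratch area.
 */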
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
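
/*
 * Usage sketch (illustrative): attach scratch space once, before the
 * first xdr_inline_decode(), when the data may live in the page array:
 *
 *	struct page *scratch = alloc_page(GFP_KERNEL);
 *
 *	if (scratch != NULL)
 *		xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);
 *
 * Any later xdr_inline_decode() that straddles a page boundary is then
 * linearized into the scratch page transparently (see
 * xdr_copy_to_scratch() below).
 */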
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
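
/*
 * Usage sketch (illustrative): decode a 32-bit count followed by that
 * many opaque bytes.  "count" and "dst" are stand-in names; a real
 * decoder bounds-checks "count" before the second call.
 *
 *	__be32 *p = xdr_inline_decode(&xdr, 4);
 *	if (p == NULL)
 *		return -EIO;
 *	count = be32_to_cpup(p);
 *	p = xdr_inline_decode(&xdr, count);
 *	if (p == NULL)
 *		return -EIO;
 *	memcpy(dst, p, count);
 */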
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
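
/*
 * Usage sketch (illustrative): a READ-style reply decoder pulls the
 * byte count out of the head, then aligns the page data behind it:
 *
 *	count = be32_to_cpup(p);
 *	recvd = xdr_read_pages(&xdr, count);
 *	if (count > recvd)
 *		count = recvd;	(server sent less than it claimed)
 *
 * Afterwards the stream points at the tail, so any fields that follow
 * the opaque data can be decoded with further xdr_inline_decode() calls.
 */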
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
/**
 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
 * @buf: buf to be trimmed
 * @len: number of bytes to reduce "buf" by
 *
 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
 * that it's possible that we'll trim less than that amount if the xdr_buf is
 * too small, or if (for instance) it's all in the head and the parser has
 * already read too far into it.
 */
void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
{
	size_t cur;
	unsigned int trim = len;

	if (buf->tail[0].iov_len) {
		cur = min_t(size_t, buf->tail[0].iov_len, trim);
		buf->tail[0].iov_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->page_len) {
		cur = min_t(unsigned int, buf->page_len, trim);
		buf->page_len -= cur;
		trim -= cur;
		if (!trim)
			goto fix_len;
	}

	if (buf->head[0].iov_len) {
		cur = min_t(size_t, buf->head[0].iov_len, trim);
		buf->head[0].iov_len -= cur;
		trim -= cur;
	}
fix_len:
	buf->len -= (len - trim);
}
EXPORT_SYMBOL_GPL(xdr_buf_trim);
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
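
/*
 * Usage sketch (illustrative): copy a 16-byte verifier that may
 * straddle head, pages and tail into flat storage:
 *
 *	u8 verf[16];
 *
 *	if (read_bytes_from_xdr_buf(buf, offset, verf, sizeof(verf)) < 0)
 *		return -EIO;
 *
 * The call fails (via xdr_buf_subsegment) when offset + 16 runs past
 * the end of "buf".
 */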
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
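
/*
 * Usage sketch (illustrative): the word helpers give random access to
 * one 32-bit value at a byte offset, handling byte order:
 *
 *	u32 seq;
 *
 *	if (xdr_decode_word(buf, offset, &seq))
 *		return -EIO;
 *	if (xdr_encode_word(buf, offset, seq + 1))
 *		return -EIO;
 */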
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}
			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);
int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
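
/*
 * Usage sketch (illustrative): decode an XDR array of fixed-size
 * elements.  The xcode callback sees each element through a contiguous
 * pointer even when the element straddles a page boundary.
 *
 *	static int xcode_one(struct xdr_array2_desc *desc, void *elem)
 *	{
 *		return 0;	(inspect or fill *elem here)
 *	}
 *
 *	struct xdr_array2_desc desc = {
 *		.elem_size	= 4,
 *		.array_maxlen	= 64,
 *		.xcode		= xcode_one,
 *	};
 *	err = xdr_decode_array2(buf, base, &desc);
 *
 * On success desc.array_len holds the on-the-wire element count.
 */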
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
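
/*
 * Usage sketch (illustrative): xdr_process_buf() presents "len" bytes
 * of the buffer to the actor as one scatterlist fragment at a time; a
 * byte-counting actor would be:
 *
 *	static int count_frag(struct scatterlist *sg, void *data)
 *	{
 *		unsigned int *total = data;
 *		*total += sg->length;
 *		return 0;	(non-zero aborts the walk)
 *	}
 *
 *	unsigned int total = 0;
 *	err = xdr_process_buf(buf, 0, buf->len, count_frag, &total);
 *
 * The GSS code uses the same walk to checksum and encrypt xdr_bufs.
 */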