#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
	while (unlikely(!left && n)) {			\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
		__v.iov_base = __p->iov_base;		\
		__v.iov_len -= left;			\
#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		skip += __v.iov_len;			\
	while (unlikely(n)) {				\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
		__v.iov_base = __p->iov_base;		\
#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
#define iterate_all_kinds(i, n, v, I, B, K) {		\
	size_t skip = i->iov_offset;			\
	if (unlikely(i->type & ITER_BVEC)) {		\
		struct bio_vec v;			\
		struct bvec_iter __bi;			\
		iterate_bvec(i, n, v, __bi, skip, (B))	\
	} else if (unlikely(i->type & ITER_KVEC)) {	\
		const struct kvec *kvec;		\
		struct kvec v;				\
		iterate_kvec(i, n, v, kvec, skip, (K))	\
	} else {					\
		const struct iovec *iov;		\
		struct iovec v;				\
		iterate_iovec(i, n, v, iov, skip, (I))	\
	}						\
}
#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
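/*
 * Note on the macros above: the I, B and K arguments are step expressions
 * evaluated once per segment, for user-space iovecs, bio_vecs and kernel
 * kvecs respectively.  The iovec step must evaluate to the number of bytes
 * it did NOT process (so a partial fault stops the walk); the bvec and kvec
 * steps always complete.  An illustrative (hypothetical) consumer that
 * merely measures how far it walked could look like:
 *
 *	size_t seen = 0;
 *	iterate_all_kinds(i, bytes, v,
 *		({ seen += v.iov_len; 0; }),
 *		seen += v.bv_len,
 *		seen += v.iov_len
 *	)
 *
 * iterate_and_advance() additionally updates i->iov/i->kvec/i->bvec,
 * i->nr_segs, i->iov_offset and i->count to account for the bytes consumed.
 */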
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;
		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		offset = from - kaddr;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	if (unlikely(bytes > i->count))
	if (unlikely(!bytes))
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);
	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		while (unlikely(!left && bytes)) {
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	/* Too bad - revert to non-atomic kmap */
	left = __copy_from_user(to, buf, copy);
	while (unlikely(!left && bytes)) {
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
	if (skip == iov->iov_len) {
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov_offset = skip;
	return wanted - bytes;
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
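/*
 * Illustrative use (sketch, mirroring the pattern in the generic buffered
 * write path): prefault the user pages, then copy with page faults disabled
 * and retry on a short copy.
 *
 *	if (iov_iter_fault_in_readable(i, bytes)) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */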
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
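/*
 * Example (illustrative; "ubuf" and "len" are placeholders): wrapping a
 * single user buffer for a read(2)-style operation, where data will be
 * copied *to* user space:
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, &iov, 1, len);
 */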
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
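/*
 * Example (sketch): a ->read_iter()-style helper pushing a kernel buffer
 * into the caller's iterator; "kbuf" and "kbuf_len" are placeholders.  The
 * return value may be short if the iterator runs out of space or a user
 * page faults:
 *
 *	size_t copied = copy_to_iter(kbuf, kbuf_len, iter);
 *	if (copied < kbuf_len)
 *		... handle the short copy (e.g. return -EFAULT or retry) ...
 */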
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
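/*
 * Example (sketch): handing a page-cache page to the caller's iterator, as
 * a simple ->read_iter() implementation might; "page", "offset" and "chunk"
 * are placeholders:
 *
 *	size_t n = copy_page_to_iter(page, offset, chunk, iter);
 *	iocb->ki_pos += n;
 *	if (n < chunk)
 *		... short copy: stop and report what was transferred ...
 */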
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
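/*
 * Example (sketch): after transferring "done" bytes by some other means
 * (e.g. a completed DMA), tell the iterator so that i->count, i->iov_offset
 * and the segment pointers stay in sync with the data actually consumed:
 *
 *	iov_iter_advance(iter, done);
 */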
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);
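/*
 * Example (illustrative; "kbuf" and "len" are placeholders): iterating over
 * a kernel buffer.  The direction must carry ITER_KVEC, matching the
 * BUG_ON() above:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, WRITE | ITER_KVEC, &kv, 1, len);
 */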
void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
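/*
 * Example (sketch): direct-I/O style callers typically reject iterators
 * whose addresses or lengths are not block-aligned; "bdev" is a placeholder:
 *
 *	if (iov_iter_alignment(iter) & (bdev_logical_block_size(bdev) - 1))
 *		return -EINVAL;
 */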
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
		return (res == n ? len : res * PAGE_SIZE) - *start;
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages);
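/*
 * Example (sketch): pinning the next chunk of a user-backed iterator.  On
 * success the data starts "start" bytes into pages[0]; the caller must
 * put_page() every returned page and advance the iterator itself:
 *
 *	struct page *pages[16];
 *	size_t start;
 *	ssize_t n;
 *
 *	n = iov_iter_get_pages(iter, pages, SIZE_MAX, 16, &start);
 *	if (n > 0)
 *		iov_iter_advance(iter, n);
 */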
static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	if (maxsize > i->count)
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
		return (res == n ? len : res * PAGE_SIZE) - *start;
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		get_page(*p = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
		sum = csum_block_add(sum, next, off);
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
		sum = csum_block_add(sum, next, off);
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
		sum = csum_block_add(sum, next, off);
EXPORT_SYMBOL(csum_and_copy_from_iter);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
		sum = csum_block_add(sum, next, off);
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
		sum = csum_block_add(sum, next, off);
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
		sum = csum_block_add(sum, next, off);
EXPORT_SYMBOL(csum_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;
	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
		if (npages >= maxpages)
		if (npages >= maxpages)
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
		if (npages >= maxpages)
EXPORT_SYMBOL(iov_iter_npages);
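/*
 * Example (sketch): block-layer style callers use the page estimate to size
 * a bio before mapping the iterator's pages into it:
 *
 *	int nr = iov_iter_npages(iter, BIO_MAX_PAGES);
 *	bio = bio_alloc(GFP_KERNEL, nr);
 */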
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
EXPORT_SYMBOL(import_iovec);
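/*
 * Example (sketch, following the readv/writev syscall pattern): copy the
 * user iovec array in, use the iterator, then free whatever import_iovec()
 * allocated (kfree(NULL) is fine when the on-stack fast array was used):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	kfree(iov);
 */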
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i)
{
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
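/*
 * Example (sketch): the single-buffer variant used by plain read(2)/write(2)
 * style paths; "ubuf" and "len" are placeholders:
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_single_range(WRITE, ubuf, len, &iov, &iter);
 *	if (unlikely(ret))
 *		return ret;
 */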