1 #include <linux/module.h>
3 #include <linux/uaccess.h>
5 #include <linux/hardirq.h>
6 #include <linux/highmem.h>
7 #include <linux/pagemap.h>
/*
 * Copy up to @bytes from kernel buffer @vaddr out to the user iovec
 * array @iov, starting @base bytes into the first segment.  @atomic
 * selects the non-faulting __copy_to_user_inatomic() path (used under
 * an atomic kmap) over the faulting __copy_to_user() path.
 * NOTE(review): interior lines of this helper are not visible in this
 * chunk; presumably a loop walks subsequent iovec segments and the
 * function returns the number of bytes copied -- confirm upstream.
 */
10 static size_t __iovec_copy_to_user(char *vaddr, const struct iovec *iov,
11 size_t base, size_t bytes, int atomic)
13 size_t copied = 0, left = 0;
16 char __user *buf = iov->iov_base + base;
/* never copy past the end of the current segment */
17 int copy = min(bytes, iov->iov_len - base);
/* atomic context: must not fault */
21 left = __copy_to_user_inatomic(buf, vaddr, copy);
/* process context: may fault and resolve */
23 left = __copy_to_user(buf, vaddr, copy);
36 * Copy as much as we can into the page and return the number of bytes which
37 * were successfully copied. If a fault is encountered then return the number of
38 * bytes which were copied.
/*
 * Copy @bytes from @page (starting at @offset) to the user memory
 * described by the iovec-backed iov_iter @i.  Runs under kmap_atomic(),
 * so only the non-faulting copy variants are used; a fault yields a
 * short copy count.
 */
40 static size_t ii_iovec_copy_to_user_atomic(struct page *page,
41 struct iov_iter *i, unsigned long offset, size_t bytes)
/* i->data carries the iovec array for this iterator flavor */
43 struct iovec *iov = (struct iovec *)i->data;
48 kaddr = kmap_atomic(page);
/* fast path: a single segment needs no walking loop */
49 if (likely(i->nr_segs == 1)) {
51 char __user *buf = iov->iov_base + i->iov_offset;
52 left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
53 copied = bytes - left;
/* multi-segment: walk the iovec array, atomic=1 (no fault resolution) */
55 copied = __iovec_copy_to_user(kaddr + offset, iov,
56 i->iov_offset, bytes, 1);
64 * This has the same side effects and return value as
65 * ii_iovec_copy_to_user_atomic().
66 * The difference is that it attempts to resolve faults.
67 * Page must not be locked.
/*
 * Non-atomic variant of ii_iovec_copy_to_user_atomic(): copies @bytes
 * from @page at @offset to the user iovecs in @i, and is allowed to
 * resolve faults.  The page must not be locked (a fault while holding
 * the page lock could deadlock).
 */
69 static size_t ii_iovec_copy_to_user(struct page *page,
70 struct iov_iter *i, unsigned long offset, size_t bytes,
73 struct iovec *iov = (struct iovec *)i->data;
/* revalidate the user segments; may shrink @bytes */
79 if (generic_segment_checks(iov, &i->nr_segs, &bytes,
84 if (likely(i->nr_segs == 1)) {
86 char __user *buf = iov->iov_base + i->iov_offset;
88 * Faults on the destination of a read are common, so do it
89 * before taking the kmap.
/* pre-fault succeeded: safe to use the atomic (non-faulting) copy */
91 if (!fault_in_pages_writeable(buf, bytes)) {
92 kaddr = kmap_atomic(page);
93 left = __copy_to_user_inatomic(buf, kaddr + offset,
/* slow path: faulting copy_to_user (presumably under plain kmap) */
100 left = copy_to_user(buf, kaddr + offset, bytes);
103 copied = bytes - left;
/* multi-segment walk, atomic=0 so faults may be resolved */
106 copied = __iovec_copy_to_user(kaddr + offset, iov,
107 i->iov_offset, bytes, 0);
115 * As an easily verifiable first pass, we implement all the methods that
116 * copy data to and from bvec pages with one function. We implement it
117 * all with kmap_atomic().
/*
 * Workhorse for all the bvec copy methods: moves @bytes between @page
 * (at @page_offset) and the bio_vec array behind @iter, one bvec
 * segment at a time, mapping both sides with kmap_atomic().  The final
 * (not visible here) parameter selects the copy direction; callers pass
 * 0 for copy-to-"user" (page -> bvec) and 1 for copy-from-"user"
 * (bvec -> page) -- confirm the flag name against the full source.
 */
119 static size_t bvec_copy_tofrom_page(struct iov_iter *iter, struct page *page,
120 unsigned long page_offset, size_t bytes,
123 struct bio_vec *bvec = (struct bio_vec *)iter->data;
124 size_t bvec_offset = iter->iov_offset;
125 size_t remaining = bytes;
130 page_map = kmap_atomic(page);
/* caller must not ask for more than the iterator holds */
132 BUG_ON(bytes > iter->count);
/* bio_vecs are stricter than iovecs: no zero-length segments */
134 BUG_ON(bvec->bv_len == 0);
135 BUG_ON(bvec_offset >= bvec->bv_len);
136 copy = min(remaining, bvec->bv_len - bvec_offset);
137 bvec_map = kmap_atomic(bvec->bv_page);
/* one direction per call: bvec -> page ... */
139 memcpy(page_map + page_offset,
140 bvec_map + bvec->bv_offset + bvec_offset,
/* ... or page -> bvec */
143 memcpy(bvec_map + bvec->bv_offset + bvec_offset,
144 page_map + page_offset,
146 kunmap_atomic(bvec_map);
/* segment exhausted: advance to the next bvec (advance not visible) */
150 if (bvec_offset == bvec->bv_len) {
156 kunmap_atomic(page_map);
/*
 * bvec pages are pinned kernel pages, so the atomic and non-atomic
 * copy-to methods are identical thin wrappers (direction flag 0).
 */
161 static size_t ii_bvec_copy_to_user_atomic(struct page *page, struct iov_iter *i,
162 unsigned long offset, size_t bytes)
164 return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
/* Non-atomic copy-to: same wrapper as the atomic variant (flag 0). */
166 static size_t ii_bvec_copy_to_user(struct page *page, struct iov_iter *i,
167 unsigned long offset, size_t bytes,
170 return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
/* Copy-from variant: direction flag 1 moves data into @page. */
172 static size_t ii_bvec_copy_from_user_atomic(struct page *page,
174 unsigned long offset, size_t bytes)
176 return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
/* Non-atomic copy-from: identical wrapper to the atomic one (flag 1). */
178 static size_t ii_bvec_copy_from_user(struct page *page, struct iov_iter *i,
179 unsigned long offset, size_t bytes)
181 return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
185 * bio_vecs have a stricter structure than iovecs that might have
186 * come from userspace. There are no zero length bio_vec elements.
/*
 * Advance the bvec-backed iterator @i by @bytes, stepping over whole
 * bio_vec segments as they are consumed and storing the updated
 * (bvec pointer, intra-segment offset) back into the iterator.
 */
188 static void ii_bvec_advance(struct iov_iter *i, size_t bytes)
190 struct bio_vec *bvec = (struct bio_vec *)i->data;
191 size_t offset = i->iov_offset;
/* cannot advance past what the iterator holds */
194 BUG_ON(i->count < bytes);
/* bio_vec invariants: no zero-length segments, offset within segment */
196 BUG_ON(bvec->bv_len == 0);
197 BUG_ON(bvec->bv_len <= offset);
198 delta = min(bytes, bvec->bv_len - offset);
/* current segment fully consumed: move to the next bvec */
202 if (offset == bvec->bv_len) {
208 i->data = (unsigned long)bvec;
209 i->iov_offset = offset;
213 * pages pointed to by bio_vecs are always pinned.
215 static int ii_bvec_fault_in_readable(struct iov_iter *i, size_t bytes)
/*
 * Return how many bytes remain in just the current bvec segment,
 * capped by the iterator's total remaining count.
 */
220 static size_t ii_bvec_single_seg_count(const struct iov_iter *i)
222 const struct bio_vec *bvec = (struct bio_vec *)i->data;
226 return min(i->count, bvec->bv_len - i->iov_offset);
229 static int ii_bvec_shorten(struct iov_iter *i, size_t count)
/* Method table binding the bvec-backed implementations to iov_iter. */
234 struct iov_iter_ops ii_bvec_ops = {
235 .ii_copy_to_user_atomic = ii_bvec_copy_to_user_atomic,
236 .ii_copy_to_user = ii_bvec_copy_to_user,
237 .ii_copy_from_user_atomic = ii_bvec_copy_from_user_atomic,
238 .ii_copy_from_user = ii_bvec_copy_from_user,
239 .ii_advance = ii_bvec_advance,
240 .ii_fault_in_readable = ii_bvec_fault_in_readable,
241 .ii_single_seg_count = ii_bvec_single_seg_count,
242 .ii_shorten = ii_bvec_shorten,
244 EXPORT_SYMBOL(ii_bvec_ops);
245 #endif /* CONFIG_BLOCK */
/*
 * Mirror of __iovec_copy_to_user(): copy up to @bytes from the user
 * iovec array @iov (starting @base into the first segment) into kernel
 * buffer @vaddr.  @atomic selects __copy_from_user_inatomic() (no
 * faults) over the faulting __copy_from_user().  Returns bytes copied,
 * short on fault.  NOTE(review): the segment-walking loop is not
 * visible in this chunk -- confirm upstream.
 */
247 static size_t __iovec_copy_from_user(char *vaddr, const struct iovec *iov,
248 size_t base, size_t bytes, int atomic)
250 size_t copied = 0, left = 0;
253 char __user *buf = iov->iov_base + base;
/* never read past the end of the current segment */
254 int copy = min(bytes, iov->iov_len - base);
/* atomic context: must not fault */
258 left = __copy_from_user_inatomic(vaddr, buf, copy);
/* process context: may fault and resolve */
260 left = __copy_from_user(vaddr, buf, copy);
/* subtract the uncopied tail of the last (possibly faulting) copy */
269 return copied - left;
273 * Copy as much as we can into the page and return the number of bytes which
274 * were successfully copied. If a fault is encountered then return the number
275 * of bytes which were copied.
/*
 * Copy @bytes from the user iovecs behind @i into @page at @offset,
 * in atomic context (under kmap_atomic(), non-faulting copies only).
 * Returns the number of bytes copied; a fault gives a short count.
 */
277 static size_t ii_iovec_copy_from_user_atomic(struct page *page,
278 struct iov_iter *i, unsigned long offset, size_t bytes)
280 struct iovec *iov = (struct iovec *)i->data;
/* this variant is only legal in atomic context */
284 BUG_ON(!in_atomic());
285 kaddr = kmap_atomic(page);
/* single-segment fast path */
286 if (likely(i->nr_segs == 1)) {
288 char __user *buf = iov->iov_base + i->iov_offset;
289 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
290 copied = bytes - left;
/* multi-segment walk, atomic=1 (no fault resolution) */
292 copied = __iovec_copy_from_user(kaddr + offset, iov,
293 i->iov_offset, bytes, 1);
295 kunmap_atomic(kaddr);
/*
 * NOTE(review): the exported name below does not match the (static)
 * function above -- looks like a stale wrapper export; verify that
 * iov_iter_copy_from_user_atomic is defined elsewhere in this file.
 */
299 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
302 * This has the same side effects and return value as
303 * ii_iovec_copy_from_user_atomic().
304 * The difference is that it attempts to resolve faults.
305 * Page must not be locked.
/*
 * Non-atomic variant of ii_iovec_copy_from_user_atomic(): copies @bytes
 * from the user iovecs in @i into @page at @offset and may resolve
 * faults.  The page must not be locked.
 */
307 static size_t ii_iovec_copy_from_user(struct page *page,
308 struct iov_iter *i, unsigned long offset, size_t bytes)
310 struct iovec *iov = (struct iovec *)i->data;
/* single-segment fast path with the faulting copy */
315 if (likely(i->nr_segs == 1)) {
317 char __user *buf = iov->iov_base + i->iov_offset;
318 left = __copy_from_user(kaddr + offset, buf, bytes);
319 copied = bytes - left;
/* multi-segment walk, atomic=0 so faults may be resolved */
321 copied = __iovec_copy_from_user(kaddr + offset, iov,
322 i->iov_offset, bytes, 0);
/*
 * Advance the iovec-backed iterator @i by @bytes.  With a single
 * segment only the offset moves; otherwise walk the iovec array,
 * consuming whole segments and writing the updated position back
 * into the iterator.
 */
328 static void ii_iovec_advance(struct iov_iter *i, size_t bytes)
330 BUG_ON(i->count < bytes);
/* fast path: one segment, just bump the offset */
332 if (likely(i->nr_segs == 1)) {
333 i->iov_offset += bytes;
336 struct iovec *iov = (struct iovec *)i->data;
337 size_t base = i->iov_offset;
338 unsigned long nr_segs = i->nr_segs;
341 * The !iov->iov_len check ensures we skip over unlikely
342 * zero-length segments (without overrunning the iovec).
344 while (bytes || unlikely(i->count && !iov->iov_len)) {
347 copy = min(bytes, iov->iov_len - base);
348 BUG_ON(!i->count || i->count < copy);
/* segment consumed: step to the next iovec */
352 if (iov->iov_len == base) {
358 i->data = (unsigned long)iov;
359 i->iov_offset = base;
360 i->nr_segs = nr_segs;
365 * Fault in the first iovec of the given iov_iter, to a maximum length
366 * of bytes. Returns 0 on success, or non-zero if the memory could not be
367 * accessed (ie. because it is an invalid address).
369 * writev-intensive code may want this to prefault several iovecs -- that
370 * would be possible (callers must not rely on the fact that _only_ the
371 * first iovec will be faulted with the current implementation).
/*
 * Touch the current iovec's user memory so a later atomic copy is
 * unlikely to fault; only the first segment is prefaulted.  Returns 0
 * on success, non-zero if the address is inaccessible.
 */
373 static int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
375 struct iovec *iov = (struct iovec *)i->data;
376 char __user *buf = iov->iov_base + i->iov_offset;
/* limit the prefault to what remains of the current segment */
377 bytes = min(bytes, iov->iov_len - i->iov_offset);
378 return fault_in_pages_readable(buf, bytes);
382 * Return the count of just the current iov_iter segment.
/*
 * Return how many bytes remain in just the current iovec segment,
 * capped by the iterator's total remaining count.
 */
384 static size_t ii_iovec_single_seg_count(const struct iov_iter *i)
386 const struct iovec *iov = (struct iovec *)i->data;
390 return min(i->count, iov->iov_len - i->iov_offset);
/*
 * Truncate the iterator to at most @count bytes: shrink the segment
 * array via iov_shorten() and clamp the remaining byte count.
 */
393 static int ii_iovec_shorten(struct iov_iter *i, size_t count)
395 struct iovec *iov = (struct iovec *)i->data;
396 i->nr_segs = iov_shorten(iov, i->nr_segs, count);
397 i->count = min(i->count, count);
/* Method table binding the iovec-backed implementations to iov_iter. */
401 struct iov_iter_ops ii_iovec_ops = {
402 .ii_copy_to_user_atomic = ii_iovec_copy_to_user_atomic,
403 .ii_copy_to_user = ii_iovec_copy_to_user,
404 .ii_copy_from_user_atomic = ii_iovec_copy_from_user_atomic,
405 .ii_copy_from_user = ii_iovec_copy_from_user,
406 .ii_advance = ii_iovec_advance,
407 .ii_fault_in_readable = ii_iovec_fault_in_readable,
408 .ii_single_seg_count = ii_iovec_single_seg_count,
409 .ii_shorten = ii_iovec_shorten,
411 EXPORT_SYMBOL(ii_iovec_ops);