1 #include <linux/module.h>
3 #include <linux/uaccess.h>
5 #include <linux/hardirq.h>
6 #include <linux/highmem.h>
7 #include <linux/pagemap.h>
/*
 * Copy up to @bytes from the kernel buffer @vaddr out to the user buffers
 * described by @iov, starting @base bytes into the first iovec.  @atomic
 * selects the non-faulting __copy_to_user_inatomic() variant for callers
 * that hold an atomic kmap.
 *
 * NOTE(review): the loop header, braces and return statement are not
 * visible in this view; presumably returns the number of bytes copied
 * (copied - left), mirroring __iovec_copy_from_user() below -- confirm
 * against the full file.
 */
10 static size_t __iovec_copy_to_user(char *vaddr, const struct iovec *iov,
11 size_t base, size_t bytes, int atomic)
13 size_t copied = 0, left = 0;
16 char __user *buf = iov->iov_base + base;
/* clamp the copy to what remains of the current iovec segment */
17 int copy = min(bytes, iov->iov_len - base);
/* atomic path: must not sleep, so a fault short-copies */
21 left = __copy_to_user_inatomic(buf, vaddr, copy);
/* non-atomic path: may fault destination pages in */
23 left = __copy_to_user(buf, vaddr, copy);
36 * Copy as much as we can into the page and return the number of bytes which
37 * were successfully copied. If a fault is encountered then return the number of
38 * bytes which were copied.
40 static size_t ii_iovec_copy_to_user_atomic(struct page *page,
41 struct iov_iter *i, unsigned long offset, size_t bytes)
/* the iovec array is carried opaquely in i->data */
43 struct iovec *iov = (struct iovec *)i->data;
/* atomic mapping: no sleeping allowed until kunmap (not visible here) */
48 kaddr = kmap_atomic(page);
49 if (likely(i->nr_segs == 1)) {
/* fast path: a single segment, copy straight to the user buffer */
51 char __user *buf = iov->iov_base + i->iov_offset;
52 left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
53 copied = bytes - left;
/* slow path: walk multiple segments via the helper (atomic=1) */
55 copied = __iovec_copy_to_user(kaddr + offset, iov,
56 i->iov_offset, bytes, 1);
64 * This has the same side effects and return value as
65 * ii_iovec_copy_to_user_atomic().
66 * The difference is that it attempts to resolve faults.
67 * Page must not be locked.
69 static size_t ii_iovec_copy_to_user(struct page *page,
70 struct iov_iter *i, unsigned long offset, size_t bytes,
73 struct iovec *iov = (struct iovec *)i->data;
/* re-validate the user segments; may trim @bytes (continuation not visible) */
79 if (generic_segment_checks(iov, &i->nr_segs, &bytes,
84 if (likely(i->nr_segs == 1)) {
86 char __user *buf = iov->iov_base + i->iov_offset;
/*
 * Faults on the destination of a read are common, so do it
 * before taking the kmap.
 */
91 if (!fault_in_pages_writeable(buf, bytes)) {
/* pages pre-faulted: try the cheap atomic copy first */
92 kaddr = kmap_atomic(page);
93 left = __copy_to_user_inatomic(buf, kaddr + offset,
/* fallback: sleeping copy_to_user resolves any remaining faults */
100 left = copy_to_user(buf, kaddr + offset, bytes);
103 copied = bytes - left;
/* multi-segment case: helper with atomic=0, faults are resolved */
106 copied = __iovec_copy_to_user(kaddr + offset, iov,
107 i->iov_offset, bytes, 0);
115 * As an easily verifiable first pass, we implement all the methods that
116 * copy data to and from bvec pages with one function. We implement it
117 * all with kmap_atomic().
/*
 * Copy @bytes between @page (at @page_offset) and the bio_vec array carried
 * in @iter->data, advancing through bvec segments as each is exhausted.
 * The final flag argument selects direction (0 from callers copying to
 * user/bvec, 1 from callers copying from user/bvec -- the selecting `if`
 * lines are not visible here; confirm which branch each value takes).
 */
119 static size_t bvec_copy_tofrom_page(struct iov_iter *iter, struct page *page,
120 unsigned long page_offset, size_t bytes,
123 struct bio_vec *bvec = (struct bio_vec *)iter->data;
124 size_t bvec_offset = iter->iov_offset;
125 size_t remaining = bytes;
130 page_map = kmap_atomic(page);
/* caller must not ask for more than the iterator holds */
132 BUG_ON(bytes > iter->count);
/* bio_vecs never contain zero-length segments (see comment below) */
134 BUG_ON(bvec->bv_len == 0);
135 BUG_ON(bvec_offset >= bvec->bv_len);
/* copy no more than remains of the current bvec segment */
136 copy = min(remaining, bvec->bv_len - bvec_offset);
137 bvec_map = kmap_atomic(bvec->bv_page);
/* bvec -> page */
139 memcpy(page_map + page_offset,
140 bvec_map + bvec->bv_offset + bvec_offset,
/* page -> bvec */
143 memcpy(bvec_map + bvec->bv_offset + bvec_offset,
144 page_map + page_offset,
146 kunmap_atomic(bvec_map);
/* segment exhausted: step to the next bvec (advance lines not visible) */
150 if (bvec_offset == bvec->bv_len) {
156 kunmap_atomic(page_map);
/* bvec "copy to user": direction flag 0 (page -> bvec); atomicity is
 * irrelevant since bvec pages are pinned and kmap_atomic is used anyway */
161 static size_t ii_bvec_copy_to_user_atomic(struct page *page, struct iov_iter *i,
162 unsigned long offset, size_t bytes)
164 return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
/* non-atomic bvec copy-to-user: identical to the atomic variant above */
166 static size_t ii_bvec_copy_to_user(struct page *page, struct iov_iter *i,
167 unsigned long offset, size_t bytes,
170 return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
/* bvec "copy from user": direction flag 1 (bvec -> page); the iov_iter
 * parameter line (original line 173) is not visible in this view */
172 static size_t ii_bvec_copy_from_user_atomic(struct page *page,
174 unsigned long offset, size_t bytes)
176 return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
/* non-atomic bvec copy-from-user: identical to the atomic variant above */
178 static size_t ii_bvec_copy_from_user(struct page *page, struct iov_iter *i,
179 unsigned long offset, size_t bytes)
181 return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
185 * bio_vecs have a stricter structure than iovecs that might have
186 * come from userspace. There are no zero length bio_vec elements.
/*
 * Advance the bvec-backed iterator by @bytes, stepping to the next
 * bio_vec segment whenever the current one is consumed.
 */
188 static void ii_bvec_advance(struct iov_iter *i, size_t bytes)
190 struct bio_vec *bvec = (struct bio_vec *)i->data;
191 size_t offset = i->iov_offset;
/* cannot advance past the end of the iterator */
194 BUG_ON(i->count < bytes);
/* bio_vecs never contain zero-length segments */
196 BUG_ON(bvec->bv_len == 0);
197 BUG_ON(bvec->bv_len <= offset);
/* consume no more than remains of the current segment */
198 delta = min(bytes, bvec->bv_len - offset);
/* segment exhausted: move to the next bvec (advance lines not visible) */
202 if (offset == bvec->bv_len) {
/* write the (possibly advanced) cursor back into the iterator */
208 i->data = (unsigned long)bvec;
209 i->iov_offset = offset;
213 * pages pointed to by bio_vecs are always pinned.
/* nothing to fault in: bvec pages are always pinned (see comment above);
 * presumably just returns 0 -- body not visible in this view */
215 static int ii_bvec_fault_in_readable(struct iov_iter *i, size_t bytes)
/* bytes remaining in just the current bvec segment, capped by the
 * iterator's total remaining count */
220 static size_t ii_bvec_single_seg_count(const struct iov_iter *i)
222 const struct bio_vec *bvec = (struct bio_vec *)i->data;
226 return min(i->count, bvec->bv_len - i->iov_offset);
/* iov_iter operations for iterators backed by bio_vec arrays */
229 struct iov_iter_ops ii_bvec_ops = {
230 .ii_copy_to_user_atomic = ii_bvec_copy_to_user_atomic,
231 .ii_copy_to_user = ii_bvec_copy_to_user,
232 .ii_copy_from_user_atomic = ii_bvec_copy_from_user_atomic,
233 .ii_copy_from_user = ii_bvec_copy_from_user,
234 .ii_advance = ii_bvec_advance,
235 .ii_fault_in_readable = ii_bvec_fault_in_readable,
236 .ii_single_seg_count = ii_bvec_single_seg_count,
238 EXPORT_SYMBOL(ii_bvec_ops);
239 #endif /* CONFIG_BLOCK */
/*
 * Copy up to @bytes from the user buffers described by @iov (starting
 * @base bytes into the first iovec) into the kernel buffer @vaddr.
 * @atomic selects the non-faulting __copy_from_user_inatomic() variant.
 * Returns the number of bytes successfully copied (copied - left).
 */
241 static size_t __iovec_copy_from_user(char *vaddr, const struct iovec *iov,
242 size_t base, size_t bytes, int atomic)
244 size_t copied = 0, left = 0;
247 char __user *buf = iov->iov_base + base;
/* clamp the copy to what remains of the current iovec segment */
248 int copy = min(bytes, iov->iov_len - base);
/* atomic path: must not sleep, so a fault short-copies */
252 left = __copy_from_user_inatomic(vaddr, buf, copy);
/* non-atomic path: may fault source pages in */
254 left = __copy_from_user(vaddr, buf, copy);
263 return copied - left;
267 * Copy as much as we can into the page and return the number of bytes which
268 * were successfully copied. If a fault is encountered then return the number
269 * of bytes which were copied.
271 static size_t ii_iovec_copy_from_user_atomic(struct page *page,
272 struct iov_iter *i, unsigned long offset, size_t bytes)
274 struct iovec *iov = (struct iovec *)i->data;
/* this variant is only valid from atomic context */
278 BUG_ON(!in_atomic());
279 kaddr = kmap_atomic(page);
280 if (likely(i->nr_segs == 1)) {
/* fast path: single segment, copy straight from the user buffer */
282 char __user *buf = iov->iov_base + i->iov_offset;
283 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
284 copied = bytes - left;
/* slow path: walk multiple segments via the helper (atomic=1) */
286 copied = __iovec_copy_from_user(kaddr + offset, iov,
287 i->iov_offset, bytes, 1);
289 kunmap_atomic(kaddr);
/* NOTE(review): exported name does not match the static function above
 * (ii_iovec_copy_from_user_atomic) -- likely a leftover from a rename;
 * confirm this symbol exists or fix the EXPORT */
293 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
296 * This has the same side effects and return value as
297 * ii_iovec_copy_from_user_atomic().
298 * The difference is that it attempts to resolve faults.
299 * Page must not be locked.
301 static size_t ii_iovec_copy_from_user(struct page *page,
302 struct iov_iter *i, unsigned long offset, size_t bytes)
304 struct iovec *iov = (struct iovec *)i->data;
309 if (likely(i->nr_segs == 1)) {
/* fast path: single segment, sleeping copy resolves faults */
311 char __user *buf = iov->iov_base + i->iov_offset;
312 left = __copy_from_user(kaddr + offset, buf, bytes);
313 copied = bytes - left;
/* multi-segment case: helper with atomic=0, faults are resolved */
315 copied = __iovec_copy_from_user(kaddr + offset, iov,
316 i->iov_offset, bytes, 0);
/*
 * Advance the iovec-backed iterator by @bytes, skipping zero-length
 * segments and stepping to the next iovec as each one is consumed.
 */
322 static void ii_iovec_advance(struct iov_iter *i, size_t bytes)
/* cannot advance past the end of the iterator */
324 BUG_ON(i->count < bytes);
326 if (likely(i->nr_segs == 1)) {
/* single segment: just bump the offset */
327 i->iov_offset += bytes;
330 struct iovec *iov = (struct iovec *)i->data;
331 size_t base = i->iov_offset;
332 unsigned long nr_segs = i->nr_segs;
/*
 * The !iov->iov_len check ensures we skip over unlikely
 * zero-length segments (without overrunning the iovec).
 */
338 while (bytes || unlikely(i->count && !iov->iov_len)) {
/* consume no more than remains of the current segment */
341 copy = min(bytes, iov->iov_len - base);
342 BUG_ON(!i->count || i->count < copy);
/* segment exhausted: step to the next iovec (lines not visible) */
346 if (iov->iov_len == base) {
/* write the advanced cursor back into the iterator */
352 i->data = (unsigned long)iov;
353 i->iov_offset = base;
354 i->nr_segs = nr_segs;
359 * Fault in the first iovec of the given iov_iter, to a maximum length
360 * of bytes. Returns 0 on success, or non-zero if the memory could not be
361 * accessed (ie. because it is an invalid address).
363 * writev-intensive code may want this to prefault several iovecs -- that
364 * would be possible (callers must not rely on the fact that _only_ the
365 * first iovec will be faulted with the current implementation).
367 static int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
369 struct iovec *iov = (struct iovec *)i->data;
370 char __user *buf = iov->iov_base + i->iov_offset;
/* only the first segment is faulted in (see comment above) */
371 bytes = min(bytes, iov->iov_len - i->iov_offset);
372 return fault_in_pages_readable(buf, bytes);
376 * Return the count of just the current iov_iter segment.
/* bytes remaining in just the current iovec segment, capped by the
 * iterator's total remaining count */
378 static size_t ii_iovec_single_seg_count(const struct iov_iter *i)
380 const struct iovec *iov = (struct iovec *)i->data;
384 return min(i->count, iov->iov_len - i->iov_offset);
/* iov_iter operations for iterators backed by userspace iovec arrays */
387 struct iov_iter_ops ii_iovec_ops = {
388 .ii_copy_to_user_atomic = ii_iovec_copy_to_user_atomic,
389 .ii_copy_to_user = ii_iovec_copy_to_user,
390 .ii_copy_from_user_atomic = ii_iovec_copy_from_user_atomic,
391 .ii_copy_from_user = ii_iovec_copy_from_user,
392 .ii_advance = ii_iovec_advance,
393 .ii_fault_in_readable = ii_iovec_fault_in_readable,
394 .ii_single_seg_count = ii_iovec_single_seg_count,
396 EXPORT_SYMBOL(ii_iovec_ops);