if (atomic)
left = __copy_to_user_inatomic(buf, vaddr, copy);
else
- left = copy_to_user(buf, vaddr, copy);
+ left = __copy_to_user(buf, vaddr, copy);
copied += copy;
bytes -= copy;
vaddr += copy;
* The difference is that it attempts to resolve faults.
* Page must not be locked.
*/
-size_t iov_iter_copy_to_user(struct page *page,
+size_t __iov_iter_copy_to_user(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes)
{
char *kaddr;
kunmap(page);
return copied;
}
+EXPORT_SYMBOL(__iov_iter_copy_to_user);
+
+/*
+ * Checked wrapper around __iov_iter_copy_to_user(): verifies the user
+ * iovec segments (access_ok-style check via generic_segment_checks)
+ * before doing the copy, and may sleep — callers must not hold the
+ * page lock or be in atomic context.
+ *
+ * Returns the number of bytes copied, or 0 if the segment check
+ * fails (bytes may be clamped by generic_segment_checks).
+ */
+size_t iov_iter_copy_to_user(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+ might_sleep();
+ if (generic_segment_checks(i->iov, &i->nr_segs, &bytes, VERIFY_WRITE))
+ return 0;
+ return __iov_iter_copy_to_user(page, i, offset, bytes);
+}
EXPORT_SYMBOL(iov_iter_copy_to_user);
static size_t __iovec_copy_from_user(char *vaddr, const struct iovec *iov,
size_t count;
};
-size_t iov_iter_copy_to_user_atomic(struct page *page,
+size_t __iov_iter_copy_to_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t __iov_iter_copy_to_user(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
size_t iov_iter_copy_to_user(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);