1 #include <linux/ceph/ceph_debug.h>
3 #include <linux/module.h>
4 #include <linux/sched.h>
5 #include <linux/slab.h>
6 #include <linux/file.h>
7 #include <linux/namei.h>
8 #include <linux/writeback.h>
10 #include <linux/ceph/libceph.h>
13 * build a vector of user pages
15 struct page **ceph_get_direct_page_vector(const char __user *data,
17 loff_t off, size_t len)
22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
24 return ERR_PTR(-ENOMEM);
26 down_read(¤t->mm->mmap_sem);
27 rc = get_user_pages(current, current->mm, (unsigned long)data,
28 num_pages, 0, 0, pages, NULL);
29 up_read(¤t->mm->mmap_sem);
38 EXPORT_SYMBOL(ceph_get_direct_page_vector);
/*
 * Drop the page references taken by ceph_get_direct_page_vector() and
 * free the vector itself.
 */
void ceph_put_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
/*
 * Free pages allocated by ceph_alloc_page_vector() (order-0 each) and
 * the vector itself.
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);
61 * allocate a vector new pages
63 struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
68 pages = kmalloc(sizeof(*pages) * num_pages, flags);
70 return ERR_PTR(-ENOMEM);
71 for (i = 0; i < num_pages; i++) {
72 pages[i] = __page_cache_alloc(flags);
73 if (pages[i] == NULL) {
74 ceph_release_page_vector(pages, i);
75 return ERR_PTR(-ENOMEM);
80 EXPORT_SYMBOL(ceph_alloc_page_vector);
83 * copy user data into a page vector
85 int ceph_copy_user_to_page_vector(struct page **pages,
86 const char __user *data,
87 loff_t off, size_t len)
90 int po = off & ~PAGE_CACHE_MASK;
95 l = min_t(int, PAGE_CACHE_SIZE-po, left);
96 bad = copy_from_user(page_address(pages[i]) + po, data, l);
102 if (po == PAGE_CACHE_SIZE) {
109 EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
111 int ceph_copy_to_page_vector(struct page **pages,
113 loff_t off, size_t len)
116 size_t po = off & ~PAGE_CACHE_MASK;
121 l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
122 memcpy(page_address(pages[i]) + po, data, l);
126 if (po == PAGE_CACHE_SIZE) {
133 EXPORT_SYMBOL(ceph_copy_to_page_vector);
135 int ceph_copy_from_page_vector(struct page **pages,
137 loff_t off, size_t len)
140 size_t po = off & ~PAGE_CACHE_MASK;
145 l = min_t(size_t, PAGE_CACHE_SIZE-po, left);
146 memcpy(data, page_address(pages[i]) + po, l);
150 if (po == PAGE_CACHE_SIZE) {
157 EXPORT_SYMBOL(ceph_copy_from_page_vector);
160 * copy user data from a page vector into a user pointer
162 int ceph_copy_page_vector_to_user(struct page **pages,
164 loff_t off, size_t len)
167 int po = off & ~PAGE_CACHE_MASK;
172 l = min_t(int, left, PAGE_CACHE_SIZE-po);
173 bad = copy_to_user(data, page_address(pages[i]) + po, l);
180 if (po == PAGE_CACHE_SIZE)
187 EXPORT_SYMBOL(ceph_copy_page_vector_to_user);
190 * Zero an extent within a page vector. Offset is relative to the
191 * start of the first page.
193 void ceph_zero_page_vector_range(int off, int len, struct page **pages)
195 int i = off >> PAGE_CACHE_SHIFT;
197 off &= ~PAGE_CACHE_MASK;
199 dout("zero_page_vector_page %u~%u\n", off, len);
201 /* leading partial page? */
203 int end = min((int)PAGE_CACHE_SIZE, off + len);
204 dout("zeroing %d %p head from %d\n", i, pages[i],
206 zero_user_segment(pages[i], off, end);
210 while (len >= PAGE_CACHE_SIZE) {
211 dout("zeroing %d %p len=%d\n", i, pages[i], len);
212 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
213 len -= PAGE_CACHE_SIZE;
216 /* trailing partial page? */
218 dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
219 zero_user_segment(pages[i], 0, len);
222 EXPORT_SYMBOL(ceph_zero_page_vector_range);