#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
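
/*
 * An illustrative (not exhaustive) sketch of how a read is routed,
 * following the cap and flag checks in ceph_read_iter() below:
 *
 *      if (no Fc cap || (iocb->ki_flags & IOCB_DIRECT) ||
 *          (fi->flags & CEPH_F_SYNC))
 *              ceph_sync_read(...);            // sync / direct path
 *      else
 *              generic_file_read_iter(...);    // buffered path
 */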
/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
        const struct iovec *iov = it->iov;
        const struct iovec *iovend = iov + it->nr_segs;
        size_t size;

        size = iov->iov_len - it->iov_offset;
        /*
         * An iov can be page vectored when both the current tail
         * and the next base are page aligned.
         */
        while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
               (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
                size += iov->iov_len;
        }
        dout("dio_get_pagevlen len = %zu\n", size);
        return size;
}
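
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096):
 *
 *      { .iov_base = 0x1200, .iov_len = 0x2e00 }   tail 0x4000, aligned
 *      { .iov_base = 0x8000, .iov_len = 0x3000 }   base aligned, coalesce
 *      { .iov_base = 0xc100, ... }                 base unaligned, stop
 *
 * coalesce into one page vector of length 0x2e00 + 0x3000 = 0x5e00;
 * the third vector is left for a later call.
 */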
/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
                    size_t *page_align, int *num_pages)
{
        struct iov_iter tmp_it = *it;
        size_t align;
        struct page **pages;
        int ret = 0, idx, npages;

        align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
                (PAGE_SIZE - 1);
        npages = calc_pages_for(align, nbytes);
        pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
        if (!pages) {
                pages = vmalloc(sizeof(*pages) * npages);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        for (idx = 0; idx < npages; ) {
                size_t start;
                ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
                                         npages - idx, &start);
                if (ret < 0)
                        goto fail;
                iov_iter_advance(&tmp_it, ret);
                nbytes -= ret;
                idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
        }

        BUG_ON(nbytes != 0);
        *num_pages = npages;
        *page_align = align;
        dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
        return pages;
fail:
        ceph_put_page_vector(pages, idx, false);
        return ERR_PTR(ret);
}
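
/*
 * Typical usage, abridged from the O_DIRECT path in ceph_sync_read()
 * below; the page vector must be released with ceph_put_page_vector():
 *
 *      size_t start, n;
 *      int num_pages;
 *      struct page **pages;
 *
 *      n = dio_get_pagev_size(i);
 *      pages = dio_get_pages_alloc(i, n, &start, &num_pages);
 *      if (IS_ERR(pages))
 *              return PTR_ERR(pages);
 *      ...issue I/O against pages...
 *      ceph_put_page_vector(pages, num_pages, true);
 */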
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = cpu_to_le32(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}
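
/*
 * For example: a read-only open may be served by any MDS, while any
 * open that can mutate the inode must go to the authoritative MDS:
 *
 *      prepare_open_request(sb, O_RDONLY, 0);         // USE_ANY_MDS, OPEN
 *      prepare_open_request(sb, O_RDWR | O_CREAT, 0); // USE_AUTH_MDS, CREATE
 */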
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        struct ceph_file_info *cf;
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                /* The first open request creates the cookie, and we want
                 * to keep this cookie around for the lifetime of the inode
                 * so as not to have to worry about fscache
                 * register / revoke / operation races.
                 *
                 * Also, if we know the operation is going to invalidate data
                 * (non readonly) just nuke the cache right away.
                 */
                ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
                if ((fmode & CEPH_FILE_MODE_WR))
                        ceph_fscache_invalidate(inode);
        case S_IFDIR:
                dout("init_file %p %p 0%o (regular)\n", inode, file,
                     inode->i_mode);
                cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO);
                if (cf == NULL) {
                        ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                        return -ENOMEM;
                }
                cf->fmode = fmode;
                cf->next_offset = 2;
                cf->readdir_cache_idx = -1;
                file->private_data = cf;
                BUG_ON(inode->i_fop->release != ceph_release);
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *cf = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (cf) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode,
                     int *opened)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acls_info acls = {};
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return err;

        if (flags & O_CREAT) {
                err = ceph_pre_init_acls(dir, &mode, &acls);
                if (err < 0)
                        return err;
        }

        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_acl;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        if (flags & O_CREAT) {
                req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (acls.pagelist) {
                        req->r_pagelist = acls.pagelist;
                        acls.pagelist = NULL;
                }
        }
        req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out_req;

        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_unhashed(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        ceph_init_inode_acls(d_inode(dentry), &acls);
                        *opened |= FILE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open, opened);
        }
out_req:
        if (!req->r_err && req->r_target_inode)
                ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
        ceph_mdsc_put_request(req);
out_acl:
        ceph_release_acls_info(&acls);
        dout("atomic_open result=%d\n", err);
        return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *cf = file->private_data;

        dout("release inode %p file %p\n", inode, file);
        ceph_put_fmode(ci, cf->fmode);
        if (cf->last_readdir)
                ceph_mdsc_put_request(cf->last_readdir);
        kfree(cf->last_name);
        kfree(cf->dir_info);
        kmem_cache_free(ceph_file_cachep, cf);

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

enum {
        CHECK_EOF = 1,
        READ_INLINE = 2,
};
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
                        int *checkeof, bool o_direct,
                        unsigned long buf_align)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len, left;
        int io_align, page_align;
        int pages_left;
        int read;
        struct page **page_pos;
        int ret;
        bool hit_stripe, was_short;

        /*
         * we may need to do multiple reads.  not atomic, unfortunately.
         */
        pos = off;
        left = len;
        page_pos = pages;
        pages_left = num_pages;
        read = 0;
        io_align = off & ~PAGE_MASK;

more:
        if (o_direct)
                page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
        else
                page_align = pos & ~PAGE_MASK;
        this_len = left;
        ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
                                  ci->i_truncate_seq,
                                  ci->i_truncate_size,
                                  page_pos, pages_left, page_align);
        if (ret == -ENOENT)
                ret = 0;
        hit_stripe = this_len < left;
        was_short = ret >= 0 && ret < this_len;
        dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

        if (ret >= 0) {
                int didpages;
                if (was_short && (pos + ret < inode->i_size)) {
                        int zlen = min(this_len - ret,
                                       inode->i_size - pos - ret);
                        int zoff = (o_direct ? buf_align : io_align) +
                                    read + ret;
                        dout(" zero gap %llu to %llu\n",
                             pos + ret, pos + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
                pos += ret;
                read = pos - off;
                left -= ret;
                page_pos += didpages;
                pages_left -= didpages;

                /* hit stripe and need to continue */
                if (left && hit_stripe && pos < inode->i_size)
                        goto more;
        }

        if (read > 0) {
                ret = read;
                /* did we bounce off eof? */
                if (pos + left > inode->i_size)
                        *checkeof = CHECK_EOF;
        }

        dout("striped_read returns %d\n", ret);
        return ret;
}
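
/*
 * Worked example (illustrative, assuming a layout with 4 MB objects and
 * a single stripe): a 6 MB read at offset 2 MB covers objects 0 and 1.
 * The first OSD read is capped at 2 MB (this_len < left, so hit_stripe
 * is set) and we "goto more" for the remaining 4 MB from object 1.  A
 * short reply that still lies inside i_size is zero-filled rather than
 * returned short, so only a true EOF yields a short read to the caller.
 */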
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
                              int *checkeof)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct page **pages;
        u64 off = iocb->ki_pos;
        int num_pages, ret;
        size_t len = iov_iter_count(i);

        dout("sync_read on file %p %llu~%u %s\n", file, off,
             (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping, off,
                                           off + len);
        if (ret < 0)
                return ret;

        if (iocb->ki_flags & IOCB_DIRECT) {
                while (iov_iter_count(i)) {
                        size_t start;
                        ssize_t n;

                        n = dio_get_pagev_size(i);
                        pages = dio_get_pages_alloc(i, n, &start, &num_pages);
                        if (IS_ERR(pages))
                                return PTR_ERR(pages);

                        ret = striped_read(inode, off, n,
                                           pages, num_pages, checkeof,
                                           1, start);

                        ceph_put_page_vector(pages, num_pages, true);

                        if (ret <= 0)
                                break;
                        off += ret;
                        iov_iter_advance(i, ret);
                        if (ret < n)
                                break;
                }
        } else {
                num_pages = calc_pages_for(off, len);
                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
                ret = striped_read(inode, off, len, pages,
                                   num_pages, checkeof, 0, 0);
                if (ret > 0) {
                        int l, k = 0;
                        size_t left = ret;

                        while (left) {
                                size_t page_off = off & ~PAGE_MASK;
                                size_t copy = min_t(size_t,
                                                    PAGE_SIZE - page_off, left);
                                l = copy_page_to_iter(pages[k++], page_off,
                                                      copy, i);
                                off += l;
                                left -= l;
                                if (l < copy)
                                        break;
                        }
                }
                ceph_release_page_vector(pages, num_pages);
        }

        if (off > iocb->ki_pos) {
                ret = off - iocb->ki_pos;
                iocb->ki_pos = off;
        }

        dout("sync_read result %d\n", ret);
        return ret;
}
/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
        struct ceph_inode_info *ci = ceph_inode(req->r_inode);

        dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
             unsafe ? "un" : "");
        if (unsafe) {
                ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
                spin_lock(&ci->i_unsafe_lock);
                list_add_tail(&req->r_unsafe_item,
                              &ci->i_unsafe_writes);
                spin_unlock(&ci->i_unsafe_lock);
        } else {
                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_item);
                spin_unlock(&ci->i_unsafe_lock);
                ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
        }
}
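
/*
 * Lifecycle sketch: for each sync write the callback fires twice,
 * bracketing the window in which the write is only in flight:
 *
 *      ceph_sync_write_unsafe(req, true);   // sent: take Fw ref, add to
 *                                           // ci->i_unsafe_writes
 *      ...                                  // OSD commits to disk
 *      ceph_sync_write_unsafe(req, false);  // ONDISK reply: unlink, drop ref
 *
 * Holding the CEPH_CAP_FILE_WR reference keeps the MDS from revoking
 * the write cap while an unsafe write is still outstanding.
 */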
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                       struct ceph_snap_context *snapc)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        int num_pages;
        int written = 0;
        int flags;
        int check_caps = 0;
        int ret;
        struct timespec mtime = CURRENT_TIME;
        size_t count = iov_iter_count(from);

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_write on file %p %lld~%u\n", file, pos,
             (unsigned)count);

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
                                            (pos + count) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;

        while (iov_iter_count(from) > 0) {
                u64 len = dio_get_pagev_size(from);
                size_t start;
                ssize_t n;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0,
                                            2,/*include a 'startsync' command*/
                                            CEPH_OSD_OP_WRITE, flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

                n = len;
                pages = dio_get_pages_alloc(from, len, &start, &num_pages);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
                        break;
                }

                /*
                 * throw out any page cache pages in this range.  this
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
                                           (pos+n) | (PAGE_CACHE_SIZE-1));
                osd_req_op_extent_osd_data_pages(req, 0, pages, n, start,
                                                 false, false);

                /* BUG_ON(vino.snap != CEPH_NOSNAP); */
                ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

                ceph_put_page_vector(pages, num_pages, false);

                ceph_osdc_put_request(req);
                if (ret < 0)
                        break;
                pos += n;
                written += n;
                iov_iter_advance(from, n);

                if (pos > i_size_read(inode)) {
                        check_caps = ceph_inode_set_size(inode, pos);
                        if (check_caps)
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }
        }

        if (ret != -EOLDSNAPC && written > 0) {
                iocb->ki_pos = pos;
                ret = written;
        }
        return ret;
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                struct ceph_snap_context *snapc)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
        u64 len;
        int num_pages;
        int written = 0;
        int flags;
        int check_caps = 0;
        int ret;
        struct timespec mtime = CURRENT_TIME;
        size_t count = iov_iter_count(from);

        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;

        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
                                            (pos + count) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);

        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE |
                CEPH_OSD_FLAG_ACK;

        while ((len = iov_iter_count(from)) > 0) {
                size_t left;
                int n;

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &len, 0, 1,
                                            CEPH_OSD_OP_WRITE, flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                /*
                 * write from beginning of first page,
                 * regardless of io alignment
                 */
                num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }

                left = len;
                for (n = 0; n < num_pages; n++) {
                        size_t plen = min_t(size_t, left, PAGE_SIZE);
                        ret = copy_page_from_iter(pages[n], 0, plen, from);
                        if (ret != plen) {
                                ret = -EFAULT;
                                break;
                        }
                        left -= ret;
                }

                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }

                /* get a second commit callback */
                req->r_unsafe_callback = ceph_sync_write_unsafe;
                req->r_inode = inode;

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
                                                 false, true);

                /* BUG_ON(vino.snap != CEPH_NOSNAP); */
                ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

                ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
                ceph_osdc_put_request(req);
                if (ret == 0) {
                        pos += len;
                        written += len;

                        if (pos > i_size_read(inode)) {
                                check_caps = ceph_inode_set_size(inode, pos);
                                if (check_caps)
                                        ceph_check_caps(ceph_inode(inode),
                                                        CHECK_CAPS_AUTHONLY,
                                                        NULL);
                        }
                } else
                        break;
        }

        if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
                iocb->ki_pos = pos + written;
        }
        return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
        size_t len = iov_iter_count(to);
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct page *pinned_page = NULL;
        ssize_t ret;
        int want, got = 0;
        int retry_op = 0, read = 0;

again:
        dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
             inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
        if (ret < 0)
                return ret;

        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {

                dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                if (ci->i_inline_version == CEPH_INLINE_NONE) {
                        /* hmm, this isn't really async... */
                        ret = ceph_sync_read(iocb, to, &retry_op);
                } else {
                        retry_op = READ_INLINE;
                }
        } else {
                dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
                     ceph_cap_string(got));

                ret = generic_file_read_iter(iocb, to);
        }
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        if (pinned_page) {
                page_cache_release(pinned_page);
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
        if (retry_op && ret >= 0) {
                int statret;
                struct page *page = NULL;
                loff_t i_size;
                if (retry_op == READ_INLINE) {
                        page = __page_cache_alloc(GFP_KERNEL);
                        if (!page)
                                return -ENOMEM;
                }

                statret = __ceph_do_getattr(inode, page,
                                            CEPH_STAT_CAP_INLINE_DATA, !!page);
                if (statret < 0) {
                        if (page)
                                __free_page(page);
                        if (statret == -ENODATA) {
                                BUG_ON(retry_op != READ_INLINE);
                                goto again;
                        }
                        return statret;
                }

                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
                        BUG_ON(ret > 0 || read > 0);
                        if (iocb->ki_pos < i_size &&
                            iocb->ki_pos < PAGE_CACHE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
                                end = min_t(loff_t, end, PAGE_CACHE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        if (iocb->ki_pos < i_size && read < len) {
                                size_t zlen = min_t(size_t, len - read,
                                                    i_size - iocb->ki_pos);
                                ret = iov_iter_zero(zlen, to);
                                iocb->ki_pos += ret;
                                read += ret;
                        }
                        __free_pages(page, 0);
                        return read;
                }

                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos,
                             inode->i_size);

                        read += ret;
                        len -= ret;
                        retry_op = 0;
                        goto again;
                }
        }

        if (ret >= 0)
                ret += read;

        return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_sb_to_client(inode->i_sb)->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
        int err, want, got;
        loff_t pos;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        inode_lock(inode);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = inode_to_bdi(inode);

        if (iocb->ki_flags & IOCB_APPEND) {
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
                if (err < 0)
                        goto out;
        }

        err = generic_write_checks(iocb, from);
        if (err <= 0)
                goto out;

        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        err = file_remove_privs(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                err = ceph_uninline_data(file, NULL);
                if (err < 0)
                        goto out;
        }

retry_snap:
        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
                err = -ENOSPC;
                goto out;
        }

        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
             inode, ceph_vinop(inode), pos, count, inode->i_size);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;
        got = 0;
        err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
                            &got, NULL);
        if (err < 0)
                goto out;

        dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
                struct ceph_snap_context *snapc;
                struct iov_iter data;
                inode_unlock(inode);

                spin_lock(&ci->i_ceph_lock);
                if (__ceph_have_pending_cap_snap(ci)) {
                        struct ceph_cap_snap *capsnap =
                                        list_last_entry(&ci->i_cap_snaps,
                                                        struct ceph_cap_snap,
                                                        ci_item);
                        snapc = ceph_get_snap_context(capsnap->context);
                } else {
                        BUG_ON(!ci->i_head_snapc);
                        snapc = ceph_get_snap_context(ci->i_head_snapc);
                }
                spin_unlock(&ci->i_ceph_lock);

                /* we might need to revert back to that point */
                data = *from;
                if (iocb->ki_flags & IOCB_DIRECT)
                        written = ceph_sync_direct_write(iocb, &data, pos,
                                                         snapc);
                else
                        written = ceph_sync_write(iocb, &data, pos, snapc);
                if (written == -EOLDSNAPC) {
                        dout("aio_write %p %llx.%llx %llu~%u "
                             "got EOLDSNAPC, retrying\n",
                             inode, ceph_vinop(inode),
                             pos, (unsigned)count);
                        inode_lock(inode);
                        goto retry_snap;
                }
                if (written > 0)
                        iov_iter_advance(from, written);
                ceph_put_snap_context(snapc);
        } else {
                loff_t old_size = inode->i_size;
                /*
                 * No need to acquire the i_truncate_mutex.  Because
                 * the MDS revokes Fwb caps before sending truncate
                 * message to us.  We can't get Fwb cap while there
                 * are pending vmtruncate.  So write and vmtruncate
                 * can not run at the same time.
                 */
                written = generic_perform_write(file, from, pos);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
                if (inode->i_size > old_size)
                        ceph_fscache_update_objectsize(inode);
                inode_unlock(inode);
        }

        if (written >= 0) {
                int dirty;
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
             inode, ceph_vinop(inode), pos, (unsigned)count,
             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);

        if (written >= 0 &&
            ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
             ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
                err = vfs_fsync_range(file, pos, pos + written - 1, 1);
                if (err < 0)
                        written = err;
        }

        goto out_unlocked;

out:
        inode_unlock(inode);
out_unlocked:
        ceph_free_cap_flush(prealloc_cf);
        current->backing_dev_info = NULL;
        return written ? written : err;
}
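
/*
 * Retry sketch for the sync path above: the iov_iter is copied into
 * "data" before the write so nothing is consumed from "from" until
 * the write actually succeeds:
 *
 *      data = *from;                          // snapshot the iterator
 *      written = ceph_sync_write(iocb, &data, pos, snapc);
 *      if (written == -EOLDSNAPC)
 *              goto retry_snap;               // snapc went stale; retry
 *      iov_iter_advance(from, written);       // consume only what was written
 */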
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        int ret;

        inode_lock(inode);

        if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
                if (ret < 0) {
                        offset = ret;
                        goto out;
                }
        }

        switch (whence) {
        case SEEK_END:
                offset += inode->i_size;
                break;
        case SEEK_CUR:
                /*
                 * Here we special-case the lseek(fd, 0, SEEK_CUR)
                 * position-querying operation.  Avoid rewriting the "same"
                 * f_pos value back to the file because a concurrent read(),
                 * write() or lseek() might have altered it.
                 */
                if (offset == 0) {
                        offset = file->f_pos;
                        goto out;
                }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                break;
        case SEEK_HOLE:
                if (offset >= inode->i_size) {
                        offset = -ENXIO;
                        goto out;
                }
                offset = inode->i_size;
                break;
        }

        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
        inode_unlock(inode);
        return offset;
}
static inline void ceph_zero_partial_page(
        struct inode *inode, loff_t offset, unsigned size)
{
        struct page *page;
        pgoff_t index = offset >> PAGE_CACHE_SHIFT;

        page = find_lock_page(inode->i_mapping, index);
        if (page) {
                wait_on_page_writeback(page);
                zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
                unlock_page(page);
                page_cache_release(page);
        }
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
                                      loff_t length)
{
        loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
        if (offset < nearly) {
                loff_t size = nearly - offset;
                if (length < size)
                        size = length;
                ceph_zero_partial_page(inode, offset, size);
                offset += size;
                length -= size;
        }
        if (length >= PAGE_CACHE_SIZE) {
                loff_t size = round_down(length, PAGE_CACHE_SIZE);
                truncate_pagecache_range(inode, offset, offset + size - 1);
                offset += size;
                length -= size;
        }
        if (length)
                ceph_zero_partial_page(inode, offset, length);
}
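
/*
 * Worked example (illustrative, with PAGE_CACHE_SIZE == 4096): zeroing
 * offset=1024, length=10240 splits into a partial head page
 * (1024..4095), one whole page truncated from the cache (4096..8191),
 * and a partial tail page (8192..11263).
 */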
static int ceph_zero_partial_object(struct inode *inode,
                                    loff_t offset, loff_t *length)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req;
        int ret = 0;
        loff_t zero = 0;
        int op;

        if (!length) {
                op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
                length = &zero;
        } else {
                op = CEPH_OSD_OP_ZERO;
        }

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                    ceph_vino(inode),
                                    offset, length,
                                    0, 1, op,
                                    CEPH_OSD_FLAG_WRITE |
                                    CEPH_OSD_FLAG_ONDISK,
                                    NULL, 0, 0, false);
        if (IS_ERR(req)) {
                ret = PTR_ERR(req);
                goto out;
        }

        ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
                                &inode->i_mtime);

        ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
        if (!ret) {
                ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
                if (ret == -ENOENT)
                        ret = 0;
        }
        ceph_osdc_put_request(req);

out:
        return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
        int ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
        s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
        s32 object_size = ceph_file_layout_object_size(ci->i_layout);
        u64 object_set_size = object_size * stripe_count;
        u64 nearly, t;

        /* round offset up to next period boundary */
        nearly = offset + object_set_size - 1;
        t = nearly;
        nearly -= do_div(t, object_set_size);

        while (length && offset < nearly) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        while (length >= object_set_size) {
                int i;
                loff_t pos = offset;
                for (i = 0; i < stripe_count; ++i) {
                        ret = ceph_zero_partial_object(inode, pos, NULL);
                        if (ret < 0)
                                return ret;
                        pos += stripe_unit;
                }
                offset += object_set_size;
                length -= object_set_size;
        }
        while (length) {
                loff_t size = length;
                ret = ceph_zero_partial_object(inode, offset, &size);
                if (ret < 0)
                        return ret;
                offset += size;
                length -= size;
        }
        return ret;
}
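
/*
 * Worked example (illustrative, assuming stripe_unit = object_size =
 * 4 MB and stripe_count = 2, so object_set_size = 8 MB): punching
 * offset = 6 MB, length = 14 MB first rounds up to the period boundary
 * nearly = 8 MB, zeroes 6..8 MB object by object, then drops the whole
 * 8..16 MB object set (two objects, stripe_unit apart), and finally
 * zeroes the 16..20 MB remainder.
 */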
static long ceph_fallocate(struct file *file, int mode,
                           loff_t offset, loff_t length)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
                &ceph_inode_to_client(inode)->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        int want, got = 0;
        int dirty;
        int ret = 0;
        loff_t endoff = 0;
        loff_t size;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;

        prealloc_cf = ceph_alloc_cap_flush();
        if (!prealloc_cf)
                return -ENOMEM;

        inode_lock(inode);

        if (ceph_snap(inode) != CEPH_NOSNAP) {
                ret = -EROFS;
                goto unlock;
        }

        if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
            !(mode & FALLOC_FL_PUNCH_HOLE)) {
                ret = -ENOSPC;
                goto unlock;
        }

        if (ci->i_inline_version != CEPH_INLINE_NONE) {
                ret = ceph_uninline_data(file, NULL);
                if (ret < 0)
                        goto unlock;
        }

        size = i_size_read(inode);
        if (!(mode & FALLOC_FL_KEEP_SIZE))
                endoff = offset + length;

        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_BUFFER;

        ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
        if (ret < 0)
                goto unlock;

        if (mode & FALLOC_FL_PUNCH_HOLE) {
                if (offset < size)
                        ceph_zero_pagecache_range(inode, offset, length);
                ret = ceph_zero_objects(inode, offset, length);
        } else if (endoff > size) {
                truncate_pagecache_range(inode, size, -1);
                if (ceph_inode_set_size(inode, endoff))
                        ceph_check_caps(ceph_inode(inode),
                                        CHECK_CAPS_AUTHONLY, NULL);
        }

        if (!ret) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }

        ceph_put_cap_refs(ci, got);
unlock:
        inode_unlock(inode);
        ceph_free_cap_flush(prealloc_cf);
        return ret;
}
const struct file_operations ceph_file_fops = {
        .open = ceph_open,
        .release = ceph_release,
        .llseek = ceph_llseek,
        .read_iter = ceph_read_iter,
        .write_iter = ceph_write_iter,
        .mmap = ceph_mmap,
        .fsync = ceph_fsync,
        .lock = ceph_lock,
        .flock = ceph_flock,
        .splice_read = generic_file_splice_read,
        .splice_write = iter_file_splice_write,
        .unlocked_ioctl = ceph_ioctl,
        .compat_ioctl = ceph_ioctl,
        .fallocate = ceph_fallocate,
};