#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
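	/* opens that can modify the file must be handled by the auth
	 * MDS for this inode; read-only opens may go to any MDS that
	 * holds a replica */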
	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		/* The first file open request creates the fscache cookie; we
		 * want to keep this cookie around for the lifetime of the
		 * inode so as not to have to worry about fscache
		 * register / revoke / operation races.
		 *
		 * Also, if we know the operation is going to invalidate data
		 * (non readonly) just nuke the cache right away.
		 */
		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
		if ((fmode & CEPH_FILE_MODE_WR))
			ceph_fscache_invalidate(inode);
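		/* fall through: regular files and directories both get a
		 * ceph_file_info attached below */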
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);
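	/* 'wanted' is the set of cap bits this open mode would like to
	 * hold; if enough of it is already issued we can avoid a round
	 * trip to the MDS below */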
	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return err;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}
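	/* acls.pagelist is NULLed above because the request has taken
	 * ownership of it; it is freed when the request is put */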
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_unhashed(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
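/*
 * Retry states passed back from the sync read path (via *checkeof /
 * retry_op) and consumed by ceph_read_iter() below: CHECK_EOF means a
 * short read may need to be retried against the up-to-date file size,
 * READ_INLINE means the data lives inline in the MDS and must be
 * fetched with a getattr.  (Any distinct non-zero values will do.)
 */
enum {
	CHECK_EOF = 1,
	READ_INLINE = 2,
};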
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool o_direct,
			unsigned long buf_align)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len, left;
	int io_align, page_align;
	int pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (o_direct)
		page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	if (ret >= 0) {
		int didpages;

		if (was_short && (pos + ret < inode->i_size)) {
			int zlen = min(this_len - ret,
				       inode->i_size - pos - ret);
			int zoff = (o_direct ? buf_align : io_align) +
				   read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe and need to continue */
		if (left && hit_stripe && pos < inode->i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + left > inode->i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages, ret;
	size_t len = iov_iter_count(i);

	dout("sync_read on file %p %llu~%u %s\n", file, off,
	     (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
					   off + len);
	if (ret < 0)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		while (iov_iter_count(i)) {
			size_t start;
			ssize_t n;

			n = iov_iter_get_pages_alloc(i, &pages, INT_MAX, &start);
			if (n < 0)
				return n;

			num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
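			/* the user buffer need not be page-aligned; 'start'
			 * is the byte offset into the first pinned page */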
			ret = striped_read(inode, off, n,
					   pages, num_pages, checkeof,
					   1, start);

			ceph_put_page_vector(pages, num_pages, true);

			if (ret <= 0)
				break;
			off += ret;
			iov_iter_advance(i, ret);
			if (ret < n)
				break;
		}
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);
		ret = striped_read(inode, off, len, pages,
				   num_pages, checkeof, 0, 0);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t,
						    PAGE_SIZE - page_off,
						    left);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, i);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %d\n", ret);
	return ret;
}
/*
 * Write commit request unsafe callback, called to tell us when a
 * request is unsafe (that is, in flight--has been handed to the
 * messenger to send to its target osd).  It is called again when
 * we've received a response message indicating the request is
 * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
 * is completed early (and unsuccessfully) due to a timeout or
 * interrupt.
 *
 * This is used if we requested both an ACK and ONDISK commit reply
 * from the OSD.
 */
static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
	     unsafe ? "un" : "");
	if (unsafe) {
		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_item,
			      &ci->i_unsafe_writes);
		spin_unlock(&ci->i_unsafe_lock);
	} else {
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_item);
		spin_unlock(&ci->i_unsafe_lock);
		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
	}
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		       struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_write on file %p %lld~%u\n", file, pos,
	     (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
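	/* ORDERSNAP makes the OSD fail the op with -EOLDSNAPC if the
	 * object carries newer snapshot state than our snap context;
	 * ONDISK asks for the ack only once the write is stable */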
	while (iov_iter_count(from) > 0) {
		u64 len = iov_iter_single_seg_count(from);
		size_t start;
		ssize_t n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0,
					    2,/*include a 'startsync' command*/
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);

		n = iov_iter_get_pages_alloc(from, &pages, len, &start);
		if (unlikely(n < 0)) {
			ret = n;
			ceph_osdc_put_request(req);
			break;
		}

		num_pages = (n + start + PAGE_SIZE - 1) / PAGE_SIZE;
		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+n) | (PAGE_CACHE_SIZE-1));
		osd_req_op_extent_osd_data_pages(req, 0, pages, n, start,
						 false, false);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_put_page_vector(pages, num_pages, false);

		ceph_osdc_put_request(req);
		if (ret)
			break;
		pos += n;
		written += n;
		iov_iter_advance(from, n);

		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int check_caps = 0;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + count) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE |
		CEPH_OSD_FLAG_ACK;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
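		/* unlike the O_DIRECT path above, this path copies the user
		 * data into a freshly allocated kernel page vector before
		 * handing it to the OSD client */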
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		/* get a second commit callback */
		req->r_unsafe_callback = ceph_sync_write_unsafe;
		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						 false, true);

		/* BUG_ON(vino.snap != CEPH_NOSNAP); */
		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret)
			break;
		pos += len;
		written += len;

		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async...  should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
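	/* this blocks until at least CEPH_CAP_FILE_RD is issued;
	 * pinned_page comes back non-NULL if the inode carries inline
	 * data, and must be released once the read is done */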
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			/* hmm, this isn't really async... */
			ret = ceph_sync_read(iocb, to, &retry_op);
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		ret = generic_file_read_iter(iocb, to);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		page_cache_release(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;

		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_CACHE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_CACHE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos,
			     inode->i_size);

			read += ret;
			len -= ret;
			retry_op = 0;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	mutex_lock(&inode->i_mutex);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		mutex_unlock(&inode->i_mutex);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);
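		/* a sync write must use the snap context in force when it
		 * starts: the most recent pending cap snap if one exists,
		 * otherwise the head snap context */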
		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_sync_direct_write(iocb, &data, pos,
							 snapc);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written == -EOLDSNAPC) {
			dout("aio_write %p %llx.%llx %llu~%u"
			     " got EOLDSNAPC, retrying\n",
			     inode, ceph_vinop(inode),
			     pos, (unsigned)count);
			mutex_lock(&inode->i_mutex);
			goto retry_snap;
		}
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		loff_t old_size = inode->i_size;
		/*
		 * No need to acquire the i_truncate_mutex: the MDS
		 * revokes Fwb caps before sending a truncate message
		 * to us.  We cannot get the Fwb cap while there is a
		 * pending vmtruncate, so write and vmtruncate cannot
		 * run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		if (inode->i_size > old_size)
			ceph_fscache_update_objectsize(inode);
		mutex_unlock(&inode->i_mutex);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written >= 0 &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) ||
	     ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
		err = vfs_fsync_range(file, pos, pos + written - 1, 1);
		if (err < 0)
			written = err;
	}

	goto out_unlocked;

out:
	mutex_unlock(&inode->i_mutex);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t ret;

	mutex_lock(&inode->i_mutex);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
	}

	switch (whence) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset >= inode->i_size) {
			offset = -ENXIO;
			goto out;
		}
		offset = inode->i_size;
		break;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size);
		unlock_page(page);
		page_cache_release(page);
	}
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_CACHE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_CACHE_SIZE) {
		loff_t size = round_down(length, PAGE_CACHE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}
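	/* a NULL length means the whole object is being cleared, so the
	 * object can be deleted (or truncated away at file offset 0);
	 * a partial range needs an explicit ZERO op */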
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode),
				    offset, length,
				    0, 1, op,
				    CEPH_OSD_FLAG_WRITE |
				    CEPH_OSD_FLAG_ONDISK,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
				&inode->i_mtime);

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);
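	/* zero in three phases: the partial span up to the next stripe
	 * period boundary, then whole periods an object at a time, then
	 * whatever tail remains */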
	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
static long ceph_fallocate(struct file *file, int mode,
			   loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	mutex_lock(&inode->i_mutex);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE))
		endoff = offset + length;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	mutex_unlock(&inode->i_mutex);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};