#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
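/*
 * (Which of these paths a given request takes is decided per call in
 * ceph_aio_read()/ceph_aio_write() below: the buffered path is used
 * only while the MDS has issued us FILE_CACHE/FILE_BUFFER caps and
 * the file is not O_DIRECT; otherwise we fall back to the sync or
 * direct path.)
 */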
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	/* writes and creates must go to the authoritative MDS */
	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = cpu_to_le32(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
	req->r_args.open.preferred = cpu_to_le32(-1);
out:
	return req;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
		if (cf == NULL) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;
		cf->next_offset = 2;	/* readdir: offsets 0 and 1 are . and .. */
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
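/*
 * (Note that directories take the S_IFREG branch above: readdir state
 * such as last_readdir and last_name also lives in ceph_file_info,
 * and is torn down in ceph_release() below.)
 */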
/*
 * If the filp already has private_data, that means the file was
 * already opened by intent during lookup, and we do nothing.
 *
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&inode->i_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&inode->i_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&inode->i_lock);
		return ceph_init_file(inode, file, fmode);
	}
	spin_unlock(&inode->i_lock);
	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
/*
 * Do a lookup + open with a single request.
 *
 * If this succeeds, but some subsequent check in the vfs
 * may_open() fails, the struct *file gets cleaned up (i.e.
 * ceph_release gets called).  So fear not!
 */
/*
 * flags
 *  path_lookup_open   -> LOOKUP_OPEN
 *  path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
 */
struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
				struct nameidata *nd, int mode,
				int locked_dir)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct file *file = nd->intent.open.file;
	struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
	struct ceph_mds_request *req;
	int err;
	int flags = nd->intent.open.flags - 1;  /* silly vfs! */
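	/*
	 * (The VFS biases the access mode in the intent's open flags by
	 * one -- see open_to_namei_flags() -- so that 0 never means
	 * O_RDONLY; the -1 above recovers the original flag bits.)
	 */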
	dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
	     dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	}
	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	if (!err)
		err = ceph_init_file(req->r_dentry->d_inode, file,
				     req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("ceph_lookup_open result=%p\n", dentry);
	return dentry;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	dput(cf->dentry);
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 off, u64 len,
			struct page **pages, int num_pages,
			int *checkeof, bool align_to_pages)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 pos, this_len;
	int io_align, page_align;
	int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
	int left, pages_left;
	int read;
	struct page **page_pos;
	int ret;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
	pos = off;
	left = len;
	page_pos = pages;
	pages_left = num_pages;
	read = 0;
	io_align = off & ~PAGE_MASK;

more:
	if (align_to_pages)
		page_align = (pos - io_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	this_len = left;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq,
				  ci->i_truncate_size,
				  page_pos, pages_left, page_align);
	hit_stripe = this_len < left;
	was_short = ret >= 0 && ret < this_len;
	if (ret == -ENOENT)
		ret = 0;
	dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
	if (ret > 0) {
		int didpages =
			((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;

		if (read < pos - off) {
			dout(" zero gap %llu to %llu\n", off + read, pos);
			ceph_zero_page_vector_range(page_off + read,
						    pos - off - read, pages);
		}
		pos += ret;
		read = pos - off;
		left -= ret;
		page_pos += didpages;
		pages_left -= didpages;

		/* hit stripe? */
		if (left && hit_stripe)
			goto more;
	}
	if (was_short) {
		/* was original extent fully inside i_size? */
		if (pos + left <= inode->i_size) {
			dout("zero tail\n");
			ceph_zero_page_vector_range(page_off + read, len - read,
						    pages);
			read = len;
			goto out;
		}

		/* check i_size */
		*checkeof = 1;
	}

out:
	if (ret >= 0)
		ret = read;
	dout("striped_read returns %d\n", ret);
	return ret;
}
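/*
 * (A short or -ENOENT reply from an OSD does not by itself mean EOF:
 * the object may be sparse or not yet created.  Anything inside i_size
 * is therefore zero-filled above, and only when the extent reaches
 * past our possibly-stale i_size do we set *checkeof so the caller
 * can revalidate the file size with the MDS.)
 */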
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct file *file, char __user *data,
			      unsigned len, loff_t *poff, int *checkeof)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct page **pages;
	u64 off = *poff;
	int num_pages = calc_pages_for(off, len);
	int ret;

	dout("sync_read on file %p %llu~%u %s\n", file, off, len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_DIRECT)
		pages = ceph_get_direct_page_vector(data, num_pages);
	else
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		goto done;

	ret = striped_read(inode, off, len, pages, num_pages, checkeof,
			   file->f_flags & O_DIRECT);

	if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
		ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
	if (ret >= 0)
		*poff = off + ret;
done:
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages);
	else
		ceph_release_page_vector(pages, num_pages);
	dout("sync_read result %d\n", ret);
	return ret;
}
/*
 * Write commit callback, called if we requested both an ACK and
 * ONDISK commit reply from the OSD.
 */
static void sync_write_commit(struct ceph_osd_request *req,
			      struct ceph_msg *msg)
{
	struct ceph_inode_info *ci = ceph_inode(req->r_inode);

	dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
	spin_lock(&ci->i_unsafe_lock);
	list_del_init(&req->r_unsafe_item);
	spin_unlock(&ci->i_unsafe_lock);
	ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
}
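/*
 * (Until this callback fires, the request stays on the inode's
 * i_unsafe_writes list and we hold an extra FILE_WR cap reference,
 * so the WR cap cannot be released while a write is uncommitted.)
 */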
/*
 * Synchronous write, straight from __user pointer or user pages (if
 * O_DIRECT).
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t ceph_sync_write(struct file *file, const char __user *data,
			       size_t left, loff_t *offset)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page **pages;
	int num_pages;
	unsigned long long pos;
	u64 len;
	int written = 0;
	int flags;
	int do_sync = 0;
	int check_caps = 0;
	int page_align, io_align;
	int ret;
	struct timespec mtime = CURRENT_TIME;
	if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u %s\n", file, *offset,
	     (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (file->f_flags & O_APPEND)
		pos = i_size_read(inode);
	else
		pos = *offset;
	io_align = pos & ~PAGE_MASK;

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_CACHE_SHIFT,
					    (pos + left) >> PAGE_CACHE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);
	flags = CEPH_OSD_FLAG_ORDERSNAP |
		CEPH_OSD_FLAG_ONDISK |
		CEPH_OSD_FLAG_WRITE;
	if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
		flags |= CEPH_OSD_FLAG_ACK;
	else
		do_sync = 1;
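	/*
	 * (Without O_SYNC or O_DIRECT we also request an ACK so we can
	 * return as soon as the OSDs have the write in memory;
	 * sync_write_commit() above then drops the FILE_WR reference
	 * once the write is safely on disk.)
	 */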
	/*
	 * we may need to do multiple writes here if we span an object
	 * boundary.  this isn't atomic, unfortunately.  :(
	 */
more:
	len = left;
	if (file->f_flags & O_DIRECT)
		/* write from beginning of first page, regardless of
		   io alignment */
		page_align = (pos - io_align) & ~PAGE_MASK;
	else
		page_align = pos & ~PAGE_MASK;
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), pos, &len,
				    CEPH_OSD_OP_WRITE, flags,
				    ci->i_snap_realm->cached_context,
				    do_sync,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    &mtime, false, 2, page_align);
	if (!req)
		return -ENOMEM;

	num_pages = calc_pages_for(pos, len);
	if (file->f_flags & O_DIRECT) {
		pages = ceph_get_direct_page_vector(data, num_pages);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		/*
		 * throw out any page cache pages in this range.  this
		 * may block.
		 */
		truncate_inode_pages_range(inode->i_mapping, pos,
					   (pos+len) | (PAGE_CACHE_SIZE-1));
	} else {
		pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}
		ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		if ((file->f_flags & O_SYNC) == 0) {
			/* get a second commit callback */
			req->r_safe_callback = sync_write_commit;
			req->r_own_pages = 1;
		}
	}
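	/*
	 * (In the ACK case above, r_own_pages makes the osd client free
	 * the page vector when the request is put, since we may return
	 * to the caller before the commit callback has run.)
	 */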
	req->r_pages = pages;
	req->r_num_pages = num_pages;
	req->r_inode = inode;

	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		if (req->r_safe_callback) {
			/*
			 * Add to inode unsafe list only after we
			 * start_request so that a tid has been assigned.
			 */
			spin_lock(&ci->i_unsafe_lock);
			list_add(&req->r_unsafe_item, &ci->i_unsafe_writes);
			spin_unlock(&ci->i_unsafe_lock);
			ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
		}
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
	}
	if (file->f_flags & O_DIRECT)
		ceph_put_page_vector(pages, num_pages);
	else if (file->f_flags & O_SYNC)
		ceph_release_page_vector(pages, num_pages);

out:
	ceph_osdc_put_request(req);
	if (ret == 0) {
		pos += len;
		written += len;
		left -= len;
		data += len;
		if (left)
			goto more;

		ret = written;
		*offset = pos;
		if (pos > i_size_read(inode))
			check_caps = ceph_inode_set_size(inode, pos);
		if (check_caps)
			ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
					NULL);
	}
	return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	loff_t *ppos = &iocb->ki_pos;
	size_t len = iov->iov_len;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	void __user *base = iov->iov_base;
	ssize_t ret;
	int want, got = 0;
	int checkeof = 0, read = 0;

	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len, inode);
again:
	__ceph_do_pending_vmtruncate(inode);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
	if (ret < 0)
		goto out;
	dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS))
		/* hmm, this isn't really async... */
		ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
	else
		ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
out:
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (checkeof && ret >= 0) {
		int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

		/* hit EOF or hole? */
		if (statret == 0 && *ppos < inode->i_size) {
			dout("aio_read sync_read hit hole, reading more\n");
			read += ret;
			base += ret;
			len -= ret;
			checkeof = 0;
			goto again;
		}
	}
	if (ret >= 0)
		ret += read;

	return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	loff_t endoff = pos + iov->iov_len;
	int want, got = 0;
	int ret, err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

retry_snap:
	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
		return -ENOSPC;
	__ceph_do_pending_vmtruncate(inode);
	dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     inode->i_size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff);
	if (ret < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_filp->f_flags & O_DIRECT) ||
	    (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
		ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
				      &iocb->ki_pos);
	} else {
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
		if ((ret >= 0 || ret == -EIOCBQUEUED) &&
		    ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
		     || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) {
			err = vfs_fsync_range(file, pos, pos + ret - 1, 1);
			if (err < 0)
				ret = err;
		}
	}
	if (ret >= 0) {
		spin_lock(&inode->i_lock);
		__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
		spin_unlock(&inode->i_lock);
	}
out:
	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (ret == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
		goto retry_snap;
	}

	return ret;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);

	switch (origin) {
	case SEEK_END:
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
		if (ret < 0) {
			offset = ret;
			goto out;
		}
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	}
	if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = ceph_aio_read,
	.aio_write = ceph_aio_write,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
};