fuse: writepages: fix aggregation
fs/fuse/file.c

/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/aio.h>
#include <linux/falloc.h>

static const struct file_operations fuse_direct_io_file_operations;

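/*
 * Send FUSE_OPEN or FUSE_OPENDIR to userspace.  The request carries a
 * struct fuse_open_in; the server replies with a struct fuse_open_out
 * holding the file handle and open flags.
 */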
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                          int opcode, struct fuse_open_out *outargp)
{
        struct fuse_open_in inarg;
        struct fuse_req *req;
        int err;

        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
        if (!fc->atomic_o_trunc)
                inarg.flags &= ~O_TRUNC;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(*outargp);
        req->out.args[0].value = outargp;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        return err;
}

struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
        struct fuse_file *ff;

        ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (unlikely(!ff))
                return NULL;

        ff->fc = fc;
        ff->reserved_req = fuse_request_alloc(0);
        if (unlikely(!ff->reserved_req)) {
                kfree(ff);
                return NULL;
        }

        INIT_LIST_HEAD(&ff->write_entry);
        atomic_set(&ff->count, 0);
        RB_CLEAR_NODE(&ff->polled_node);
        init_waitqueue_head(&ff->poll_wait);

        spin_lock(&fc->lock);
        ff->kh = ++fc->khctr;
        spin_unlock(&fc->lock);

        return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
        fuse_request_free(ff->reserved_req);
        kfree(ff);
}

struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
        atomic_inc(&ff->count);
        return ff;
}

static void fuse_release_async(struct work_struct *work)
{
        struct fuse_req *req;
        struct fuse_conn *fc;
        struct path path;

        req = container_of(work, struct fuse_req, misc.release.work);
        path = req->misc.release.path;
        fc = get_fuse_conn(path.dentry->d_inode);

        fuse_put_request(fc, req);
        path_put(&path);
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
        if (fc->destroy_req) {
                /*
                 * If this is a fuseblk mount, then it's possible that
                 * releasing the path will result in releasing the
                 * super block and sending the DESTROY request.  If
                 * the server is single threaded, this would hang.
                 * For this reason do the path_put() in a separate
                 * thread.
                 */
                atomic_inc(&req->count);
                INIT_WORK(&req->misc.release.work, fuse_release_async);
                schedule_work(&req->misc.release.work);
        } else {
                path_put(&req->misc.release.path);
        }
}

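/*
 * Drop a reference to a fuse_file.  When the last reference goes away,
 * the prepared RELEASE request is sent: synchronously if the caller
 * asks for it (the fuseblk case), otherwise in the background with
 * fuse_release_end() doing the final path_put().
 */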
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
        if (atomic_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;

                if (sync) {
                        req->background = 0;
                        fuse_request_send(ff->fc, req);
                        path_put(&req->misc.release.path);
                        fuse_put_request(ff->fc, req);
                } else {
                        req->end = fuse_release_end;
                        req->background = 1;
                        fuse_request_send_background(ff->fc, req);
                }
                kfree(ff);
        }
}

int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                 bool isdir)
{
        struct fuse_open_out outarg;
        struct fuse_file *ff;
        int err;
        int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

        ff = fuse_file_alloc(fc);
        if (!ff)
                return -ENOMEM;

        err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
        if (err) {
                fuse_file_free(ff);
                return err;
        }

        if (isdir)
                outarg.open_flags &= ~FOPEN_DIRECT_IO;

        ff->fh = outarg.fh;
        ff->nodeid = nodeid;
        ff->open_flags = outarg.open_flags;
        file->private_data = fuse_file_get(ff);

        return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

void fuse_finish_open(struct inode *inode, struct file *file)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = get_fuse_conn(inode);

        if (ff->open_flags & FOPEN_DIRECT_IO)
                file->f_op = &fuse_direct_io_file_operations;
        if (!(ff->open_flags & FOPEN_KEEP_CACHE))
                invalidate_inode_pages2(inode->i_mapping);
        if (ff->open_flags & FOPEN_NONSEEKABLE)
                nonseekable_open(inode, file);
        if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
                struct fuse_inode *fi = get_fuse_inode(inode);

                spin_lock(&fc->lock);
                fi->attr_version = ++fc->attr_version;
                i_size_write(inode, 0);
                spin_unlock(&fc->lock);
                fuse_invalidate_attr(inode);
        }
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        err = generic_file_open(inode, file);
        if (err)
                return err;

        err = fuse_do_open(fc, get_node_id(inode), file, isdir);
        if (err)
                return err;

        fuse_finish_open(inode, file);

        return 0;
}

static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
        struct fuse_conn *fc = ff->fc;
        struct fuse_req *req = ff->reserved_req;
        struct fuse_release_in *inarg = &req->misc.release.in;

        spin_lock(&fc->lock);
        list_del(&ff->write_entry);
        if (!RB_EMPTY_NODE(&ff->polled_node))
                rb_erase(&ff->polled_node, &fc->polled_files);
        spin_unlock(&fc->lock);

        wake_up_interruptible_all(&ff->poll_wait);

        inarg->fh = ff->fh;
        inarg->flags = flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_release_in);
        req->in.args[0].value = inarg;
}

void fuse_release_common(struct file *file, int opcode)
{
        struct fuse_file *ff;
        struct fuse_req *req;

        ff = file->private_data;
        if (unlikely(!ff))
                return;

        req = ff->reserved_req;
        fuse_prepare_release(ff, file->f_flags, opcode);

        if (ff->flock) {
                struct fuse_release_in *inarg = &req->misc.release.in;
                inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
                inarg->lock_owner = fuse_lock_owner_id(ff->fc,
                                                       (fl_owner_t) file);
        }
        /* Hold vfsmount and dentry until release is finished */
        path_get(&file->f_path);
        req->misc.release.path = file->f_path;

        /*
         * Normally this will send the RELEASE request, however if
         * some asynchronous READ or WRITE requests are outstanding,
         * the sending will be delayed.
         *
         * Make the release synchronous if this is a fuseblk mount;
         * synchronous RELEASE is allowed (and desirable) in this case
         * because the server can be trusted not to screw up.
         */
        fuse_file_put(ff, ff->fc->destroy_req != NULL);
}

static int fuse_open(struct inode *inode, struct file *file)
{
        return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
        fuse_release_common(file, FUSE_RELEASE);

        /* return value is ignored by VFS */
        return 0;
}

void fuse_sync_release(struct fuse_file *ff, int flags)
{
        WARN_ON(atomic_read(&ff->count) > 1);
        fuse_prepare_release(ff, flags, FUSE_RELEASE);
        ff->reserved_req->force = 1;
        ff->reserved_req->background = 0;
        fuse_request_send(ff->fc, ff->reserved_req);
        fuse_put_request(ff->fc, ff->reserved_req);
        kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
        u32 *k = fc->scramble_key;
        u64 v = (unsigned long) id;
        u32 v0 = v;
        u32 v1 = v >> 32;
        u32 sum = 0;
        int i;

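        /*
         * XTEA encipherment: 32 iterations over the two 32-bit halves
         * of the pointer value, keyed by the per-connection
         * scramble_key.
         */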
        for (i = 0; i < 32; i++) {
                v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
                sum += 0x9E3779B9;
                v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
        }

        return (u64) v0 + ((u64) v1 << 32);
}

/*
 * Check if page is under writeback
 *
 * This is currently done by walking the list of writepage requests
 * for the inode, which can be pretty inefficient.
 */
static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;
        bool found = false;

        spin_lock(&fc->lock);
        list_for_each_entry(req, &fi->writepages, writepages_entry) {
                pgoff_t curr_index;

                BUG_ON(req->inode != inode);
                curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
                if (curr_index <= index &&
                    index < curr_index + req->num_pages) {
                        found = true;
                        break;
                }
        }
        spin_unlock(&fc->lock);

        return found;
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
        struct fuse_inode *fi = get_fuse_inode(inode);

        wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
        return 0;
}

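/*
 * FLUSH is sent on every close() of a file descriptor.  The request is
 * forced (req->force), and a filesystem replying -ENOSYS is never
 * asked again (fc->no_flush).
 */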
static int fuse_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file_inode(file);
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_flush_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if (fc->no_flush)
                return 0;

        req = fuse_get_req_nofail_nopages(fc, file);
        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.lock_owner = fuse_lock_owner_id(fc, id);
        req->in.h.opcode = FUSE_FLUSH;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->force = 1;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                fc->no_flush = 1;
                err = 0;
        }
        return err;
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
        fuse_set_nowrite(inode);
        fuse_release_nowrite(inode);
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
                      int datasync, int isdir)
{
        struct inode *inode = file->f_mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_fsync_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
                return 0;

        mutex_lock(&inode->i_mutex);

        /*
         * Start writeback against all dirty pages of the inode, then
         * wait for all outstanding writes, before sending the FSYNC
         * request.
         */
        err = write_inode_now(inode, 0);
        if (err)
                goto out;

        fuse_sync_writes(inode);

        req = fuse_get_req_nopages(fc);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.fsync_flags = datasync ? 1 : 0;
        req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        fuse_request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                if (isdir)
                        fc->no_fsyncdir = 1;
                else
                        fc->no_fsync = 1;
                err = 0;
        }
out:
        mutex_unlock(&inode->i_mutex);
        return err;
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
                      int datasync)
{
        return fuse_fsync_common(file, start, end, datasync, 0);
}

void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
                    size_t count, int opcode)
{
        struct fuse_read_in *inarg = &req->misc.read.in;
        struct fuse_file *ff = file->private_data;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        inarg->flags = file->f_flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_read_in);
        req->in.args[0].value = inarg;
        req->out.argvar = 1;
        req->out.numargs = 1;
        req->out.args[0].size = count;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
        unsigned i;

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (write)
                        set_page_dirty_lock(page);
                put_page(page);
        }
}

/**
 * In case of short read, the caller sets 'pos' to the position of
 * actual end of fuse request in IO request. Otherwise, if bytes_requested
 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
        int left;

        spin_lock(&io->lock);
        if (err)
                io->err = io->err ? : err;
        else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
                io->bytes = pos;

        left = --io->reqs;
        spin_unlock(&io->lock);

        if (!left) {
                long res;

                if (io->err)
                        res = io->err;
                else if (io->bytes >= 0 && io->write)
                        res = -EIO;
                else {
                        res = io->bytes < 0 ? io->size : io->bytes;

                        if (!is_sync_kiocb(io->iocb)) {
                                struct inode *inode = file_inode(io->iocb->ki_filp);
                                struct fuse_conn *fc = get_fuse_conn(inode);
                                struct fuse_inode *fi = get_fuse_inode(inode);

                                spin_lock(&fc->lock);
                                fi->attr_version = ++fc->attr_version;
                                spin_unlock(&fc->lock);
                        }
                }

                aio_complete(io->iocb, res, 0);
                kfree(io);
        }
}

static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
        struct fuse_io_priv *io = req->io;
        ssize_t pos = -1;

        fuse_release_user_pages(req, !io->write);

        if (io->write) {
                if (req->misc.write.in.size != req->misc.write.out.size)
                        pos = req->misc.write.in.offset - io->offset +
                                req->misc.write.out.size;
        } else {
                if (req->misc.read.in.size != req->out.args[0].size)
                        pos = req->misc.read.in.offset - io->offset +
                                req->out.args[0].size;
        }

        fuse_aio_complete(io, req->out.h.error, pos);
}

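/*
 * Account one more in-flight request on the fuse_io_priv, then submit
 * it in the background.  The extra reference keeps the request alive
 * until fuse_aio_complete_req() has run on the reply.
 */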
static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
                size_t num_bytes, struct fuse_io_priv *io)
{
        spin_lock(&io->lock);
        io->size += num_bytes;
        io->reqs++;
        spin_unlock(&io->lock);

        req->io = io;
        req->end = fuse_aio_complete_req;

        __fuse_get_request(req);
        fuse_request_send_background(fc, req);

        return num_bytes;
}

static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
                             loff_t pos, size_t count, fl_owner_t owner)
{
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;

        fuse_read_fill(req, file, pos, count, FUSE_READ);
        if (owner != NULL) {
                struct fuse_read_in *inarg = &req->misc.read.in;

                inarg->read_flags |= FUSE_READ_LOCKOWNER;
                inarg->lock_owner = fuse_lock_owner_id(fc, owner);
        }

        if (io->async)
                return fuse_async_req_send(fc, req, count, io);

        fuse_request_send(fc, req);
        return req->out.args[0].size;
}

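/*
 * A short read indicates EOF, so the file may have shrunk on the
 * server.  Trim i_size, but only if the attributes did not change
 * while the read was in flight and no truncate is in progress
 * (FUSE_I_SIZE_UNSTABLE).
 */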
static void fuse_read_update_size(struct inode *inode, loff_t size,
                                  u64 attr_ver)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fc->lock);
        if (attr_ver == fi->attr_version && size < inode->i_size &&
            !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
                fi->attr_version = ++fc->attr_version;
                i_size_write(inode, size);
        }
        spin_unlock(&fc->lock);
}

static int fuse_readpage(struct file *file, struct page *page)
{
        struct fuse_io_priv io = { .async = 0, .file = file };
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        size_t num_read;
        loff_t pos = page_offset(page);
        size_t count = PAGE_CACHE_SIZE;
        u64 attr_ver;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        /*
         * Page writeback can extend beyond the lifetime of the
         * page-cache page, so make sure we read a properly synced
         * page.
         */
        fuse_wait_on_page_writeback(inode, page->index);

        req = fuse_get_req(fc, 1);
        err = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        attr_ver = fuse_get_attr_version(fc);

        req->out.page_zeroing = 1;
        req->out.argpages = 1;
        req->num_pages = 1;
        req->pages[0] = page;
        req->page_descs[0].length = count;
        num_read = fuse_send_read(req, &io, pos, count, NULL);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        if (!err) {
                /*
                 * Short read means EOF.  If file size is larger, truncate it
                 */
                if (num_read < count)
                        fuse_read_update_size(inode, pos + num_read, attr_ver);

                SetPageUptodate(page);
        }

        fuse_invalidate_attr(inode); /* atime changed */
 out:
        unlock_page(page);
        return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;
        size_t count = req->misc.read.in.size;
        size_t num_read = req->out.args[0].size;
        struct address_space *mapping = NULL;

        for (i = 0; mapping == NULL && i < req->num_pages; i++)
                mapping = req->pages[i]->mapping;

        if (mapping) {
                struct inode *inode = mapping->host;

                /*
                 * Short read means EOF. If file size is larger, truncate it
                 */
                if (!req->out.h.error && num_read < count) {
                        loff_t pos;

                        pos = page_offset(req->pages[0]) + num_read;
                        fuse_read_update_size(inode, pos,
                                              req->misc.read.attr_ver);
                }
                fuse_invalidate_attr(inode); /* atime changed */
        }

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (!req->out.h.error)
                        SetPageUptodate(page);
                else
                        SetPageError(page);
                unlock_page(page);
                page_cache_release(page);
        }
        if (req->ff)
                fuse_file_put(req->ff, false);
}

static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        loff_t pos = page_offset(req->pages[0]);
        size_t count = req->num_pages << PAGE_CACHE_SHIFT;

        req->out.argpages = 1;
        req->out.page_zeroing = 1;
        req->out.page_replace = 1;
        fuse_read_fill(req, file, pos, count, FUSE_READ);
        req->misc.read.attr_ver = fuse_get_attr_version(fc);
        if (fc->async_read) {
                req->ff = fuse_file_get(ff);
                req->end = fuse_readpages_end;
                fuse_request_send_background(fc, req);
        } else {
                fuse_request_send(fc, req);
                fuse_readpages_end(fc, req);
                fuse_put_request(fc, req);
        }
}

struct fuse_fill_data {
        struct fuse_req *req;
        struct file *file;
        struct inode *inode;
        unsigned nr_pages;
};

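/*
 * Add one page to the read request being built.  The request is sent
 * off first if it is already full, if another page would exceed
 * fc->max_read, or if the new page is not contiguous with the
 * previous one.
 */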
static int fuse_readpages_fill(void *_data, struct page *page)
{
        struct fuse_fill_data *data = _data;
        struct fuse_req *req = data->req;
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);

        fuse_wait_on_page_writeback(inode, page->index);

        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
             (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
             req->pages[req->num_pages - 1]->index + 1 != page->index)) {
                int nr_alloc = min_t(unsigned, data->nr_pages,
                                     FUSE_MAX_PAGES_PER_REQ);
                fuse_send_readpages(req, data->file);
                if (fc->async_read)
                        req = fuse_get_req_for_background(fc, nr_alloc);
                else
                        req = fuse_get_req(fc, nr_alloc);

                data->req = req;
                if (IS_ERR(req)) {
                        unlock_page(page);
                        return PTR_ERR(req);
                }
        }

        if (WARN_ON(req->num_pages >= req->max_pages)) {
                fuse_put_request(fc, req);
                return -EIO;
        }

        page_cache_get(page);
        req->pages[req->num_pages] = page;
        req->page_descs[req->num_pages].length = PAGE_SIZE;
        req->num_pages++;
        data->nr_pages--;
        return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_fill_data data;
        int err;
        int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        data.file = file;
        data.inode = inode;
        if (fc->async_read)
                data.req = fuse_get_req_for_background(fc, nr_alloc);
        else
                data.req = fuse_get_req(fc, nr_alloc);
        data.nr_pages = nr_pages;
        err = PTR_ERR(data.req);
        if (IS_ERR(data.req))
                goto out;

        err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
        if (!err) {
                if (data.req->num_pages)
                        fuse_send_readpages(data.req, file);
                else
                        fuse_put_request(fc, data.req);
        }
out:
        return err;
}

static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                                  unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);

        /*
         * In auto invalidate mode, always update attributes on read.
         * Otherwise, only update if we attempt to read past EOF (to ensure
         * i_size is up to date).
         */
        if (fc->auto_inval_data ||
            (pos + iov_length(iov, nr_segs) > i_size_read(inode))) {
                int err;
                err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
                if (err)
                        return err;
        }

        return generic_file_aio_read(iocb, iov, nr_segs, pos);
}

static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
                            loff_t pos, size_t count)
{
        struct fuse_write_in *inarg = &req->misc.write.in;
        struct fuse_write_out *outarg = &req->misc.write.out;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        req->in.h.opcode = FUSE_WRITE;
        req->in.h.nodeid = ff->nodeid;
        req->in.numargs = 2;
        if (ff->fc->minor < 9)
                req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
        else
                req->in.args[0].size = sizeof(struct fuse_write_in);
        req->in.args[0].value = inarg;
        req->in.args[1].size = count;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(struct fuse_write_out);
        req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
                              loff_t pos, size_t count, fl_owner_t owner)
{
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        struct fuse_write_in *inarg = &req->misc.write.in;

        fuse_write_fill(req, ff, pos, count);
        inarg->flags = file->f_flags;
        if (owner != NULL) {
                inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
                inarg->lock_owner = fuse_lock_owner_id(fc, owner);
        }

        if (io->async)
                return fuse_async_req_send(fc, req, count, io);

        fuse_request_send(fc, req);
        return req->misc.write.out.size;
}

void fuse_write_update_size(struct inode *inode, loff_t pos)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);

        spin_lock(&fc->lock);
        fi->attr_version = ++fc->attr_version;
        if (pos > inode->i_size)
                i_size_write(inode, pos);
        spin_unlock(&fc->lock);
}

static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
                                    struct inode *inode, loff_t pos,
                                    size_t count)
{
        size_t res;
        unsigned offset;
        unsigned i;
        struct fuse_io_priv io = { .async = 0, .file = file };

        for (i = 0; i < req->num_pages; i++)
                fuse_wait_on_page_writeback(inode, req->pages[i]->index);

        res = fuse_send_write(req, &io, pos, count, NULL);

        offset = req->page_descs[0].offset;
        count = res;
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];

                if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
                        SetPageUptodate(page);

                if (count > PAGE_CACHE_SIZE - offset)
                        count -= PAGE_CACHE_SIZE - offset;
                else
                        count = 0;
                offset = 0;

                unlock_page(page);
                page_cache_release(page);
        }

        return res;
}

static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                               struct address_space *mapping,
                               struct iov_iter *ii, loff_t pos)
{
        struct fuse_conn *fc = get_fuse_conn(mapping->host);
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        size_t count = 0;
        int err;

        req->in.argpages = 1;
        req->page_descs[0].offset = offset;

        do {
                size_t tmp;
                struct page *page;
                pgoff_t index = pos >> PAGE_CACHE_SHIFT;
                size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
                                     iov_iter_count(ii));

                bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
                err = -EFAULT;
                if (iov_iter_fault_in_readable(ii, bytes))
                        break;

                err = -ENOMEM;
                page = grab_cache_page_write_begin(mapping, index, 0);
                if (!page)
                        break;

                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);

                pagefault_disable();
                tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
                pagefault_enable();
                flush_dcache_page(page);

                mark_page_accessed(page);

                if (!tmp) {
                        unlock_page(page);
                        page_cache_release(page);
                        bytes = min(bytes, iov_iter_single_seg_count(ii));
                        goto again;
                }

                err = 0;
                req->pages[req->num_pages] = page;
                req->page_descs[req->num_pages].length = tmp;
                req->num_pages++;

                iov_iter_advance(ii, tmp);
                count += tmp;
                pos += tmp;
                offset += tmp;
                if (offset == PAGE_CACHE_SIZE)
                        offset = 0;

                if (!fc->big_writes)
                        break;
        } while (iov_iter_count(ii) && count < fc->max_write &&
                 req->num_pages < req->max_pages && offset == 0);

        return count > 0 ? count : err;
}

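/*
 * Number of page cache pages spanned by [pos, pos + len), capped at
 * the per-request maximum.
 */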
static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
        return min_t(unsigned,
                     ((pos + len - 1) >> PAGE_CACHE_SHIFT) -
                     (pos >> PAGE_CACHE_SHIFT) + 1,
                     FUSE_MAX_PAGES_PER_REQ);
}

static ssize_t fuse_perform_write(struct file *file,
                                  struct address_space *mapping,
                                  struct iov_iter *ii, loff_t pos)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        int err = 0;
        ssize_t res = 0;

        if (is_bad_inode(inode))
                return -EIO;

        if (inode->i_size < pos + iov_iter_count(ii))
                set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

        do {
                struct fuse_req *req;
                ssize_t count;
                unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));

                req = fuse_get_req(fc, nr_pages);
                if (IS_ERR(req)) {
                        err = PTR_ERR(req);
                        break;
                }

                count = fuse_fill_write_pages(req, mapping, ii, pos);
                if (count <= 0) {
                        err = count;
                } else {
                        size_t num_written;

                        num_written = fuse_send_write_pages(req, file, inode,
                                                            pos, count);
                        err = req->out.h.error;
                        if (!err) {
                                res += num_written;
                                pos += num_written;

                                /* break out of the loop on short write */
                                if (num_written != count)
                                        err = -EIO;
                        }
                }
                fuse_put_request(fc, req);
        } while (!err && iov_iter_count(ii));

        if (res > 0)
                fuse_write_update_size(inode, pos);

        clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
        fuse_invalidate_attr(inode);

        return res > 0 ? res : err;
}

static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count = 0;
        size_t ocount = 0;
        ssize_t written = 0;
        ssize_t written_buffered = 0;
        struct inode *inode = mapping->host;
        ssize_t err;
        struct iov_iter i;
        loff_t endbyte = 0;

        WARN_ON(iocb->ki_pos != pos);

        ocount = 0;
        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
        if (err)
                return err;

        count = ocount;
        mutex_lock(&inode->i_mutex);

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;

        if (count == 0)
                goto out;

        err = file_remove_suid(file);
        if (err)
                goto out;

        err = file_update_time(file);
        if (err)
                goto out;

        if (file->f_flags & O_DIRECT) {
                written = generic_file_direct_write(iocb, iov, &nr_segs,
                                                    pos, &iocb->ki_pos,
                                                    count, ocount);
                if (written < 0 || written == count)
                        goto out;

                pos += written;
                count -= written;

                iov_iter_init(&i, iov, nr_segs, count, written);
                written_buffered = fuse_perform_write(file, mapping, &i, pos);
                if (written_buffered < 0) {
                        err = written_buffered;
                        goto out;
                }
                endbyte = pos + written_buffered - 1;

                err = filemap_write_and_wait_range(file->f_mapping, pos,
                                                   endbyte);
                if (err)
                        goto out;

                invalidate_mapping_pages(file->f_mapping,
                                         pos >> PAGE_CACHE_SHIFT,
                                         endbyte >> PAGE_CACHE_SHIFT);

                written += written_buffered;
                iocb->ki_pos = pos + written_buffered;
        } else {
                iov_iter_init(&i, iov, nr_segs, count, 0);
                written = fuse_perform_write(file, mapping, &i, pos);
                if (written >= 0)
                        iocb->ki_pos = pos + written;
        }
out:
        current->backing_dev_info = NULL;
        mutex_unlock(&inode->i_mutex);

        return written ? written : err;
}

static inline void fuse_page_descs_length_init(struct fuse_req *req,
                unsigned index, unsigned nr_pages)
{
        int i;

        for (i = index; i < index + nr_pages; i++)
                req->page_descs[i].length = PAGE_SIZE -
                        req->page_descs[i].offset;
}

static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
        return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}

static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
                                        size_t max_size)
{
        return min(iov_iter_single_seg_count(ii), max_size);
}

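/*
 * Pin the next chunk of the user buffer into req->pages with
 * get_user_pages_fast() and fill in the per-page offset/length
 * descriptors.  Kernel (KERNEL_DS) buffers skip the pinning and are
 * passed to the request directly.
 */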
static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
                               size_t *nbytesp, int write)
{
        size_t nbytes = 0;  /* # bytes already packed in req */

        /* Special case for kernel I/O: can copy directly into the buffer */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                unsigned long user_addr = fuse_get_user_addr(ii);
                size_t frag_size = fuse_get_frag_size(ii, *nbytesp);

                if (write)
                        req->in.args[1].value = (void *) user_addr;
                else
                        req->out.args[0].value = (void *) user_addr;

                iov_iter_advance(ii, frag_size);
                *nbytesp = frag_size;
                return 0;
        }

        while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
                unsigned npages;
                unsigned long user_addr = fuse_get_user_addr(ii);
                unsigned offset = user_addr & ~PAGE_MASK;
                size_t frag_size = fuse_get_frag_size(ii, *nbytesp - nbytes);
                int ret;

                unsigned n = req->max_pages - req->num_pages;
                frag_size = min_t(size_t, frag_size, n << PAGE_SHIFT);

                npages = (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
                npages = clamp(npages, 1U, n);

                ret = get_user_pages_fast(user_addr, npages, !write,
                                          &req->pages[req->num_pages]);
                if (ret < 0)
                        return ret;

                npages = ret;
                frag_size = min_t(size_t, frag_size,
                                  (npages << PAGE_SHIFT) - offset);
                iov_iter_advance(ii, frag_size);

                req->page_descs[req->num_pages].offset = offset;
                fuse_page_descs_length_init(req, req->num_pages, npages);

                req->num_pages += npages;
                req->page_descs[req->num_pages - 1].length -=
                        (npages << PAGE_SHIFT) - offset - frag_size;

                nbytes += frag_size;
        }

        if (write)
                req->in.argpages = 1;
        else
                req->out.argpages = 1;

        *nbytesp = nbytes;

        return 0;
}

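/*
 * Estimate how many pages the next request will need.  This works on
 * a copy of the iterator, so the caller's position is left untouched.
 */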
static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
        struct iov_iter ii = *ii_p;
        int npages = 0;

        while (iov_iter_count(&ii) && npages < FUSE_MAX_PAGES_PER_REQ) {
                unsigned long user_addr = fuse_get_user_addr(&ii);
                unsigned offset = user_addr & ~PAGE_MASK;
                size_t frag_size = iov_iter_single_seg_count(&ii);

                npages += (frag_size + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
                iov_iter_advance(&ii, frag_size);
        }

        return min(npages, FUSE_MAX_PAGES_PER_REQ);
}

ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                       unsigned long nr_segs, size_t count, loff_t *ppos,
                       int write)
{
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
        struct fuse_conn *fc = ff->fc;
        size_t nmax = write ? fc->max_write : fc->max_read;
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;
        struct iov_iter ii;

        iov_iter_init(&ii, iov, nr_segs, count, 0);

        if (io->async)
                req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
        else
                req = fuse_get_req(fc, fuse_iter_npages(&ii));
        if (IS_ERR(req))
                return PTR_ERR(req);

        while (count) {
                size_t nres;
                fl_owner_t owner = current->files;
                size_t nbytes = min(count, nmax);
                int err = fuse_get_user_pages(req, &ii, &nbytes, write);
                if (err) {
                        res = err;
                        break;
                }

                if (write)
                        nres = fuse_send_write(req, io, pos, nbytes, owner);
                else
                        nres = fuse_send_read(req, io, pos, nbytes, owner);

                if (!io->async)
                        fuse_release_user_pages(req, !write);
                if (req->out.h.error) {
                        if (!res)
                                res = req->out.h.error;
                        break;
                } else if (nres > nbytes) {
                        res = -EIO;
                        break;
                }
                count -= nres;
                res += nres;
                pos += nres;
                if (nres != nbytes)
                        break;
                if (count) {
                        fuse_put_request(fc, req);
                        if (io->async)
                                req = fuse_get_req_for_background(fc,
                                        fuse_iter_npages(&ii));
                        else
                                req = fuse_get_req(fc, fuse_iter_npages(&ii));
                        if (IS_ERR(req))
                                break;
                }
        }
        if (!IS_ERR(req))
                fuse_put_request(fc, req);
        if (res > 0)
                *ppos = pos;

        return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);

static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
                                  const struct iovec *iov,
                                  unsigned long nr_segs, loff_t *ppos,
                                  size_t count)
{
        ssize_t res;
        struct file *file = io->file;
        struct inode *inode = file_inode(file);

        if (is_bad_inode(inode))
                return -EIO;

        res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);

        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct fuse_io_priv io = { .async = 0, .file = file };
        struct iovec iov = { .iov_base = buf, .iov_len = count };
        return __fuse_direct_read(&io, &iov, 1, ppos, count);
}

static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
                                   const struct iovec *iov,
                                   unsigned long nr_segs, loff_t *ppos)
{
        struct file *file = io->file;
        struct inode *inode = file_inode(file);
        size_t count = iov_length(iov, nr_segs);
        ssize_t res;

        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
                res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);

        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
        struct inode *inode = file_inode(file);
        ssize_t res;
        struct fuse_io_priv io = { .async = 0, .file = file };

        if (is_bad_inode(inode))
                return -EIO;

        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
        res = __fuse_direct_write(&io, &iov, 1, ppos);
        if (res > 0)
                fuse_write_update_size(inode, *ppos);
        mutex_unlock(&inode->i_mutex);

        return res;
}

static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;

        for (i = 0; i < req->num_pages; i++)
                __free_page(req->pages[i]);
        fuse_file_put(req->ff, false);
}

static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
        int i;

        list_del(&req->writepages_entry);
        for (i = 0; i < req->num_pages; i++) {
                dec_bdi_stat(bdi, BDI_WRITEBACK);
                dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
                bdi_writeout_inc(bdi);
        }
        wake_up(&fi->page_waitq);
}

/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
        struct fuse_inode *fi = get_fuse_inode(req->inode);
        loff_t size = i_size_read(req->inode);
        struct fuse_write_in *inarg = &req->misc.write.in;
        __u64 data_size = req->num_pages * PAGE_CACHE_SIZE;

        if (!fc->connected)
                goto out_free;

        if (inarg->offset + data_size <= size) {
                inarg->size = data_size;
        } else if (inarg->offset < size) {
                inarg->size = size - inarg->offset;
        } else {
                /* Got truncated off completely */
                goto out_free;
        }

        req->in.args[1].size = inarg->size;
        fi->writectr++;
        fuse_request_send_background_locked(fc, req);
        return;

 out_free:
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
        fuse_put_request(fc, req);
        spin_lock(&fc->lock);
}

/*
 * If fi->writectr is positive (no truncate or fsync going on) send
 * all queued writepage requests.
 *
 * Called with fc->lock
 */
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;

        while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
                req = list_entry(fi->queued_writes.next, struct fuse_req, list);
                list_del_init(&req->list);
                fuse_send_writepage(fc, req);
        }
}

static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);

        mapping_set_error(inode->i_mapping, req->out.h.error);
        spin_lock(&fc->lock);
        fi->writectr--;
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
}

static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
                                             struct fuse_inode *fi)
{
        struct fuse_file *ff = NULL;

        spin_lock(&fc->lock);
        if (!WARN_ON(list_empty(&fi->write_files))) {
                ff = list_entry(fi->write_files.next, struct fuse_file,
                                write_entry);
                fuse_file_get(ff);
        }
        spin_unlock(&fc->lock);

        return ff;
}

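/*
 * Write out a single dirty page.  The data is first copied to a
 * temporary page, so writeback on the page cache page can be ended
 * right away; the WRITE request proceeds against the copy, which
 * stays accounted as NR_WRITEBACK_TEMP until the reply arrives.
 */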
1521 static int fuse_writepage_locked(struct page *page)
1522 {
1523         struct address_space *mapping = page->mapping;
1524         struct inode *inode = mapping->host;
1525         struct fuse_conn *fc = get_fuse_conn(inode);
1526         struct fuse_inode *fi = get_fuse_inode(inode);
1527         struct fuse_req *req;
1528         struct page *tmp_page;
1529         int error = -ENOMEM;
1530
1531         set_page_writeback(page);
1532
1533         req = fuse_request_alloc_nofs(1);
1534         if (!req)
1535                 goto err;
1536
1537         req->background = 1; /* writeback always goes to bg_queue */
1538         tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1539         if (!tmp_page)
1540                 goto err_free;
1541
1542         error = -EIO;
1543         req->ff = fuse_write_file_get(fc, fi);
1544         if (!req->ff)
1545                 goto err_free;
1546
1547         fuse_write_fill(req, req->ff, page_offset(page), 0);
1548
1549         copy_highpage(tmp_page, page);
1550         req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
1551         req->in.argpages = 1;
1552         req->num_pages = 1;
1553         req->pages[0] = tmp_page;
1554         req->page_descs[0].offset = 0;
1555         req->page_descs[0].length = PAGE_SIZE;
1556         req->end = fuse_writepage_end;
1557         req->inode = inode;
1558
1559         inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
1560         inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
1561
1562         spin_lock(&fc->lock);
1563         list_add(&req->writepages_entry, &fi->writepages);
1564         list_add_tail(&req->list, &fi->queued_writes);
1565         fuse_flush_writepages(inode);
1566         spin_unlock(&fc->lock);
1567
1568         end_page_writeback(page);
1569
1570         return 0;
1571
1572 err_free:
1573         fuse_request_free(req);
1574 err:
1575         end_page_writeback(page);
1576         return error;
1577 }
1578
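/*
 * Note on the tmp_page trick above: because the data was copied into
 * tmp_page, the original page can leave writeback immediately, and
 * "is this page still being written back?" is answered by scanning
 * fi->writepages instead of testing PG_writeback.  A minimal sketch of
 * that check (the real fuse_page_is_writeback() appears earlier in
 * this file; field names as used above, details from memory):
 *
 *	list_for_each_entry(req, &fi->writepages, writepages_entry) {
 *		pgoff_t curr = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
 *		if (index >= curr && index < curr + req->num_pages)
 *			return true;	// still owned by a write request
 *	}
 *	return false;
 */
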
1579 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
1580 {
1581         int err;
1582
1583         err = fuse_writepage_locked(page);
1584         unlock_page(page);
1585
1586         return err;
1587 }
1588
1589 struct fuse_fill_wb_data {
1590         struct fuse_req *req;
1591         struct fuse_file *ff;
1592         struct inode *inode;
1593         struct page **orig_pages;
1594 };
1595
1596 static void fuse_writepages_send(struct fuse_fill_wb_data *data)
1597 {
1598         struct fuse_req *req = data->req;
1599         struct inode *inode = data->inode;
1600         struct fuse_conn *fc = get_fuse_conn(inode);
1601         struct fuse_inode *fi = get_fuse_inode(inode);
1602         int num_pages = req->num_pages;
1603         int i;
1604
1605         req->ff = fuse_file_get(data->ff);
1606         spin_lock(&fc->lock);
1607         list_add_tail(&req->list, &fi->queued_writes);
1608         fuse_flush_writepages(inode);
1609         spin_unlock(&fc->lock);
1610
1611         for (i = 0; i < num_pages; i++)
1612                 end_page_writeback(data->orig_pages[i]);
1613 }
1614
1615 static int fuse_writepages_fill(struct page *page,
1616                 struct writeback_control *wbc, void *_data)
1617 {
1618         struct fuse_fill_wb_data *data = _data;
1619         struct fuse_req *req = data->req;
1620         struct inode *inode = data->inode;
1621         struct fuse_conn *fc = get_fuse_conn(inode);
1622         struct page *tmp_page;
1623         int err;
1624
1625         if (!data->ff) {
1626                 err = -EIO;
1627                 data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
1628                 if (!data->ff)
1629                         goto out_unlock;
1630         }
1631
1632         if (req) {
1633                 BUG_ON(!req->num_pages);
1634                 if (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
1635                     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
1636                     data->orig_pages[req->num_pages - 1]->index + 1 != page->index) {
1637
1638                         fuse_writepages_send(data);
1639                         data->req = NULL;
1640                 }
1641         }
1642         err = -ENOMEM;
1643         tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1644         if (!tmp_page)
1645                 goto out_unlock;
1646
1647         /*
1648          * The page must not be redirtied until the writeout is completed
1649          * (i.e. userspace has sent a reply to the write request).  Otherwise
1650          * there could be more than one temporary page instance for each real
1651          * page.
1652          *
1653          * This is ensured by holding the page lock in page_mkwrite() while
1654          * checking fuse_page_is_writeback().  We already hold the page lock
1655          * since clear_page_dirty_for_io() and keep it held until we add the
1656          * request to the fi->writepages list and increment req->num_pages.
1657          * After this fuse_page_is_writeback() will indicate that the page is
1658          * under writeback, so we can release the page lock.
1659          */
1660         if (data->req == NULL) {
1661                 struct fuse_inode *fi = get_fuse_inode(inode);
1662
1663                 err = -ENOMEM;
1664                 req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
1665                 if (!req) {
1666                         __free_page(tmp_page);
1667                         goto out_unlock;
1668                 }
1669
1670                 fuse_write_fill(req, data->ff, page_offset(page), 0);
1671                 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
1672                 req->in.argpages = 1;
1673                 req->background = 1;
1674                 req->num_pages = 0;
1675                 req->end = fuse_writepage_end;
1676                 req->inode = inode;
1677
1678                 spin_lock(&fc->lock);
1679                 list_add(&req->writepages_entry, &fi->writepages);
1680                 spin_unlock(&fc->lock);
1681
1682                 data->req = req;
1683         }
1684         set_page_writeback(page);
1685
1686         copy_highpage(tmp_page, page);
1687         req->pages[req->num_pages] = tmp_page;
1688         req->page_descs[req->num_pages].offset = 0;
1689         req->page_descs[req->num_pages].length = PAGE_SIZE;
1690
1691         inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
1692         inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
1693         data->orig_pages[req->num_pages] = page;
1694
1695         /*
1696          * Protected by fc->lock against concurrent access by
1697          * fuse_page_is_writeback().
1698          */
1699         spin_lock(&fc->lock);
1700         req->num_pages++;
1701         spin_unlock(&fc->lock);
1702
1703         err = 0;
1704 out_unlock:
1705         unlock_page(page);
1706
1707         return err;
1708 }
1709
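/*
 * Worked example of the three cut-offs above, assuming 4 KiB pages,
 * FUSE_MAX_PAGES_PER_REQ == 32 and fc->max_write == 32768: a request
 * is flushed when it already holds 32 pages, when one more page would
 * exceed max_write (32768 / 4096 caps a request at 8 pages), or when
 * write_cache_pages() hands us a non-contiguous page: after collecting
 * pages 5, 6 and 7, page 12 sends the 5..7 request and starts a fresh
 * one.
 */
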
1710 static int fuse_writepages(struct address_space *mapping,
1711                            struct writeback_control *wbc)
1712 {
1713         struct inode *inode = mapping->host;
1714         struct fuse_fill_wb_data data;
1715         int err;
1716
1717         err = -EIO;
1718         if (is_bad_inode(inode))
1719                 goto out;
1720
1721         data.inode = inode;
1722         data.req = NULL;
1723         data.ff = NULL;
1724
1725         err = -ENOMEM;
1726         data.orig_pages = kzalloc(sizeof(struct page *) *
1727                                   FUSE_MAX_PAGES_PER_REQ,
1728                                   GFP_NOFS);
1729         if (!data.orig_pages)
1730                 goto out;
1731
1732         err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
1733         if (data.req) {
1734                 /* Ignore errors if we can write at least one page */
1735                 BUG_ON(!data.req->num_pages);
1736                 fuse_writepages_send(&data);
1737                 err = 0;
1738         }
1739         if (data.ff)
1740                 fuse_file_put(data.ff, false);
1741
1742         kfree(data.orig_pages);
1743 out:
1744         return err;
1745 }
1746
1747 static int fuse_launder_page(struct page *page)
1748 {
1749         int err = 0;
1750         if (clear_page_dirty_for_io(page)) {
1751                 struct inode *inode = page->mapping->host;
1752                 err = fuse_writepage_locked(page);
1753                 if (!err)
1754                         fuse_wait_on_page_writeback(inode, page->index);
1755         }
1756         return err;
1757 }
1758
1759 /*
1760  * Write back dirty pages now, because there may not be any suitable
1761  * open files later
1762  */
1763 static void fuse_vma_close(struct vm_area_struct *vma)
1764 {
1765         filemap_write_and_wait(vma->vm_file->f_mapping);
1766 }
1767
1768 /*
1769  * Wait for writeback against this page to complete before allowing it
1770  * to be marked dirty again, and hence written back again, possibly
1771  * before the previous writepage completed.
1772  *
1773  * Block here, instead of in ->writepage(), so that the userspace fs
1774  * can only block processes actually operating on the filesystem.
1775  *
1776  * Otherwise an unprivileged userspace fs would be able to block
1777  * unrelated operations:
1778  *
1779  * - page migration
1780  * - sync(2)
1781  * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
1782  */
1783 static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
1784 {
1785         struct page *page = vmf->page;
1786         struct inode *inode = file_inode(vma->vm_file);
1787
1788         file_update_time(vma->vm_file);
1789         lock_page(page);
1790         if (page->mapping != inode->i_mapping) {
1791                 unlock_page(page);
1792                 return VM_FAULT_NOPAGE;
1793         }
1794
1795         fuse_wait_on_page_writeback(inode, page->index);
1796         return VM_FAULT_LOCKED;
1797 }
1798
1799 static const struct vm_operations_struct fuse_file_vm_ops = {
1800         .close          = fuse_vma_close,
1801         .fault          = filemap_fault,
1802         .page_mkwrite   = fuse_page_mkwrite,
1803         .remap_pages    = generic_file_remap_pages,
1804 };
1805
1806 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
1807 {
1808         if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
1809                 struct inode *inode = file_inode(file);
1810                 struct fuse_conn *fc = get_fuse_conn(inode);
1811                 struct fuse_inode *fi = get_fuse_inode(inode);
1812                 struct fuse_file *ff = file->private_data;
1813                 /*
1814                  * file may be written through mmap, so chain it onto the
1815                  * inode's write_files list
1816                  */
1817                 spin_lock(&fc->lock);
1818                 if (list_empty(&ff->write_entry))
1819                         list_add(&ff->write_entry, &fi->write_files);
1820                 spin_unlock(&fc->lock);
1821         }
1822         file_accessed(file);
1823         vma->vm_ops = &fuse_file_vm_ops;
1824         return 0;
1825 }
1826
1827 static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
1828 {
1829         /* Can't provide the coherency needed for MAP_SHARED */
1830         if (vma->vm_flags & VM_MAYSHARE)
1831                 return -ENODEV;
1832
1833         invalidate_inode_pages2(file->f_mapping);
1834
1835         return generic_file_mmap(file, vma);
1836 }
1837
1838 static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
1839                                   struct file_lock *fl)
1840 {
1841         switch (ffl->type) {
1842         case F_UNLCK:
1843                 break;
1844
1845         case F_RDLCK:
1846         case F_WRLCK:
1847                 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
1848                     ffl->end < ffl->start)
1849                         return -EIO;
1850
1851                 fl->fl_start = ffl->start;
1852                 fl->fl_end = ffl->end;
1853                 fl->fl_pid = ffl->pid;
1854                 break;
1855
1856         default:
1857                 return -EIO;
1858         }
1859         fl->fl_type = ffl->type;
1860         return 0;
1861 }
1862
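/*
 * Example with hypothetical values: a FUSE_GETLK reply of
 * { .start = 0, .end = 4095, .type = F_WRLCK, .pid = 1234 } becomes
 * fl_start = 0, fl_end = 4095, fl_type = F_WRLCK, fl_pid = 1234,
 * while a reply with .end < .start, or either offset beyond
 * OFFSET_MAX, is rejected as -EIO.
 */
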
1863 static void fuse_lk_fill(struct fuse_req *req, struct file *file,
1864                          const struct file_lock *fl, int opcode, pid_t pid,
1865                          int flock)
1866 {
1867         struct inode *inode = file_inode(file);
1868         struct fuse_conn *fc = get_fuse_conn(inode);
1869         struct fuse_file *ff = file->private_data;
1870         struct fuse_lk_in *arg = &req->misc.lk_in;
1871
1872         arg->fh = ff->fh;
1873         arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
1874         arg->lk.start = fl->fl_start;
1875         arg->lk.end = fl->fl_end;
1876         arg->lk.type = fl->fl_type;
1877         arg->lk.pid = pid;
1878         if (flock)
1879                 arg->lk_flags |= FUSE_LK_FLOCK;
1880         req->in.h.opcode = opcode;
1881         req->in.h.nodeid = get_node_id(inode);
1882         req->in.numargs = 1;
1883         req->in.args[0].size = sizeof(*arg);
1884         req->in.args[0].value = arg;
1885 }
1886
1887 static int fuse_getlk(struct file *file, struct file_lock *fl)
1888 {
1889         struct inode *inode = file_inode(file);
1890         struct fuse_conn *fc = get_fuse_conn(inode);
1891         struct fuse_req *req;
1892         struct fuse_lk_out outarg;
1893         int err;
1894
1895         req = fuse_get_req_nopages(fc);
1896         if (IS_ERR(req))
1897                 return PTR_ERR(req);
1898
1899         fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
1900         req->out.numargs = 1;
1901         req->out.args[0].size = sizeof(outarg);
1902         req->out.args[0].value = &outarg;
1903         fuse_request_send(fc, req);
1904         err = req->out.h.error;
1905         fuse_put_request(fc, req);
1906         if (!err)
1907                 err = convert_fuse_file_lock(&outarg.lk, fl);
1908
1909         return err;
1910 }
1911
1912 static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
1913 {
1914         struct inode *inode = file_inode(file);
1915         struct fuse_conn *fc = get_fuse_conn(inode);
1916         struct fuse_req *req;
1917         int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
1918         pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
1919         int err;
1920
1921         if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
1922                 /* NLM needs asynchronous locks, which we don't support yet */
1923                 return -ENOLCK;
1924         }
1925
1926         /* Unlock on close is handled by the flush method */
1927         if (fl->fl_flags & FL_CLOSE)
1928                 return 0;
1929
1930         req = fuse_get_req_nopages(fc);
1931         if (IS_ERR(req))
1932                 return PTR_ERR(req);
1933
1934         fuse_lk_fill(req, file, fl, opcode, pid, flock);
1935         fuse_request_send(fc, req);
1936         err = req->out.h.error;
1937         /* locking is restartable */
1938         if (err == -EINTR)
1939                 err = -ERESTARTSYS;
1940         fuse_put_request(fc, req);
1941         return err;
1942 }
1943
1944 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
1945 {
1946         struct inode *inode = file_inode(file);
1947         struct fuse_conn *fc = get_fuse_conn(inode);
1948         int err;
1949
1950         if (cmd == F_CANCELLK) {
1951                 err = 0;
1952         } else if (cmd == F_GETLK) {
1953                 if (fc->no_lock) {
1954                         posix_test_lock(file, fl);
1955                         err = 0;
1956                 } else
1957                         err = fuse_getlk(file, fl);
1958         } else {
1959                 if (fc->no_lock)
1960                         err = posix_lock_file(file, fl, NULL);
1961                 else
1962                         err = fuse_setlk(file, fl, 0);
1963         }
1964         return err;
1965 }
1966
1967 static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
1968 {
1969         struct inode *inode = file_inode(file);
1970         struct fuse_conn *fc = get_fuse_conn(inode);
1971         int err;
1972
1973         if (fc->no_flock) {
1974                 err = flock_lock_file_wait(file, fl);
1975         } else {
1976                 struct fuse_file *ff = file->private_data;
1977
1978                 /* emulate flock with POSIX locks */
1979                 fl->fl_owner = (fl_owner_t) file;
1980                 ff->flock = true;
1981                 err = fuse_setlk(file, fl, 1);
1982         }
1983
1984         return err;
1985 }
1986
1987 static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
1988 {
1989         struct inode *inode = mapping->host;
1990         struct fuse_conn *fc = get_fuse_conn(inode);
1991         struct fuse_req *req;
1992         struct fuse_bmap_in inarg;
1993         struct fuse_bmap_out outarg;
1994         int err;
1995
1996         if (!inode->i_sb->s_bdev || fc->no_bmap)
1997                 return 0;
1998
1999         req = fuse_get_req_nopages(fc);
2000         if (IS_ERR(req))
2001                 return 0;
2002
2003         memset(&inarg, 0, sizeof(inarg));
2004         inarg.block = block;
2005         inarg.blocksize = inode->i_sb->s_blocksize;
2006         req->in.h.opcode = FUSE_BMAP;
2007         req->in.h.nodeid = get_node_id(inode);
2008         req->in.numargs = 1;
2009         req->in.args[0].size = sizeof(inarg);
2010         req->in.args[0].value = &inarg;
2011         req->out.numargs = 1;
2012         req->out.args[0].size = sizeof(outarg);
2013         req->out.args[0].value = &outarg;
2014         fuse_request_send(fc, req);
2015         err = req->out.h.error;
2016         fuse_put_request(fc, req);
2017         if (err == -ENOSYS)
2018                 fc->no_bmap = 1;
2019
2020         return err ? 0 : outarg.block;
2021 }
2022
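/*
 * ->bmap is normally reached through the FIBMAP ioctl; a minimal
 * userspace sketch (privileged, error handling omitted):
 *
 *	int blk = 0;			// in: file block, out: device block
 *	ioctl(fd, FIBMAP, &blk);	// blk == 0 afterwards means "no result"
 *
 * For FUSE this can only succeed on fuseblk mounts: without s_bdev, or
 * once the server has answered ENOSYS, the function returns 0 above.
 */
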
2023 static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
2024 {
2025         loff_t retval;
2026         struct inode *inode = file_inode(file);
2027
2028         /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
2029         if (whence == SEEK_CUR || whence == SEEK_SET)
2030                 return generic_file_llseek(file, offset, whence);
2031
2032         mutex_lock(&inode->i_mutex);
2033         retval = fuse_update_attributes(inode, NULL, file, NULL);
2034         if (!retval)
2035                 retval = generic_file_llseek(file, offset, whence);
2036         mutex_unlock(&inode->i_mutex);
2037
2038         return retval;
2039 }
2040
2041 static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
2042                         unsigned int nr_segs, size_t bytes, bool to_user)
2043 {
2044         struct iov_iter ii;
2045         int page_idx = 0;
2046
2047         if (!bytes)
2048                 return 0;
2049
2050         iov_iter_init(&ii, iov, nr_segs, bytes, 0);
2051
2052         while (iov_iter_count(&ii)) {
2053                 struct page *page = pages[page_idx++];
2054                 size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
2055                 void *kaddr;
2056
2057                 kaddr = kmap(page);
2058
2059                 while (todo) {
2060                         char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
2061                         size_t iov_len = ii.iov->iov_len - ii.iov_offset;
2062                         size_t copy = min(todo, iov_len);
2063                         size_t left;
2064
2065                         if (!to_user)
2066                                 left = copy_from_user(kaddr, uaddr, copy);
2067                         else
2068                                 left = copy_to_user(uaddr, kaddr, copy);
2069
2070                         if (unlikely(left))
2071                                 return -EFAULT;
2072
2073                         iov_iter_advance(&ii, copy);
2074                         todo -= copy;
2075                         kaddr += copy;
2076                 }
2077
2078                 kunmap(page);
2079         }
2080
2081         return 0;
2082 }
2083
2084 /*
2085  * CUSE servers compiled on 32bit broke on 64bit kernels because the
2086  * ABI was defined to be 'struct iovec' which is different on 32bit
2087  * and 64bit.  Fortunately we can determine which structure the server
2088  * used from the size of the reply.
2089  */
2090 static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
2091                                      size_t transferred, unsigned count,
2092                                      bool is_compat)
2093 {
2094 #ifdef CONFIG_COMPAT
2095         if (count * sizeof(struct compat_iovec) == transferred) {
2096                 struct compat_iovec *ciov = src;
2097                 unsigned i;
2098
2099                 /*
2100                  * With this interface a 32bit server cannot support
2101                  * non-compat (i.e. those coming from 64bit apps) ioctl
2102                  * requests.
2103                  */
2104                 if (!is_compat)
2105                         return -EINVAL;
2106
2107                 for (i = 0; i < count; i++) {
2108                         dst[i].iov_base = compat_ptr(ciov[i].iov_base);
2109                         dst[i].iov_len = ciov[i].iov_len;
2110                 }
2111                 return 0;
2112         }
2113 #endif
2114
2115         if (count * sizeof(struct iovec) != transferred)
2116                 return -EIO;
2117
2118         memcpy(dst, src, transferred);
2119         return 0;
2120 }
2121
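/*
 * Worked example of the size-based detection above: for count == 2, a
 * 32bit server replies with two struct compat_iovec (2 * 8 == 16
 * bytes) while a 64bit server replies with two struct iovec
 * (2 * 16 == 32 bytes), so `transferred' alone identifies the layout.
 */
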
2122 /* Make sure iov_length() won't overflow */
2123 static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
2124 {
2125         size_t n;
2126         u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
2127
2128         for (n = 0; n < count; n++, iov++) {
2129                 if (iov->iov_len > (size_t) max)
2130                         return -ENOMEM;
2131                 max -= iov->iov_len;
2132         }
2133         return 0;
2134 }
2135
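/*
 * With the usual 4 KiB pages and FUSE_MAX_PAGES_PER_REQ == 32, `max'
 * starts at 128 KiB.  Subtracting each length as we go means the check
 * also fails when the lengths sum past 128 KiB, even though every
 * individual iovec fits, which is exactly the iov_length() overflow
 * being guarded against.
 */
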
2136 static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
2137                                  void *src, size_t transferred, unsigned count,
2138                                  bool is_compat)
2139 {
2140         unsigned i;
2141         struct fuse_ioctl_iovec *fiov = src;
2142
2143         if (fc->minor < 16) {
2144                 return fuse_copy_ioctl_iovec_old(dst, src, transferred,
2145                                                  count, is_compat);
2146         }
2147
2148         if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
2149                 return -EIO;
2150
2151         for (i = 0; i < count; i++) {
2152                 /* Did the server supply an inappropriate value? */
2153                 if (fiov[i].base != (unsigned long) fiov[i].base ||
2154                     fiov[i].len != (unsigned long) fiov[i].len)
2155                         return -EIO;
2156
2157                 dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
2158                 dst[i].iov_len = (size_t) fiov[i].len;
2159
2160 #ifdef CONFIG_COMPAT
2161                 if (is_compat &&
2162                     (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
2163                      (compat_size_t) dst[i].iov_len != fiov[i].len))
2164                         return -EIO;
2165 #endif
2166         }
2167
2168         return 0;
2169 }
2170
2171
2172 /*
2173  * For ioctls, there is no generic way to determine how much memory
2174  * needs to be read and/or written.  Furthermore, ioctls are allowed
2175  * to dereference the passed pointer, so the parameter requires deep
2176  * copying but FUSE has no idea whatsoever about what to copy in or
2177  * out.
2178  *
2179  * This is solved by allowing FUSE server to retry ioctl with
2180  * necessary in/out iovecs.  Let's assume the ioctl implementation
2181  * needs to read in the following structure.
2182  *
2183  * struct a {
2184  *      char    *buf;
2185  *      size_t  buflen;
2186  * }
2187  *
2188  * On the first callout to FUSE server, inarg->in_size and
2189  * inarg->out_size will be zero; the server then completes the ioctl
2190  * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
2191  * the actual iov array to
2192  *
2193  * { { .iov_base = inarg.arg,   .iov_len = sizeof(struct a) } }
2194  *
2195  * which tells FUSE to copy in the requested area and retry the ioctl.
2196  * On the second round, the server has access to the structure and
2197  * from that it can tell what to look for next, so on the second
2198  * invocation it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the iov array to
2199  *
2200  * { { .iov_base = inarg.arg,   .iov_len = sizeof(struct a)     },
2201  *   { .iov_base = a.buf,       .iov_len = a.buflen             } }
2202  *
2203  * FUSE will copy both struct a and the pointed buffer from the
2204  * process doing the ioctl and retry ioctl with both struct a and the
2205  * buffer.
2206  *
2207  * This time, FUSE server has everything it needs and completes ioctl
2208  * without FUSE_IOCTL_RETRY which finishes the ioctl call.
2209  *
2210  * Copying data out works the same way.
2211  *
2212  * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
2213  * automatically initializes in and out iovs by decoding @cmd with
2214  * _IOC_* macros and the server is not allowed to request RETRY.  This
2215  * limits ioctl data transfers to well-formed ioctls and is the forced
2216  * behavior for all FUSE servers.
2217  */
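/*
 * Rough server-side sketch of the retry handshake described above, in
 * the style of the libfuse lowlevel API.  The reply helpers are
 * recalled from memory and should be treated as assumptions (check
 * fuse_lowlevel.h); "struct a" is the example structure from the
 * comment above:
 *
 *	if (in_bufsz < sizeof(struct a)) {
 *		struct iovec iov = {
 *			.iov_base = arg, .iov_len = sizeof(struct a)
 *		};
 *		fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
 *	} else {
 *		const struct a *a = in_buf;	// first round's data
 *		...				// build second-round iovs from a->buf
 *	}
 */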
2218 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
2219                    unsigned int flags)
2220 {
2221         struct fuse_file *ff = file->private_data;
2222         struct fuse_conn *fc = ff->fc;
2223         struct fuse_ioctl_in inarg = {
2224                 .fh = ff->fh,
2225                 .cmd = cmd,
2226                 .arg = arg,
2227                 .flags = flags
2228         };
2229         struct fuse_ioctl_out outarg;
2230         struct fuse_req *req = NULL;
2231         struct page **pages = NULL;
2232         struct iovec *iov_page = NULL;
2233         struct iovec *in_iov = NULL, *out_iov = NULL;
2234         unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
2235         size_t in_size, out_size, transferred;
2236         int err;
2237
2238 #if BITS_PER_LONG == 32
2239         inarg.flags |= FUSE_IOCTL_32BIT;
2240 #else
2241         if (flags & FUSE_IOCTL_COMPAT)
2242                 inarg.flags |= FUSE_IOCTL_32BIT;
2243 #endif
2244
2245         /* assume all the iovs returned by the client always fit in a page */
2246         BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
2247
2248         err = -ENOMEM;
2249         pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
2250         iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
2251         if (!pages || !iov_page)
2252                 goto out;
2253
2254         /*
2255          * If restricted, initialize IO parameters as encoded in @cmd.
2256          * RETRY from server is not allowed.
2257          */
2258         if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
2259                 struct iovec *iov = iov_page;
2260
2261                 iov->iov_base = (void __user *)arg;
2262                 iov->iov_len = _IOC_SIZE(cmd);
2263
2264                 if (_IOC_DIR(cmd) & _IOC_WRITE) {
2265                         in_iov = iov;
2266                         in_iovs = 1;
2267                 }
2268
2269                 if (_IOC_DIR(cmd) & _IOC_READ) {
2270                         out_iov = iov;
2271                         out_iovs = 1;
2272                 }
2273         }
2274
2275  retry:
2276         inarg.in_size = in_size = iov_length(in_iov, in_iovs);
2277         inarg.out_size = out_size = iov_length(out_iov, out_iovs);
2278
2279         /*
2280          * Out data can be used either for actual out data or for iovs;
2281          * make sure there is always at least one page.
2282          */
2283         out_size = max_t(size_t, out_size, PAGE_SIZE);
2284         max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
2285
2286         /* make sure there are enough buffer pages and init request with them */
2287         err = -ENOMEM;
2288         if (max_pages > FUSE_MAX_PAGES_PER_REQ)
2289                 goto out;
2290         while (num_pages < max_pages) {
2291                 pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2292                 if (!pages[num_pages])
2293                         goto out;
2294                 num_pages++;
2295         }
2296
2297         req = fuse_get_req(fc, num_pages);
2298         if (IS_ERR(req)) {
2299                 err = PTR_ERR(req);
2300                 req = NULL;
2301                 goto out;
2302         }
2303         memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
2304         req->num_pages = num_pages;
2305         fuse_page_descs_length_init(req, 0, req->num_pages);
2306
2307         /* okay, let's send it to the client */
2308         req->in.h.opcode = FUSE_IOCTL;
2309         req->in.h.nodeid = ff->nodeid;
2310         req->in.numargs = 1;
2311         req->in.args[0].size = sizeof(inarg);
2312         req->in.args[0].value = &inarg;
2313         if (in_size) {
2314                 req->in.numargs++;
2315                 req->in.args[1].size = in_size;
2316                 req->in.argpages = 1;
2317
2318                 err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
2319                                            false);
2320                 if (err)
2321                         goto out;
2322         }
2323
2324         req->out.numargs = 2;
2325         req->out.args[0].size = sizeof(outarg);
2326         req->out.args[0].value = &outarg;
2327         req->out.args[1].size = out_size;
2328         req->out.argpages = 1;
2329         req->out.argvar = 1;
2330
2331         fuse_request_send(fc, req);
2332         err = req->out.h.error;
2333         transferred = req->out.args[1].size;
2334         fuse_put_request(fc, req);
2335         req = NULL;
2336         if (err)
2337                 goto out;
2338
2339         /* did it ask for retry? */
2340         if (outarg.flags & FUSE_IOCTL_RETRY) {
2341                 void *vaddr;
2342
2343                 /* no retry if in restricted mode */
2344                 err = -EIO;
2345                 if (!(flags & FUSE_IOCTL_UNRESTRICTED))
2346                         goto out;
2347
2348                 in_iovs = outarg.in_iovs;
2349                 out_iovs = outarg.out_iovs;
2350
2351                 /*
2352                  * Make sure things are within bounds; the separate checks
2353                  * protect against overflow.
2354                  */
2355                 err = -ENOMEM;
2356                 if (in_iovs > FUSE_IOCTL_MAX_IOV ||
2357                     out_iovs > FUSE_IOCTL_MAX_IOV ||
2358                     in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
2359                         goto out;
2360
2361                 vaddr = kmap_atomic(pages[0]);
2362                 err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
2363                                             transferred, in_iovs + out_iovs,
2364                                             (flags & FUSE_IOCTL_COMPAT) != 0);
2365                 kunmap_atomic(vaddr);
2366                 if (err)
2367                         goto out;
2368
2369                 in_iov = iov_page;
2370                 out_iov = in_iov + in_iovs;
2371
2372                 err = fuse_verify_ioctl_iov(in_iov, in_iovs);
2373                 if (err)
2374                         goto out;
2375
2376                 err = fuse_verify_ioctl_iov(out_iov, out_iovs);
2377                 if (err)
2378                         goto out;
2379
2380                 goto retry;
2381         }
2382
2383         err = -EIO;
2384         if (transferred > inarg.out_size)
2385                 goto out;
2386
2387         err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
2388  out:
2389         if (req)
2390                 fuse_put_request(fc, req);
2391         free_page((unsigned long) iov_page);
2392         while (num_pages)
2393                 __free_page(pages[--num_pages]);
2394         kfree(pages);
2395
2396         return err ? err : outarg.result;
2397 }
2398 EXPORT_SYMBOL_GPL(fuse_do_ioctl);
2399
2400 long fuse_ioctl_common(struct file *file, unsigned int cmd,
2401                        unsigned long arg, unsigned int flags)
2402 {
2403         struct inode *inode = file_inode(file);
2404         struct fuse_conn *fc = get_fuse_conn(inode);
2405
2406         if (!fuse_allow_current_process(fc))
2407                 return -EACCES;
2408
2409         if (is_bad_inode(inode))
2410                 return -EIO;
2411
2412         return fuse_do_ioctl(file, cmd, arg, flags);
2413 }
2414
2415 static long fuse_file_ioctl(struct file *file, unsigned int cmd,
2416                             unsigned long arg)
2417 {
2418         return fuse_ioctl_common(file, cmd, arg, 0);
2419 }
2420
2421 static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
2422                                    unsigned long arg)
2423 {
2424         return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
2425 }
2426
2427 /*
2428  * All files which have been polled are linked into the RB tree
2429  * fuse_conn->polled_files, which is indexed by kh.  Walk the tree and
2430  * find the matching one.
2431  */
2432 static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
2433                                               struct rb_node **parent_out)
2434 {
2435         struct rb_node **link = &fc->polled_files.rb_node;
2436         struct rb_node *last = NULL;
2437
2438         while (*link) {
2439                 struct fuse_file *ff;
2440
2441                 last = *link;
2442                 ff = rb_entry(last, struct fuse_file, polled_node);
2443
2444                 if (kh < ff->kh)
2445                         link = &last->rb_left;
2446                 else if (kh > ff->kh)
2447                         link = &last->rb_right;
2448                 else
2449                         return link;
2450         }
2451
2452         if (parent_out)
2453                 *parent_out = last;
2454         return link;
2455 }
2456
2457 /*
2458  * The file is about to be polled.  Make sure it's on the polled_files
2459  * RB tree.  Note that files once added to the polled_files tree are
2460  * not removed before the file is released.  This is because a file
2461  * polled once is likely to be polled again.
2462  */
2463 static void fuse_register_polled_file(struct fuse_conn *fc,
2464                                       struct fuse_file *ff)
2465 {
2466         spin_lock(&fc->lock);
2467         if (RB_EMPTY_NODE(&ff->polled_node)) {
2468                 struct rb_node **link, *parent;
2469
2470                 link = fuse_find_polled_node(fc, ff->kh, &parent);
2471                 BUG_ON(*link);
2472                 rb_link_node(&ff->polled_node, parent, link);
2473                 rb_insert_color(&ff->polled_node, &fc->polled_files);
2474         }
2475         spin_unlock(&fc->lock);
2476 }
2477
2478 unsigned fuse_file_poll(struct file *file, poll_table *wait)
2479 {
2480         struct fuse_file *ff = file->private_data;
2481         struct fuse_conn *fc = ff->fc;
2482         struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
2483         struct fuse_poll_out outarg;
2484         struct fuse_req *req;
2485         int err;
2486
2487         if (fc->no_poll)
2488                 return DEFAULT_POLLMASK;
2489
2490         poll_wait(file, &ff->poll_wait, wait);
2491         inarg.events = (__u32)poll_requested_events(wait);
2492
2493         /*
2494          * Ask for notification iff there's someone waiting for it.
2495          * The client may ignore the flag and always notify.
2496          */
2497         if (waitqueue_active(&ff->poll_wait)) {
2498                 inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
2499                 fuse_register_polled_file(fc, ff);
2500         }
2501
2502         req = fuse_get_req_nopages(fc);
2503         if (IS_ERR(req))
2504                 return POLLERR;
2505
2506         req->in.h.opcode = FUSE_POLL;
2507         req->in.h.nodeid = ff->nodeid;
2508         req->in.numargs = 1;
2509         req->in.args[0].size = sizeof(inarg);
2510         req->in.args[0].value = &inarg;
2511         req->out.numargs = 1;
2512         req->out.args[0].size = sizeof(outarg);
2513         req->out.args[0].value = &outarg;
2514         fuse_request_send(fc, req);
2515         err = req->out.h.error;
2516         fuse_put_request(fc, req);
2517
2518         if (!err)
2519                 return outarg.revents;
2520         if (err == -ENOSYS) {
2521                 fc->no_poll = 1;
2522                 return DEFAULT_POLLMASK;
2523         }
2524         return POLLERR;
2525 }
2526 EXPORT_SYMBOL_GPL(fuse_file_poll);
2527
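/*
 * The other half of FUSE_POLL_SCHEDULE_NOTIFY: when readiness changes,
 * the server sends a FUSE_NOTIFY_POLL notification carrying the kh it
 * was handed, which lands in fuse_notify_poll_wakeup() below.  Sketch
 * of the payload (as I recall the protocol, the notification is
 * written to /dev/fuse with unique == 0 and the notify code in the
 * error field):
 *
 *	struct fuse_notify_poll_wakeup_out out = { .kh = saved_kh };
 */
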
2528 /*
2529  * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
2530  * wakes up the poll waiters.
2531  */
2532 int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2533                             struct fuse_notify_poll_wakeup_out *outarg)
2534 {
2535         u64 kh = outarg->kh;
2536         struct rb_node **link;
2537
2538         spin_lock(&fc->lock);
2539
2540         link = fuse_find_polled_node(fc, kh, NULL);
2541         if (*link) {
2542                 struct fuse_file *ff;
2543
2544                 ff = rb_entry(*link, struct fuse_file, polled_node);
2545                 wake_up_interruptible_sync(&ff->poll_wait);
2546         }
2547
2548         spin_unlock(&fc->lock);
2549         return 0;
2550 }
2551
2552 static void fuse_do_truncate(struct file *file)
2553 {
2554         struct inode *inode = file->f_mapping->host;
2555         struct iattr attr;
2556
2557         attr.ia_valid = ATTR_SIZE;
2558         attr.ia_size = i_size_read(inode);
2559
2560         attr.ia_file = file;
2561         attr.ia_valid |= ATTR_FILE;
2562
2563         fuse_do_setattr(inode, &attr, file);
2564 }
2565
2566 static inline loff_t fuse_round_up(loff_t off)
2567 {
2568         return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
2569 }
2570
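/*
 * E.g. with 4 KiB pages and FUSE_MAX_PAGES_PER_REQ == 32 the
 * granularity is 128 KiB, so fuse_round_up(200000) == 262144; the
 * short-read optimization below therefore never trims `count' to an
 * awkward sub-request size.
 */
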
2571 static ssize_t
2572 fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2573                         loff_t offset, unsigned long nr_segs)
2574 {
2575         ssize_t ret = 0;
2576         struct file *file = iocb->ki_filp;
2577         struct fuse_file *ff = file->private_data;
2578         bool async_dio = ff->fc->async_dio;
2579         loff_t pos = 0;
2580         struct inode *inode;
2581         loff_t i_size;
2582         size_t count = iov_length(iov, nr_segs);
2583         struct fuse_io_priv *io;
2584
2585         pos = offset;
2586         inode = file->f_mapping->host;
2587         i_size = i_size_read(inode);
2588
2589         /* optimization for short read */
2590         if (async_dio && rw != WRITE && offset + count > i_size) {
2591                 if (offset >= i_size)
2592                         return 0;
2593                 count = min_t(loff_t, count, fuse_round_up(i_size - offset));
2594         }
2595
2596         io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
2597         if (!io)
2598                 return -ENOMEM;
2599         spin_lock_init(&io->lock);
2600         io->reqs = 1;
2601         io->bytes = -1;
2602         io->size = 0;
2603         io->offset = offset;
2604         io->write = (rw == WRITE);
2605         io->err = 0;
2606         io->file = file;
2607         /*
2608          * By default, we want to optimize all I/Os with async request
2609          * submission to the client filesystem if supported.
2610          */
2611         io->async = async_dio;
2612         io->iocb = iocb;
2613
2614         /*
2615          * We cannot asynchronously extend the size of a file. We have no method
2616          * to wait on real async I/O requests, so we must submit this request
2617          * synchronously.
2618          */
2619         if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
2620                 io->async = false;
2621
2622         if (rw == WRITE)
2623                 ret = __fuse_direct_write(io, iov, nr_segs, &pos);
2624         else
2625                 ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
2626
2627         if (io->async) {
2628                 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2629
2630                 /* we have a non-extending, async request, so return */
2631                 if (!is_sync_kiocb(iocb))
2632                         return -EIOCBQUEUED;
2633
2634                 ret = wait_on_sync_kiocb(iocb);
2635         } else {
2636                 kfree(io);
2637         }
2638
2639         if (rw == WRITE) {
2640                 if (ret > 0)
2641                         fuse_write_update_size(inode, pos);
2642                 else if (ret < 0 && offset + count > i_size)
2643                         fuse_do_truncate(file);
2644         }
2645
2646         return ret;
2647 }
2648
2649 static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2650                                 loff_t length)
2651 {
2652         struct fuse_file *ff = file->private_data;
2653         struct inode *inode = file->f_inode;
2654         struct fuse_inode *fi = get_fuse_inode(inode);
2655         struct fuse_conn *fc = ff->fc;
2656         struct fuse_req *req;
2657         struct fuse_fallocate_in inarg = {
2658                 .fh = ff->fh,
2659                 .offset = offset,
2660                 .length = length,
2661                 .mode = mode
2662         };
2663         int err;
2664         bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
2665                            (mode & FALLOC_FL_PUNCH_HOLE);
2666
2667         if (fc->no_fallocate)
2668                 return -EOPNOTSUPP;
2669
2670         if (lock_inode) {
2671                 mutex_lock(&inode->i_mutex);
2672                 if (mode & FALLOC_FL_PUNCH_HOLE) {
2673                         loff_t endbyte = offset + length - 1;
2674                         err = filemap_write_and_wait_range(inode->i_mapping,
2675                                                            offset, endbyte);
2676                         if (err)
2677                                 goto out;
2678
2679                         fuse_sync_writes(inode);
2680                 }
2681         }
2682
2683         if (!(mode & FALLOC_FL_KEEP_SIZE))
2684                 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2685
2686         req = fuse_get_req_nopages(fc);
2687         if (IS_ERR(req)) {
2688                 err = PTR_ERR(req);
2689                 goto out;
2690         }
2691
2692         req->in.h.opcode = FUSE_FALLOCATE;
2693         req->in.h.nodeid = ff->nodeid;
2694         req->in.numargs = 1;
2695         req->in.args[0].size = sizeof(inarg);
2696         req->in.args[0].value = &inarg;
2697         fuse_request_send(fc, req);
2698         err = req->out.h.error;
2699         if (err == -ENOSYS) {
2700                 fc->no_fallocate = 1;
2701                 err = -EOPNOTSUPP;
2702         }
2703         fuse_put_request(fc, req);
2704
2705         if (err)
2706                 goto out;
2707
2708         /* we could have extended the file */
2709         if (!(mode & FALLOC_FL_KEEP_SIZE))
2710                 fuse_write_update_size(inode, offset + length);
2711
2712         if (mode & FALLOC_FL_PUNCH_HOLE)
2713                 truncate_pagecache_range(inode, offset, offset + length - 1);
2714
2715         fuse_invalidate_attr(inode);
2716
2717 out:
2718         if (!(mode & FALLOC_FL_KEEP_SIZE))
2719                 clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
2720
2721         if (lock_inode)
2722                 mutex_unlock(&inode->i_mutex);
2723
2724         return err;
2725 }
2726
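/*
 * Userspace view of the PUNCH_HOLE path above (error handling
 * omitted); the filemap flush + fuse_sync_writes() dance exists so
 * that the read below cannot see stale cached data for the punched
 * range:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  off, len);
 *	pread(fd, buf, len, off);	// reads back zeroes
 */
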
2727 static const struct file_operations fuse_file_operations = {
2728         .llseek         = fuse_file_llseek,
2729         .read           = do_sync_read,
2730         .aio_read       = fuse_file_aio_read,
2731         .write          = do_sync_write,
2732         .aio_write      = fuse_file_aio_write,
2733         .mmap           = fuse_file_mmap,
2734         .open           = fuse_open,
2735         .flush          = fuse_flush,
2736         .release        = fuse_release,
2737         .fsync          = fuse_fsync,
2738         .lock           = fuse_file_lock,
2739         .flock          = fuse_file_flock,
2740         .splice_read    = generic_file_splice_read,
2741         .unlocked_ioctl = fuse_file_ioctl,
2742         .compat_ioctl   = fuse_file_compat_ioctl,
2743         .poll           = fuse_file_poll,
2744         .fallocate      = fuse_file_fallocate,
2745 };
2746
2747 static const struct file_operations fuse_direct_io_file_operations = {
2748         .llseek         = fuse_file_llseek,
2749         .read           = fuse_direct_read,
2750         .write          = fuse_direct_write,
2751         .mmap           = fuse_direct_mmap,
2752         .open           = fuse_open,
2753         .flush          = fuse_flush,
2754         .release        = fuse_release,
2755         .fsync          = fuse_fsync,
2756         .lock           = fuse_file_lock,
2757         .flock          = fuse_file_flock,
2758         .unlocked_ioctl = fuse_file_ioctl,
2759         .compat_ioctl   = fuse_file_compat_ioctl,
2760         .poll           = fuse_file_poll,
2761         .fallocate      = fuse_file_fallocate,
2762         /* no splice_read */
2763 };
2764
2765 static const struct address_space_operations fuse_file_aops  = {
2766         .readpage       = fuse_readpage,
2767         .writepage      = fuse_writepage,
2768         .writepages     = fuse_writepages,
2769         .launder_page   = fuse_launder_page,
2770         .readpages      = fuse_readpages,
2771         .set_page_dirty = __set_page_dirty_nobuffers,
2772         .bmap           = fuse_bmap,
2773         .direct_IO      = fuse_direct_IO,
2774 };
2775
2776 void fuse_init_file_inode(struct inode *inode)
2777 {
2778         inode->i_fop = &fuse_file_operations;
2779         inode->i_data.a_ops = &fuse_file_aops;
2780 }