[PATCH] Add support for the sys_vmsplice syscall
/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

/*
 * Passed to the actors
 */
struct splice_desc {
        unsigned int len, total_len;    /* current and remaining length */
        unsigned int flags;             /* splice flags */
        struct file *file;              /* file to read/write */
        loff_t pos;                     /* file position */
};

struct partial_page {
        unsigned int offset;
        unsigned int len;
};

/*
 * Passed to move_to_pipe
 */
struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
        int nr_pages;                   /* number of pages in map */
        unsigned int flags;             /* splice flags */
        struct pipe_buf_operations *ops;/* ops associated with output pipe */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        struct address_space *mapping = page_mapping(page);

        lock_page(page);

        WARN_ON(!PageUptodate(page));

        /*
         * At least for ext2 with nobh option, we need to wait on writeback
         * completing on this page, since we'll remove it from the pagecache.
         * Otherwise truncate won't wait on the page, allowing the disk
         * blocks to be reused by someone else before we actually wrote our
         * data to them. fs corruption ensues.
         */
        wait_on_page_writeback(page);

        if (PagePrivate(page))
                try_to_release_page(page, mapping_gfp_mask(mapping));

        if (!remove_mapping(mapping, page)) {
                unlock_page(page);
                return 1;
        }

        buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
        return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
                                        struct pipe_buffer *buf)
{
        page_cache_release(buf->page);
        buf->page = NULL;
        buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
                                     struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        int err;

        if (!PageUptodate(page)) {
                lock_page(page);

                /*
                 * Page got truncated/unhashed. This will cause a 0-byte
                 * splice, if this is the first page.
                 */
                if (!page->mapping) {
                        err = -ENODATA;
                        goto error;
                }

                /*
                 * Uh oh, read-error from disk.
                 */
                if (!PageUptodate(page)) {
                        err = -EIO;
                        goto error;
                }

                /*
                 * Page is ok after all, fall through to mapping.
                 */
                unlock_page(page);
        }

        return kmap(page);
error:
        unlock_page(page);
        return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
                                      struct pipe_buffer *buf)
{
        kunmap(buf->page);
}

static void *user_page_pipe_buf_map(struct file *file,
                                    struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
{
        return kmap(buf->page);
}

static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
                                     struct pipe_buffer *buf)
{
        kunmap(buf->page);
}

static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
                                    struct pipe_buffer *buf)
{
        page_cache_get(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
        .can_merge = 0,
        .map = page_cache_pipe_buf_map,
        .unmap = page_cache_pipe_buf_unmap,
        .release = page_cache_pipe_buf_release,
        .steal = page_cache_pipe_buf_steal,
        .get = page_cache_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
{
        return 1;
}

static struct pipe_buf_operations user_page_pipe_buf_ops = {
        .can_merge = 0,
        .map = user_page_pipe_buf_map,
        .unmap = user_page_pipe_buf_unmap,
        .release = page_cache_pipe_buf_release,
        .steal = user_page_pipe_buf_steal,
        .get = page_cache_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe,
                            struct splice_pipe_desc *spd)
{
        int ret, do_wakeup, page_nr;

        ret = 0;
        do_wakeup = 0;
        page_nr = 0;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                if (pipe->nrbufs < PIPE_BUFFERS) {
                        int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;

                        buf->page = spd->pages[page_nr];
                        buf->offset = spd->partial[page_nr].offset;
                        buf->len = spd->partial[page_nr].len;
                        buf->ops = spd->ops;
                        pipe->nrbufs++;
                        page_nr++;
                        ret += buf->len;

                        if (pipe->inode)
                                do_wakeup = 1;

                        if (!--spd->nr_pages)
                                break;
                        if (pipe->nrbufs < PIPE_BUFFERS)
                                continue;

                        break;
                }

                if (spd->flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }

                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

        while (page_nr < spd->nr_pages)
                page_cache_release(spd->pages[page_nr++]);

        return ret;
}

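/*
 * Worked example of the slot arithmetic in move_to_pipe() (illustrative,
 * assuming PIPE_BUFFERS == 16): with curbuf == 14 and nrbufs == 3, the
 * next free slot is (14 + 3) & 15 == 1, i.e. the index wraps around the
 * bufs[] array. The same masking is used wherever a slot is computed.
 */
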
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
                           struct pipe_inode_info *pipe, size_t len,
                           unsigned int flags)
{
        struct address_space *mapping = in->f_mapping;
        unsigned int loff, nr_pages;
        struct page *pages[PIPE_BUFFERS];
        struct partial_page partial[PIPE_BUFFERS];
        struct page *page;
        pgoff_t index, end_index;
        loff_t isize;
        size_t total_len;
        int error;
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
        };

        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (nr_pages > PIPE_BUFFERS)
                nr_pages = PIPE_BUFFERS;

        /*
         * Initiate read-ahead on this page range. However, don't call into
         * read-ahead if this is a non-zero offset (we are likely doing small
         * chunk splice and the page is already there) for a single page.
         * Note: this must use the local nr_pages; spd.nr_pages is still
         * zero at this point, so using it would disable read-ahead.
         */
        if (!loff || nr_pages > 1)
                do_page_cache_readahead(mapping, in, index, nr_pages);

        /*
         * Now fill in the holes:
         */
        error = 0;
        total_len = 0;
        for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
                unsigned int this_len;

                if (!len)
                        break;

                /*
                 * this_len is the max we'll use from this page
                 */
                this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
find_page:
                /*
                 * lookup the page for this index
                 */
                page = find_get_page(mapping, index);
                if (!page) {
                        /*
                         * page didn't exist, allocate one
                         */
                        page = page_cache_alloc_cold(mapping);
                        if (!page)
                                break;

                        error = add_to_page_cache_lru(page, mapping, index,
                                                mapping_gfp_mask(mapping));
                        if (unlikely(error)) {
                                page_cache_release(page);
                                break;
                        }

                        goto readpage;
                }

                /*
                 * If the page isn't uptodate, we may need to start io on it
                 */
                if (!PageUptodate(page)) {
                        /*
                         * If in nonblock mode then don't block on waiting
                         * for an in-flight io page
                         */
                        if (flags & SPLICE_F_NONBLOCK)
                                break;

                        lock_page(page);

                        /*
                         * page was truncated, stop here. if this isn't the
                         * first page, we'll just complete what we already
                         * added
                         */
                        if (!page->mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                break;
                        }
                        /*
                         * page was already under io and is now done, great
                         */
                        if (PageUptodate(page)) {
                                unlock_page(page);
                                goto fill_it;
                        }

readpage:
                        /*
                         * need to read in the page
                         */
                        error = mapping->a_ops->readpage(in, page);

                        if (unlikely(error)) {
                                page_cache_release(page);
                                if (error == AOP_TRUNCATED_PAGE)
                                        goto find_page;
                                break;
                        }

                        /*
                         * i_size must be checked after ->readpage().
                         */
                        isize = i_size_read(mapping->host);
                        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
                        if (unlikely(!isize || index > end_index)) {
                                page_cache_release(page);
                                break;
                        }

                        /*
                         * if this is the last page, see if we need to shrink
                         * the length and stop
                         */
                        if (end_index == index) {
                                loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
                                if (total_len + loff > isize) {
                                        page_cache_release(page);
                                        break;
                                }
                                /*
                                 * force quit after adding this page
                                 */
                                nr_pages = spd.nr_pages;
                                this_len = min(this_len, loff);
                                loff = 0;
                        }
                }
fill_it:
                pages[spd.nr_pages] = page;
                partial[spd.nr_pages].offset = loff;
                partial[spd.nr_pages].len = this_len;
                len -= this_len;
                total_len += this_len;
                loff = 0;
        }

        if (spd.nr_pages)
                return move_to_pipe(pipe, &spd);

        return error;
}


/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in to start reading from, updated on return
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from the given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
                                 struct pipe_inode_info *pipe, size_t len,
                                 unsigned int flags)
{
        ssize_t spliced;
        int ret;

        ret = 0;
        spliced = 0;

        while (len) {
                ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

                if (ret < 0)
                        break;
                else if (!ret) {
                        if (spliced)
                                break;
                        if (flags & SPLICE_F_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
                        }
                }

                *ppos += ret;
                len -= ret;
                spliced += ret;
        }

        if (spliced)
                return spliced;

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
                            struct pipe_buffer *buf, struct splice_desc *sd)
{
        struct file *file = sd->file;
        loff_t pos = sd->pos;
        ssize_t ret;
        void *ptr;
        int more;

        /*
         * Sub-optimal, but we are limited by the pipe ->map. We don't
         * need a kmap'ed buffer here, we just want to make sure we
         * have the page pinned if the pipe page originates from the
         * page cache.
         */
        ptr = buf->ops->map(file, info, buf);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

        ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
                                   &pos, more);

        buf->ops->unmap(info, buf);
        return ret;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *      - Destination page already exists in the address space and there
 *        are users of it. For that case we have no option other than
 *        copying the data. Tough luck.
 *      - Destination page already exists in the address space, but there
 *        are no users of it. Make sure it's uptodate, then drop it. Fall
 *        through to last case.
 *      - Destination page does not exist, we can add the pipe page to
 *        the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        struct file *file = sd->file;
        struct address_space *mapping = file->f_mapping;
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        unsigned int offset, this_len;
        struct page *page;
        pgoff_t index;
        char *src;
        int ret;

        /*
         * make sure the data in this buffer is uptodate
         */
        src = buf->ops->map(file, info, buf);
        if (IS_ERR(src))
                return PTR_ERR(src);

        index = sd->pos >> PAGE_CACHE_SHIFT;
        offset = sd->pos & ~PAGE_CACHE_MASK;

        this_len = sd->len;
        if (this_len + offset > PAGE_CACHE_SIZE)
                this_len = PAGE_CACHE_SIZE - offset;

        /*
         * Reuse buf page, if SPLICE_F_MOVE is set.
         */
        if (sd->flags & SPLICE_F_MOVE) {
                /*
                 * If steal succeeds, buf->page is now pruned from the vm
                 * side (LRU and page cache) and we can reuse it. The page
                 * will also be locked on successful return.
                 */
                if (buf->ops->steal(info, buf))
                        goto find_page;

                page = buf->page;
                if (add_to_page_cache(page, mapping, index, gfp_mask))
                        goto find_page;

                if (!(buf->flags & PIPE_BUF_FLAG_LRU))
                        lru_cache_add(page);
        } else {
find_page:
                page = find_lock_page(mapping, index);
                if (!page) {
                        ret = -ENOMEM;
                        page = page_cache_alloc_cold(mapping);
                        if (unlikely(!page))
                                goto out_nomem;

                        /*
                         * This will also lock the page
                         */
                        ret = add_to_page_cache_lru(page, mapping, index,
                                                    gfp_mask);
                        if (unlikely(ret))
                                goto out;
                }

                /*
                 * We get here with the page locked. If the page is also
                 * uptodate, we don't need to do more. If it isn't, we
                 * may need to bring it in if we are not going to overwrite
                 * the full page.
                 */
                if (!PageUptodate(page)) {
                        if (this_len < PAGE_CACHE_SIZE) {
                                ret = mapping->a_ops->readpage(file, page);
                                if (unlikely(ret))
                                        goto out;

                                lock_page(page);

                                if (!PageUptodate(page)) {
                                        /*
                                         * Page got invalidated, repeat.
                                         */
                                        if (!page->mapping) {
                                                unlock_page(page);
                                                page_cache_release(page);
                                                goto find_page;
                                        }
                                        ret = -EIO;
                                        goto out;
                                }
                        } else
                                SetPageUptodate(page);
                }
        }

        ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
                char *dst = kmap_atomic(page, KM_USER0);

                memcpy(dst + offset, src + buf->offset, this_len);
                flush_dcache_page(page);
                kunmap_atomic(dst, KM_USER0);
        }

        ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        /*
         * Return the number of bytes written.
         */
        ret = this_len;
        mark_page_accessed(page);
        balance_dirty_pages_ratelimited(mapping);
out:
        if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
                page_cache_release(page);

        unlock_page(page);
out_nomem:
        buf->ops->unmap(info, buf);
        return ret;
}

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
                           struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
                              loff_t *ppos, size_t len, unsigned int flags,
                              splice_actor *actor)
{
        int ret, do_wakeup, err;
        struct splice_desc sd;

        ret = 0;
        do_wakeup = 0;

        sd.total_len = len;
        sd.flags = flags;
        sd.file = out;
        sd.pos = *ppos;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (pipe->nrbufs) {
                        struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
                        struct pipe_buf_operations *ops = buf->ops;

                        sd.len = buf->len;
                        if (sd.len > sd.total_len)
                                sd.len = sd.total_len;

                        err = actor(pipe, buf, &sd);
                        if (err <= 0) {
                                if (!ret && err != -ENODATA)
                                        ret = err;

                                break;
                        }

                        ret += err;
                        buf->offset += err;
                        buf->len -= err;

                        sd.len -= err;
                        sd.pos += err;
                        sd.total_len -= err;
                        if (sd.len)
                                continue;

                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
                                pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
                                pipe->nrbufs--;
                                if (pipe->inode)
                                        do_wakeup = 1;
                        }

                        if (!sd.total_len)
                                break;
                }

                if (pipe->nrbufs)
                        continue;
                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        if (ret)
                                break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                        do_wakeup = 0;
                }

                pipe_wait(pipe);
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }

        return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe to splice from
 * @out:	file to write to
 * @ppos:	position in @out to start writing at, updated on return
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                          loff_t *ppos, size_t len, unsigned int flags)
{
        struct address_space *mapping = out->f_mapping;
        ssize_t ret;

        ret = move_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
        if (ret > 0) {
                struct inode *inode = mapping->host;

                *ppos += ret;

                /*
                 * If file or inode is SYNC and we actually wrote some data,
                 * sync it.
                 */
                if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
                        int err;

                        mutex_lock(&inode->i_mutex);
                        err = generic_osync_inode(inode, mapping,
                                                  OSYNC_METADATA|OSYNC_DATA);
                        mutex_unlock(&inode->i_mutex);

                        if (err)
                                ret = err;
                }
        }

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	splice position (not meaningful for a socket)
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
                                loff_t *ppos, size_t len, unsigned int flags)
{
        return move_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

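/*
 * Illustrative userspace sketch (not built as part of this file; file_fd
 * and sock_fd are hypothetical descriptors): sending a file to a socket
 * with no copy through user space by splicing file -> pipe -> socket,
 * which ends up exercising generic_file_splice_read() and
 * generic_splice_sendpage() above.
 */
#if 0
static void example_file_to_socket(int file_fd, int sock_fd)
{
        int pfd[2];
        loff_t off = 0;
        ssize_t n;

        pipe(pfd);
        while ((n = splice(file_fd, &off, pfd[1], NULL, 65536,
                           SPLICE_F_MOVE | SPLICE_F_MORE)) > 0) {
                /* drain what was just queued into the pipe */
                while (n > 0) {
                        ssize_t m = splice(pfd[0], NULL, sock_fd, NULL, n,
                                           SPLICE_F_MOVE | SPLICE_F_MORE);
                        if (m <= 0)
                                break;
                        n -= m;
                }
        }
}
#endif
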
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
                           loff_t *ppos, size_t len, unsigned int flags)
{
        int ret;

        if (unlikely(!out->f_op || !out->f_op->splice_write))
                return -EINVAL;

        if (unlikely(!(out->f_mode & FMODE_WRITE)))
                return -EBADF;

        ret = rw_verify_area(WRITE, out, ppos, len);
        if (unlikely(ret < 0))
                return ret;

        return out->f_op->splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
                         struct pipe_inode_info *pipe, size_t len,
                         unsigned int flags)
{
        loff_t isize, left;
        int ret;

        if (unlikely(!in->f_op || !in->f_op->splice_read))
                return -EINVAL;

        if (unlikely(!(in->f_mode & FMODE_READ)))
                return -EBADF;

        ret = rw_verify_area(READ, in, ppos, len);
        if (unlikely(ret < 0))
                return ret;

        isize = i_size_read(in->f_mapping->host);
        if (unlikely(*ppos >= isize))
                return 0;

        left = isize - *ppos;
        if (unlikely(left < len))
                len = left;

        return in->f_op->splice_read(in, ppos, pipe, len, flags);
}

long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
                      size_t len, unsigned int flags)
{
        struct pipe_inode_info *pipe;
        long ret, bytes;
        loff_t out_off;
        umode_t i_mode;
        int i;

        /*
         * We require the input being a regular file, as we don't want to
         * randomly drop data for eg socket -> socket splicing. Use the
         * piped splicing for that!
         */
        i_mode = in->f_dentry->d_inode->i_mode;
        if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
                return -EINVAL;

        /*
         * neither in nor out is a pipe, setup an internal pipe attached to
         * 'out' and transfer the wanted data from 'in' to 'out' through that
         */
        pipe = current->splice_pipe;
        if (unlikely(!pipe)) {
                pipe = alloc_pipe_info(NULL);
                if (!pipe)
                        return -ENOMEM;

                /*
                 * We don't have an immediate reader, but we'll read the stuff
                 * out of the pipe right after the move_to_pipe(). So set
                 * PIPE_READERS appropriately.
                 */
                pipe->readers = 1;

                current->splice_pipe = pipe;
        }

        /*
         * Do the splice.
         */
        ret = 0;
        bytes = 0;
        out_off = 0;

        while (len) {
                size_t read_len, max_read_len;

                /*
                 * Do at most PIPE_BUFFERS pages worth of transfer:
                 */
                max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

                ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
                if (unlikely(ret < 0))
                        goto out_release;

                read_len = ret;

                /*
                 * NOTE: nonblocking mode only applies to the input. We
                 * must not do the output in nonblocking mode as then we
                 * could get stuck data in the internal pipe:
                 */
                ret = do_splice_from(pipe, out, &out_off, read_len,
                                     flags & ~SPLICE_F_NONBLOCK);
                if (unlikely(ret < 0))
                        goto out_release;

                bytes += ret;
                len -= ret;

                /*
                 * In nonblocking mode, if we got back a short read then
                 * that was due to either an IO error or due to the
                 * pagecache entry not being there. In the IO error case
                 * the _next_ splice attempt will produce a clean IO error
                 * return value (not a short read), so in both cases it's
                 * correct to break out of the loop here:
                 */
                if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
                        break;
        }

        pipe->nrbufs = pipe->curbuf = 0;

        return bytes;

out_release:
        /*
         * If we did an incomplete transfer we must release
         * the pipe buffers in question:
         */
        for (i = 0; i < PIPE_BUFFERS; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;

                if (buf->ops) {
                        buf->ops->release(pipe, buf);
                        buf->ops = NULL;
                }
        }
        pipe->nrbufs = pipe->curbuf = 0;

        /*
         * If we transferred some data, return the number of bytes:
         */
        if (bytes > 0)
                return bytes;

        return ret;
}

EXPORT_SYMBOL(do_splice_direct);

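/*
 * Hypothetical sketch of an in-kernel caller of do_splice_direct(), for
 * example a sendfile()-style path. The function and its in_file/out_file/
 * count names are assumptions for illustration, not part of this file:
 */
#if 0
static long example_sendfile_like(struct file *in_file, struct file *out_file,
                                  size_t count)
{
        loff_t pos = in_file->f_pos;
        long n = do_splice_direct(in_file, &pos, out_file, count, 0);

        if (n > 0)
                in_file->f_pos = pos;
        return n;
}
#endif
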
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
                      struct file *out, loff_t __user *off_out,
                      size_t len, unsigned int flags)
{
        struct pipe_inode_info *pipe;
        loff_t offset, *off;
        long ret;

        pipe = in->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_in)
                        return -ESPIPE;
                if (off_out) {
                        if (out->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&offset, off_out, sizeof(loff_t)))
                                return -EFAULT;
                        off = &offset;
                } else
                        off = &out->f_pos;

                ret = do_splice_from(pipe, out, off, len, flags);

                if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
                        ret = -EFAULT;

                return ret;
        }

        pipe = out->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_out)
                        return -ESPIPE;
                if (off_in) {
                        if (in->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&offset, off_in, sizeof(loff_t)))
                                return -EFAULT;
                        off = &offset;
                } else
                        off = &in->f_pos;

                ret = do_splice_to(in, off, pipe, len, flags);

                if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
                        ret = -EFAULT;

                return ret;
        }

        return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
                                unsigned int nr_vecs, struct page **pages,
                                struct partial_page *partial)
{
        int buffers = 0, error = 0;

        /*
         * It's ok to take the mmap_sem for reading, even
         * across a "get_user()".
         */
        down_read(&current->mm->mmap_sem);

        while (nr_vecs) {
                unsigned long off, npages;
                void __user *base;
                size_t len;
                int i;

                /*
                 * Get user address base and length for this iovec.
                 */
                error = get_user(base, &iov->iov_base);
                if (unlikely(error))
                        break;
                error = get_user(len, &iov->iov_len);
                if (unlikely(error))
                        break;

                /*
                 * Sanity check this iovec. A zero-length read succeeds.
                 */
                if (unlikely(!len))
                        break;
                error = -EFAULT;
                if (unlikely(!base))
                        break;

                /*
                 * Get this base offset and number of pages, then map
                 * in the user pages.
                 */
                off = (unsigned long) base & ~PAGE_MASK;
                npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if (npages > PIPE_BUFFERS - buffers)
                        npages = PIPE_BUFFERS - buffers;

                error = get_user_pages(current, current->mm,
                                       (unsigned long) base, npages, 0, 0,
                                       &pages[buffers], NULL);

                if (unlikely(error <= 0))
                        break;

                /*
                 * Fill this contiguous range into the partial page map.
                 */
                for (i = 0; i < error; i++) {
                        const int plen = min_t(size_t, len, PAGE_SIZE) - off;

                        partial[buffers].offset = off;
                        partial[buffers].len = plen;

                        off = 0;
                        len -= plen;
                        buffers++;
                }

                /*
                 * We didn't complete this iov, stop here since it probably
                 * means we have to move some of this into a pipe to
                 * be able to continue.
                 */
                if (len)
                        break;

                /*
                 * Don't continue if we mapped fewer pages than we asked for,
                 * or if we mapped the max number of pages that we have
                 * room for.
                 */
                if (error < npages || buffers == PIPE_BUFFERS)
                        break;

                nr_vecs--;
                iov++;
        }

        up_read(&current->mm->mmap_sem);

        if (buffers)
                return buffers;

        return error;
}

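/*
 * Worked example of the offset/length math in get_iovec_page_array()
 * (illustrative, assuming PAGE_SIZE == 4096): for iov_base == 0xb7f01234
 * and iov_len == 10000, off == 0x234 (564) and npages == (564 + 10000 +
 * 4095) >> 12 == 3. The partial lengths come out as 4096 - 564 == 3532
 * for the first page, 4096 for the second, and 10000 - 3532 - 4096 ==
 * 2372 for the third.
 */
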
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user pages and fill them into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *      - memcpy() the data internally, at which point we might as well just
 *        do a regular read() on the buffer anyway.
 *      - Lots of nasty vm tricks, that are neither fast nor flexible (it
 *        imposes restrictions on both ends of the pipe).
 *
 * So that direction isn't implemented here.
 *
 */
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
                        unsigned long nr_segs, unsigned int flags)
{
        struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe;
        struct page *pages[PIPE_BUFFERS];
        struct partial_page partial[PIPE_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
        };

        if (unlikely(!pipe))
                return -EBADF;
        if (unlikely(nr_segs > UIO_MAXIOV))
                return -EINVAL;
        else if (unlikely(!nr_segs))
                return 0;

        spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial);
        if (spd.nr_pages <= 0)
                return spd.nr_pages;

        return move_to_pipe(pipe, &spd);
}

asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
                             unsigned long nr_segs, unsigned int flags)
{
        struct file *file;
        long error;
        int fput;

        error = -EBADF;
        file = fget_light(fd, &fput);
        if (file) {
                if (file->f_mode & FMODE_WRITE)
                        error = do_vmsplice(file, iov, nr_segs, flags);

                fput_light(file, fput);
        }

        return error;
}

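/*
 * Illustrative userspace sketch of vmsplice() (not built as part of this
 * file; pipe_write_fd, buf and buflen are hypothetical): map a user
 * buffer straight into a pipe instead of write()ing it.
 */
#if 0
static ssize_t example_vmsplice(int pipe_write_fd, void *buf, size_t buflen)
{
        struct iovec iv = {
                .iov_base = buf,        /* user memory to splice */
                .iov_len = buflen,
        };

        return vmsplice(pipe_write_fd, &iv, 1, 0);
}
#endif
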
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
                           int fd_out, loff_t __user *off_out,
                           size_t len, unsigned int flags)
{
        long error;
        struct file *in, *out;
        int fput_in, fput_out;

        if (unlikely(!len))
                return 0;

        error = -EBADF;
        in = fget_light(fd_in, &fput_in);
        if (in) {
                if (in->f_mode & FMODE_READ) {
                        out = fget_light(fd_out, &fput_out);
                        if (out) {
                                if (out->f_mode & FMODE_WRITE)
                                        error = do_splice(in, off_in,
                                                          out, off_out,
                                                          len, flags);
                                fput_light(out, fput_out);
                        }
                }

                fput_light(in, fput_in);
        }

        return error;
}

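/*
 * Illustrative userspace use of the offset rules enforced by do_splice()
 * above (pipe_read_fd and file_fd are hypothetical): the pipe side of a
 * splice() call must pass a NULL offset, while the file side may pass an
 * explicit one.
 */
#if 0
static void example_offset_rules(int pipe_read_fd, int file_fd)
{
        loff_t out_off = 0;     /* write at the start of the file */

        /* pipe -> file: off_in must be NULL, off_out may be non-NULL */
        splice(pipe_read_fd, NULL, file_fd, &out_off, 4096, 0);

        /* passing a non-NULL offset for the pipe side fails with -ESPIPE */
}
#endif
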
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
                     struct pipe_inode_info *opipe,
                     size_t len, unsigned int flags)
{
        struct pipe_buffer *ibuf, *obuf;
        int ret, do_wakeup, i, ipipe_first;

        ret = do_wakeup = ipipe_first = 0;

        /*
         * Potential ABBA deadlock, work around it by ordering lock
         * grabbing by inode address. Otherwise two different processes
         * could deadlock (one doing tee from A -> B, the other from B -> A).
         */
        if (ipipe->inode < opipe->inode) {
                ipipe_first = 1;
                mutex_lock(&ipipe->inode->i_mutex);
                mutex_lock(&opipe->inode->i_mutex);
        } else {
                mutex_lock(&opipe->inode->i_mutex);
                mutex_lock(&ipipe->inode->i_mutex);
        }

        for (i = 0;; i++) {
                if (!opipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }
                if (ipipe->nrbufs - i) {
                        ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

                        /*
                         * If we have room, fill this buffer
                         */
                        if (opipe->nrbufs < PIPE_BUFFERS) {
                                int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

                                /*
                                 * Get a reference to this pipe buffer,
                                 * so we can copy the contents over.
                                 */
                                ibuf->ops->get(ipipe, ibuf);

                                obuf = opipe->bufs + nbuf;
                                *obuf = *ibuf;

                                if (obuf->len > len)
                                        obuf->len = len;

                                opipe->nrbufs++;
                                do_wakeup = 1;
                                ret += obuf->len;
                                len -= obuf->len;

                                if (!len)
                                        break;
                                if (opipe->nrbufs < PIPE_BUFFERS)
                                        continue;
                        }

                        /*
                         * We have input available, but no output room.
                         * If we already copied data, return that. If we
                         * need to drop the opipe lock, it must be ordered
                         * last to avoid deadlocks.
                         */
                        if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
                                if (!ret)
                                        ret = -EAGAIN;
                                break;
                        }
                        if (signal_pending(current)) {
                                if (!ret)
                                        ret = -ERESTARTSYS;
                                break;
                        }
                        if (do_wakeup) {
                                smp_mb();
                                if (waitqueue_active(&opipe->wait))
                                        wake_up_interruptible(&opipe->wait);
                                kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
                                do_wakeup = 0;
                        }

                        opipe->waiting_writers++;
                        pipe_wait(opipe);
                        opipe->waiting_writers--;
                        continue;
                }

                /*
                 * No input buffers, do the usual checks for available
                 * writers and blocking and wait if necessary
                 */
                if (!ipipe->writers)
                        break;
                if (!ipipe->waiting_writers) {
                        if (ret)
                                break;
                }
                /*
                 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
                 * with another process, we can only safely do that if
                 * the ipipe lock is ordered last.
                 */
                if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (waitqueue_active(&ipipe->wait))
                        wake_up_interruptible_sync(&ipipe->wait);
                kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

                pipe_wait(ipipe);
        }

        mutex_unlock(&ipipe->inode->i_mutex);
        mutex_unlock(&opipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&opipe->wait))
                        wake_up_interruptible(&opipe->wait);
                kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
        }

        return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
                   unsigned int flags)
{
        struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
        struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

        /*
         * Duplicate the contents of ipipe into opipe. The input is not
         * consumed; opipe just gains extra references to the same pages.
         */
        if (ipipe && opipe)
                return link_pipe(ipipe, opipe, len, flags);

        return -EINVAL;
}

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
        struct file *in;
        int error, fput_in;

        if (unlikely(!len))
                return 0;

        error = -EBADF;
        in = fget_light(fdin, &fput_in);
        if (in) {
                if (in->f_mode & FMODE_READ) {
                        int fput_out;
                        struct file *out = fget_light(fdout, &fput_out);

                        if (out) {
                                if (out->f_mode & FMODE_WRITE)
                                        error = do_tee(in, out, len, flags);
                                fput_light(out, fput_out);
                        }
                }
                fput_light(in, fput_in);
        }

        return error;
}
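
/*
 * Illustrative userspace sketch of tee() (not built as part of this
 * file; the fds are hypothetical): duplicate what is sitting in one pipe
 * into another without consuming it, then splice the original on to its
 * destination.
 */
#if 0
static void example_tee(int in_pipe_fd, int log_pipe_fd, int out_fd)
{
        ssize_t n = tee(in_pipe_fd, log_pipe_fd, 65536, SPLICE_F_NONBLOCK);

        if (n > 0)
                splice(in_pipe_fd, NULL, out_fd, NULL, n, SPLICE_F_MOVE);
}
#endif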