2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc., and
13 * to fix lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
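 *
 * A hedged userspace sketch (not part of this file) of the "extended
 * pipe" usage described above: splice(2) moves data from a regular
 * file into a pipe and from the pipe into a socket, so the payload
 * never has to be copied through user space. file_fd and sock_fd are
 * hypothetical, already-open descriptors.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int file_to_socket(int file_fd, int sock_fd, size_t len)
 *	{
 *		int pfd[2];
 *		int err = 0;
 *
 *		if (pipe(pfd) < 0)
 *			return -1;
 *		while (len && !err) {
 *			// file -> pipe: fill the in-kernel pipe buffer
 *			ssize_t n = splice(file_fd, NULL, pfd[1], NULL,
 *					   len, SPLICE_F_MOVE);
 *			if (n <= 0) {
 *				err = n < 0;	// n == 0 is plain EOF
 *				break;
 *			}
 *			len -= n;
 *			while (n > 0) {
 *				// pipe -> socket: drain what we just queued
 *				ssize_t m = splice(pfd[0], NULL, sock_fd,
 *						   NULL, n, SPLICE_F_MOVE |
 *						   SPLICE_F_MORE);
 *				if (m <= 0) {
 *					err = 1;
 *					break;
 *				}
 *				n -= m;
 *			}
 *		}
 *		close(pfd[0]);
 *		close(pfd[1]);
 *		return err ? -1 : 0;
 *	}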
21 #include <linux/file.h>
22 #include <linux/pagemap.h>
23 #include <linux/pipe_fs_i.h>
24 #include <linux/mm_inline.h>
25 #include <linux/swap.h>
26 #include <linux/writeback.h>
27 #include <linux/buffer_head.h>
28 #include <linux/module.h>
29 #include <linux/syscalls.h>
30 #include <linux/uio.h>
38 * Passed to splice_to_pipe
40 struct splice_pipe_desc {
41 struct page **pages; /* page map */
42 struct partial_page *partial; /* pages[] may not be contig */
43 int nr_pages; /* number of pages in map */
44 unsigned int flags; /* splice flags */
45 const struct pipe_buf_operations *ops;/* ops associated with output pipe */
49 * Attempt to steal a page from a pipe buffer. This should perhaps go into
50 * a vm helper function; it's already simplified quite a bit by the
51 * addition of remove_mapping(). If success is returned, the caller may
52 * attempt to reuse this page for another destination.
54 static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
55 struct pipe_buffer *buf)
57 struct page *page = buf->page;
58 struct address_space *mapping;
62 mapping = page_mapping(page);
64 WARN_ON(!PageUptodate(page));
67 * At least for ext2 with nobh option, we need to wait on
68 * writeback completing on this page, since we'll remove it
69 * from the pagecache. Otherwise truncate won't wait on the
70 * page, allowing the disk blocks to be reused by someone else
71 * before we actually wrote our data to them. fs corruption ensues.
74 wait_on_page_writeback(page);
76 if (PagePrivate(page))
77 try_to_release_page(page, GFP_KERNEL);
80 * If we succeeded in removing the mapping, set the LRU flag and return success.
83 if (remove_mapping(mapping, page)) {
84 buf->flags |= PIPE_BUF_FLAG_LRU;
90 * Raced with truncate or failed to remove page from current
91 * address space, unlock and return failure.
97 static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
98 struct pipe_buffer *buf)
100 page_cache_release(buf->page);
101 buf->flags &= ~PIPE_BUF_FLAG_LRU;
104 static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
105 struct pipe_buffer *buf)
107 struct page *page = buf->page;
110 if (!PageUptodate(page)) {
114 * Page got truncated/unhashed. This will cause a 0-byte
115 * splice, if this is the first page.
117 if (!page->mapping) {
123 * Uh oh, read-error from disk.
125 if (!PageUptodate(page)) {
131 * Page is ok after all, we are done.
142 static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
144 .map = generic_pipe_buf_map,
145 .unmap = generic_pipe_buf_unmap,
146 .pin = page_cache_pipe_buf_pin,
147 .release = page_cache_pipe_buf_release,
148 .steal = page_cache_pipe_buf_steal,
149 .get = generic_pipe_buf_get,
152 static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
153 struct pipe_buffer *buf)
155 if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
158 buf->flags |= PIPE_BUF_FLAG_LRU;
159 return generic_pipe_buf_steal(pipe, buf);
162 static const struct pipe_buf_operations user_page_pipe_buf_ops = {
164 .map = generic_pipe_buf_map,
165 .unmap = generic_pipe_buf_unmap,
166 .pin = generic_pipe_buf_pin,
167 .release = page_cache_pipe_buf_release,
168 .steal = user_page_pipe_buf_steal,
169 .get = generic_pipe_buf_get,
173 * Pipe output worker. This sets up our pipe format with the page cache
174 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
176 static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
177 struct splice_pipe_desc *spd)
179 int ret, do_wakeup, page_nr;
186 mutex_lock(&pipe->inode->i_mutex);
189 if (!pipe->readers) {
190 send_sig(SIGPIPE, current, 0);
196 if (pipe->nrbufs < PIPE_BUFFERS) {
197 int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
198 struct pipe_buffer *buf = pipe->bufs + newbuf;
200 buf->page = spd->pages[page_nr];
201 buf->offset = spd->partial[page_nr].offset;
202 buf->len = spd->partial[page_nr].len;
204 if (spd->flags & SPLICE_F_GIFT)
205 buf->flags |= PIPE_BUF_FLAG_GIFT;
214 if (!--spd->nr_pages)
216 if (pipe->nrbufs < PIPE_BUFFERS)
222 if (spd->flags & SPLICE_F_NONBLOCK) {
228 if (signal_pending(current)) {
236 if (waitqueue_active(&pipe->wait))
237 wake_up_interruptible_sync(&pipe->wait);
238 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
242 pipe->waiting_writers++;
244 pipe->waiting_writers--;
248 mutex_unlock(&pipe->inode->i_mutex);
252 if (waitqueue_active(&pipe->wait))
253 wake_up_interruptible(&pipe->wait);
254 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
257 while (page_nr < spd->nr_pages)
258 page_cache_release(spd->pages[page_nr++]);
264 __generic_file_splice_read(struct file *in, loff_t *ppos,
265 struct pipe_inode_info *pipe, size_t len,
268 struct address_space *mapping = in->f_mapping;
269 unsigned int loff, nr_pages;
270 struct page *pages[PIPE_BUFFERS];
271 struct partial_page partial[PIPE_BUFFERS];
273 pgoff_t index, end_index;
277 struct splice_pipe_desc spd = {
281 .ops = &page_cache_pipe_buf_ops,
284 index = *ppos >> PAGE_CACHE_SHIFT;
285 loff = *ppos & ~PAGE_CACHE_MASK;
286 nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
288 if (nr_pages > PIPE_BUFFERS)
289 nr_pages = PIPE_BUFFERS;
292 * Don't try to second-guess the read-ahead logic, call into
293 * page_cache_readahead() like the page cache reads would do.
295 page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);
298 * Now fill in the holes:
304 * Lookup the (hopefully) full range of pages we need.
306 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
309 * If find_get_pages_contig() returned fewer pages than we needed,
312 index += spd.nr_pages;
313 while (spd.nr_pages < nr_pages) {
315 * Page could be there, find_get_pages_contig() breaks on the first hole.
318 page = find_get_page(mapping, index);
321 * Make sure the read-ahead engine is notified
322 * about this failure.
324 handle_ra_miss(mapping, &in->f_ra, index);
327 * page didn't exist, allocate one.
329 page = page_cache_alloc_cold(mapping);
333 error = add_to_page_cache_lru(page, mapping, index,
335 if (unlikely(error)) {
336 page_cache_release(page);
337 if (error == -EEXIST)
342 * add_to_page_cache() locks the page, unlock it
343 * to avoid convoluting the logic below even more.
348 pages[spd.nr_pages++] = page;
353 * Now loop over the map and see if we need to start IO on any
354 * pages, fill in the partial map, etc.
356 index = *ppos >> PAGE_CACHE_SHIFT;
357 nr_pages = spd.nr_pages;
359 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
360 unsigned int this_len;
366 * this_len is the max we'll use from this page
368 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
369 page = pages[page_nr];
372 * If the page isn't uptodate, we may need to start io on it
374 if (!PageUptodate(page)) {
376 * If in nonblock mode then don't block on waiting
377 * for an in-flight io page
379 if (flags & SPLICE_F_NONBLOCK) {
380 if (TestSetPageLocked(page))
386 * page was truncated, stop here. If this isn't the
387 * first page, we'll just complete what we already added.
390 if (!page->mapping) {
395 * page was already under io and is now done, great
397 if (PageUptodate(page)) {
403 * need to read in the page
405 error = mapping->a_ops->readpage(in, page);
406 if (unlikely(error)) {
408 * We really should re-lookup the page here,
409 * but it complicates things a lot. Instead
410 * let's just do what we already stored, and
411 * we'll get it the next time we are called.
413 if (error == AOP_TRUNCATED_PAGE)
420 * i_size must be checked after ->readpage().
422 isize = i_size_read(mapping->host);
423 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
424 if (unlikely(!isize || index > end_index))
428 * if this is the last page, see if we need to shrink
429 * the length and stop
431 if (end_index == index) {
432 loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
433 if (total_len + loff > isize)
436 * force quit after adding this page
439 this_len = min(this_len, loff);
444 partial[page_nr].offset = loff;
445 partial[page_nr].len = this_len;
447 total_len += this_len;
454 * Release any pages at the end, if we quit early. 'page_nr' is how far
455 * we got, 'nr_pages' is how many pages are in the map.
457 while (page_nr < nr_pages)
458 page_cache_release(pages[page_nr++]);
461 return splice_to_pipe(pipe, &spd);
467 * generic_file_splice_read - splice data from file to a pipe
468 * @in: file to splice from
469 * @pipe: pipe to splice to
470 * @len: number of bytes to splice
471 * @flags: splice modifier flags
473 * Will read pages from the given file and fill them into a pipe.
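 *
 * A hedged sketch of how a filesystem is assumed to hook this helper
 * up so that do_splice_to() can reach it through ->splice_read; the
 * structure name below is only an illustration, not from this file:
 *
 *	static const struct file_operations example_splice_fops = {
 *		.read		= do_sync_read,
 *		.aio_read	= generic_file_aio_read,
 *		.mmap		= generic_file_mmap,
 *		.splice_read	= generic_file_splice_read,
 *	};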
475 ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
476 struct pipe_inode_info *pipe, size_t len,
483 isize = i_size_read(in->f_mapping->host);
484 if (unlikely(*ppos >= isize))
487 left = isize - *ppos;
488 if (unlikely(left < len))
494 ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
501 if (flags & SPLICE_F_NONBLOCK) {
518 EXPORT_SYMBOL(generic_file_splice_read);
521 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
522 * using sendpage(). Return the number of bytes sent.
524 static int pipe_to_sendpage(struct pipe_inode_info *pipe,
525 struct pipe_buffer *buf, struct splice_desc *sd)
527 struct file *file = sd->file;
528 loff_t pos = sd->pos;
531 ret = buf->ops->pin(pipe, buf);
533 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
535 ret = file->f_op->sendpage(file, buf->page, buf->offset,
536 sd->len, &pos, more);
543 * This is a little more tricky than the file -> pipe splicing. There are
544 * basically three cases:
546 * - Destination page already exists in the address space and there
547 * are users of it. For that case we have no other option than
548 * copying the data. Tough luck.
549 * - Destination page already exists in the address space, but there
550 * are no users of it. Make sure it's uptodate, then drop it. Fall
551 * through to last case.
552 * - Destination page does not exist, we can add the pipe page to
553 * the page cache and avoid the copy.
555 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
556 * sd->flags), we attempt to migrate pages from the pipe to the output
557 * file address space page cache. This is possible if no one else has
558 * the pipe page referenced outside of the pipe and page cache. If
559 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
560 * a new page in the output file page cache and fill/dirty that.
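 *
 * From user space the move is requested by passing SPLICE_F_MOVE when
 * splicing out of the pipe; whether a page can actually be migrated is
 * decided here per buffer, with the copy path as the fallback. A
 * minimal, hypothetical call (pipe_rd_fd, out_fd and off are
 * placeholders):
 *
 *	ssize_t n = splice(pipe_rd_fd, NULL, out_fd, &off,
 *			   64 * 1024, SPLICE_F_MOVE);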
562 static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
563 struct splice_desc *sd)
565 struct file *file = sd->file;
566 struct address_space *mapping = file->f_mapping;
567 unsigned int offset, this_len;
573 * make sure the data in this buffer is uptodate
575 ret = buf->ops->pin(pipe, buf);
579 index = sd->pos >> PAGE_CACHE_SHIFT;
580 offset = sd->pos & ~PAGE_CACHE_MASK;
583 if (this_len + offset > PAGE_CACHE_SIZE)
584 this_len = PAGE_CACHE_SIZE - offset;
587 page = find_lock_page(mapping, index);
590 page = page_cache_alloc_cold(mapping);
595 * This will also lock the page
597 ret = add_to_page_cache_lru(page, mapping, index,
603 ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
605 loff_t isize = i_size_read(mapping->host);
607 if (ret != AOP_TRUNCATED_PAGE)
609 page_cache_release(page);
610 if (ret == AOP_TRUNCATED_PAGE)
614 * prepare_write() may have instantiated a few blocks
615 * outside i_size. Trim these off again.
617 if (sd->pos + this_len > isize)
618 vmtruncate(mapping->host, isize);
623 if (buf->page != page) {
625 * Careful, ->map() uses KM_USER0!
627 char *src = buf->ops->map(pipe, buf, 1);
628 char *dst = kmap_atomic(page, KM_USER1);
630 memcpy(dst + offset, src + buf->offset, this_len);
631 flush_dcache_page(page);
632 kunmap_atomic(dst, KM_USER1);
633 buf->ops->unmap(pipe, buf, src);
636 ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
638 if (ret == AOP_TRUNCATED_PAGE) {
639 page_cache_release(page);
645 * A partial write has happened, so 'ret' is already initialized to the
646 * number of bytes written; there is nothing more we have to do here.
651 * Return the number of bytes written and mark page as
652 * accessed, we are now done!
654 mark_page_accessed(page);
655 balance_dirty_pages_ratelimited(mapping);
657 page_cache_release(page);
664 * Pipe input worker. Most of this logic works like a regular pipe, the
665 * key here is the 'actor' worker passed in that actually moves the data
666 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
668 ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
669 struct file *out, loff_t *ppos, size_t len,
670 unsigned int flags, splice_actor *actor)
672 int ret, do_wakeup, err;
673 struct splice_desc sd;
685 struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
686 const struct pipe_buf_operations *ops = buf->ops;
689 if (sd.len > sd.total_len)
690 sd.len = sd.total_len;
692 err = actor(pipe, buf, &sd);
694 if (!ret && err != -ENODATA)
712 ops->release(pipe, buf);
713 pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
727 if (!pipe->waiting_writers) {
732 if (flags & SPLICE_F_NONBLOCK) {
738 if (signal_pending(current)) {
746 if (waitqueue_active(&pipe->wait))
747 wake_up_interruptible_sync(&pipe->wait);
748 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
757 if (waitqueue_active(&pipe->wait))
758 wake_up_interruptible(&pipe->wait);
759 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
764 EXPORT_SYMBOL(__splice_from_pipe);
766 ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
767 loff_t *ppos, size_t len, unsigned int flags,
771 struct inode *inode = out->f_mapping->host;
774 * The actor worker might be calling ->prepare_write and
775 * ->commit_write. Most of the time, these expect i_mutex to
776 * be held. Since this may result in an ABBA deadlock with
777 * pipe->inode, we have to order lock acquisition here.
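 *
 * A hedged sketch of the ordering rule inode_double_lock() is assumed
 * to follow (lockdep annotations left out): always take the i_mutex of
 * the inode with the lower address first, so two tasks splicing in
 * opposite directions between the same pipe and file cannot deadlock.
 *
 *	struct inode *first  = inode1 < inode2 ? inode1 : inode2;
 *	struct inode *second = inode1 < inode2 ? inode2 : inode1;
 *
 *	mutex_lock(&first->i_mutex);
 *	mutex_lock(&second->i_mutex);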
779 inode_double_lock(inode, pipe->inode);
780 ret = __splice_from_pipe(pipe, out, ppos, len, flags, actor);
781 inode_double_unlock(inode, pipe->inode);
787 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
789 * @out: file to write to
790 * @len: number of bytes to splice
791 * @flags: splice modifier flags
793 * Will either move or copy pages (determined by @flags options) from
794 * the given pipe inode to the given file. The caller is responsible
795 * for acquiring i_mutex on both inodes.
799 generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
800 loff_t *ppos, size_t len, unsigned int flags)
802 struct address_space *mapping = out->f_mapping;
803 struct inode *inode = mapping->host;
807 err = remove_suid(out->f_path.dentry);
811 ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
816 * If file or inode is SYNC and we actually wrote some data, sync it.
819 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
820 err = generic_osync_inode(inode, mapping,
821 OSYNC_METADATA|OSYNC_DATA);
831 EXPORT_SYMBOL(generic_file_splice_write_nolock);
834 * generic_file_splice_write - splice data from a pipe to a file
836 * @out: file to write to
837 * @len: number of bytes to splice
838 * @flags: splice modifier flags
840 * Will either move or copy pages (determined by @flags options) from
841 * the given pipe inode to the given file.
845 generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
846 loff_t *ppos, size_t len, unsigned int flags)
848 struct address_space *mapping = out->f_mapping;
849 struct inode *inode = mapping->host;
853 err = should_remove_suid(out->f_path.dentry);
855 mutex_lock(&inode->i_mutex);
856 err = __remove_suid(out->f_path.dentry, err);
857 mutex_unlock(&inode->i_mutex);
862 ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
867 * If file or inode is SYNC and we actually wrote some data, sync it.
870 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
871 mutex_lock(&inode->i_mutex);
872 err = generic_osync_inode(inode, mapping,
873 OSYNC_METADATA|OSYNC_DATA);
874 mutex_unlock(&inode->i_mutex);
884 EXPORT_SYMBOL(generic_file_splice_write);
887 * generic_splice_sendpage - splice data from a pipe to a socket
889 * @out: socket to write to
890 * @len: number of bytes to splice
891 * @flags: splice modifier flags
893 * Will send @len bytes from the pipe to a network socket. No data copying is involved.
897 ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
898 loff_t *ppos, size_t len, unsigned int flags)
900 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
903 EXPORT_SYMBOL(generic_splice_sendpage);
906 * Attempt to initiate a splice from pipe to file.
908 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
909 loff_t *ppos, size_t len, unsigned int flags)
913 if (unlikely(!out->f_op || !out->f_op->splice_write))
916 if (unlikely(!(out->f_mode & FMODE_WRITE)))
919 ret = rw_verify_area(WRITE, out, ppos, len);
920 if (unlikely(ret < 0))
923 return out->f_op->splice_write(pipe, out, ppos, len, flags);
927 * Attempt to initiate a splice from a file to a pipe.
929 static long do_splice_to(struct file *in, loff_t *ppos,
930 struct pipe_inode_info *pipe, size_t len,
935 if (unlikely(!in->f_op || !in->f_op->splice_read))
938 if (unlikely(!(in->f_mode & FMODE_READ)))
941 ret = rw_verify_area(READ, in, ppos, len);
942 if (unlikely(ret < 0))
945 return in->f_op->splice_read(in, ppos, pipe, len, flags);
948 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
949 size_t len, unsigned int flags)
951 struct pipe_inode_info *pipe;
958 * We require the input to be a regular file, as we don't want to
959 * randomly drop data for e.g. socket -> socket splicing. Use the
960 * piped splicing for that!
962 i_mode = in->f_path.dentry->d_inode->i_mode;
963 if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
967 * Neither in nor out is a pipe; set up an internal pipe attached to
968 * 'out' and transfer the wanted data from 'in' to 'out' through that pipe.
970 pipe = current->splice_pipe;
971 if (unlikely(!pipe)) {
972 pipe = alloc_pipe_info(NULL);
977 * We don't have an immediate reader, but we'll read the stuff
978 * out of the pipe right after the splice_to_pipe(). So set
979 * PIPE_READERS appropriately.
983 current->splice_pipe = pipe;
994 size_t read_len, max_read_len;
997 * Do at most PIPE_BUFFERS pages worth of transfer:
999 max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
1001 ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
1002 if (unlikely(ret < 0))
1008 * NOTE: nonblocking mode only applies to the input. We
1009 * must not do the output in nonblocking mode as then we
1010 * could get stuck data in the internal pipe:
1012 ret = do_splice_from(pipe, out, &out_off, read_len,
1013 flags & ~SPLICE_F_NONBLOCK);
1014 if (unlikely(ret < 0))
1021 * In nonblocking mode, if we got back a short read then
1022 * that was due either to an IO error or to the
1023 * pagecache entry not being there. In the IO error case
1024 * the _next_ splice attempt will produce a clean IO error
1025 * return value (not a short read), so in both cases it's
1026 * correct to break out of the loop here:
1028 if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
1032 pipe->nrbufs = pipe->curbuf = 0;
1038 * If we did an incomplete transfer we must release
1039 * the pipe buffers in question:
1041 for (i = 0; i < PIPE_BUFFERS; i++) {
1042 struct pipe_buffer *buf = pipe->bufs + i;
1045 buf->ops->release(pipe, buf);
1049 pipe->nrbufs = pipe->curbuf = 0;
1052 * If we transferred some data, return the number of bytes:
1060 EXPORT_SYMBOL(do_splice_direct);
1063 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1064 * location, so checking ->i_pipe is not enough to verify that this is a pipe.
1067 static inline struct pipe_inode_info *pipe_info(struct inode *inode)
1069 if (S_ISFIFO(inode->i_mode))
1070 return inode->i_pipe;
1076 * Determine where to splice to/from.
1078 static long do_splice(struct file *in, loff_t __user *off_in,
1079 struct file *out, loff_t __user *off_out,
1080 size_t len, unsigned int flags)
1082 struct pipe_inode_info *pipe;
1083 loff_t offset, *off;
1086 pipe = pipe_info(in->f_path.dentry->d_inode);
1091 if (out->f_op->llseek == no_llseek)
1093 if (copy_from_user(&offset, off_out, sizeof(loff_t)))
1099 ret = do_splice_from(pipe, out, off, len, flags);
1101 if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
1107 pipe = pipe_info(out->f_path.dentry->d_inode);
1112 if (in->f_op->llseek == no_llseek)
1114 if (copy_from_user(&offset, off_in, sizeof(loff_t)))
1120 ret = do_splice_to(in, off, pipe, len, flags);
1122 if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
1132 * Map an iov into an array of pages and offset/length tuples. With the
1133 * partial_page structure, we can map several non-contiguous ranges into
1134 * our one pages[] map instead of splitting that operation into pieces.
1135 * Could easily be exported as a generic helper for other users, in which
1136 * case one would probably want to add a 'max_nr_pages' parameter as well.
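 *
 * A short worked example of the page math done below (numbers chosen
 * for illustration, assuming PAGE_SIZE is 4096): for iov_base = 0x1003
 * and iov_len = 8192, off = 0x1003 & ~PAGE_MASK = 3 and
 * npages = (3 + 8192 + 4095) >> PAGE_SHIFT = 3, i.e. the buffer
 * straddles three pages even though it is only two pages long.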
1138 static int get_iovec_page_array(const struct iovec __user *iov,
1139 unsigned int nr_vecs, struct page **pages,
1140 struct partial_page *partial, int aligned)
1142 int buffers = 0, error = 0;
1145 * It's ok to take the mmap_sem for reading, even
1146 * across a "get_user()".
1148 down_read(&current->mm->mmap_sem);
1151 unsigned long off, npages;
1157 * Get user address base and length for this iovec.
1159 error = get_user(base, &iov->iov_base);
1160 if (unlikely(error))
1162 error = get_user(len, &iov->iov_len);
1163 if (unlikely(error))
1167 * Sanity check this iovec. 0 read succeeds.
1172 if (unlikely(!base))
1176 * Get this base offset and number of pages, then map
1177 * in the user pages.
1179 off = (unsigned long) base & ~PAGE_MASK;
1182 * If asked for alignment, the offset must be zero and the
1183 * length a multiple of the PAGE_SIZE.
1186 if (aligned && (off || len & ~PAGE_MASK))
1189 npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1190 if (npages > PIPE_BUFFERS - buffers)
1191 npages = PIPE_BUFFERS - buffers;
1193 error = get_user_pages(current, current->mm,
1194 (unsigned long) base, npages, 0, 0,
1195 &pages[buffers], NULL);
1197 if (unlikely(error <= 0))
1201 * Fill this contiguous range into the partial page map.
1203 for (i = 0; i < error; i++) {
1204 const int plen = min_t(size_t, len, PAGE_SIZE - off);
1206 partial[buffers].offset = off;
1207 partial[buffers].len = plen;
1215 * We didn't complete this iov, stop here since it probably
1216 * means we have to move some of this into a pipe to
1217 * be able to continue.
1223 * Don't continue if we mapped fewer pages than we asked for,
1224 * or if we mapped the max number of pages that we have room for.
1227 if (error < npages || buffers == PIPE_BUFFERS)
1234 up_read(&current->mm->mmap_sem);
1243 * vmsplice splices a user address range into a pipe. It can be thought of
1244 * as splice-from-memory, where the regular splice is splice-from-file (or
1245 * to file). In both cases the output is a pipe, naturally.
1247 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
1248 * not the other way around. Splicing from user memory is a simple operation
1249 * that can be supported without any funky alignment restrictions or nasty
1250 * vm tricks. We simply map in the user memory and fill the pages into a pipe.
1251 * The reverse isn't quite as easy, though. There are two possible solutions for that:
1254 * - memcpy() the data internally, at which point we might as well just
1255 * do a regular read() on the buffer anyway.
1256 * - Lots of nasty vm tricks, that are neither fast nor flexible (it
1257 * has restrictions on both ends of the pipe).
1259 * Alas, it isn't here.
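 *
 * A hedged userspace sketch of vmsplice(2) as described above, gifting
 * a page-aligned buffer to a pipe. SPLICE_F_GIFT is what later allows
 * the pages to be stolen instead of copied; buf and pipe_wr_fd are
 * hypothetical, and buf must be page aligned with a page-multiple
 * length for the gift to be accepted (see the aligned check below).
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/uio.h>
 *
 *	struct iovec iov = {
 *		.iov_base	= buf,
 *		.iov_len	= buf_len,
 *	};
 *	ssize_t n = vmsplice(pipe_wr_fd, &iov, 1, SPLICE_F_GIFT);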
1262 static long do_vmsplice(struct file *file, const struct iovec __user *iov,
1263 unsigned long nr_segs, unsigned int flags)
1265 struct pipe_inode_info *pipe;
1266 struct page *pages[PIPE_BUFFERS];
1267 struct partial_page partial[PIPE_BUFFERS];
1268 struct splice_pipe_desc spd = {
1272 .ops = &user_page_pipe_buf_ops,
1275 pipe = pipe_info(file->f_path.dentry->d_inode);
1278 if (unlikely(nr_segs > UIO_MAXIOV))
1280 else if (unlikely(!nr_segs))
1283 spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
1284 flags & SPLICE_F_GIFT);
1285 if (spd.nr_pages <= 0)
1286 return spd.nr_pages;
1288 return splice_to_pipe(pipe, &spd);
1291 asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
1292 unsigned long nr_segs, unsigned int flags)
1299 file = fget_light(fd, &fput);
1301 if (file->f_mode & FMODE_WRITE)
1302 error = do_vmsplice(file, iov, nr_segs, flags);
1304 fput_light(file, fput);
1310 asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
1311 int fd_out, loff_t __user *off_out,
1312 size_t len, unsigned int flags)
1315 struct file *in, *out;
1316 int fput_in, fput_out;
1322 in = fget_light(fd_in, &fput_in);
1324 if (in->f_mode & FMODE_READ) {
1325 out = fget_light(fd_out, &fput_out);
1327 if (out->f_mode & FMODE_WRITE)
1328 error = do_splice(in, off_in,
1331 fput_light(out, fput_out);
1335 fput_light(in, fput_in);
1342 * Make sure there's data to read. Wait for input if we can, otherwise
1343 * return an appropriate error.
1345 static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1350 * Check ->nrbufs without the inode lock first. This function
1351 * is speculative anyway, so missing one is ok.
1357 mutex_lock(&pipe->inode->i_mutex);
1359 while (!pipe->nrbufs) {
1360 if (signal_pending(current)) {
1366 if (!pipe->waiting_writers) {
1367 if (flags & SPLICE_F_NONBLOCK) {
1375 mutex_unlock(&pipe->inode->i_mutex);
1380 * Make sure there's writable room. Wait for room if we can, otherwise
1381 * return an appropriate error.
1383 static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
1388 * Check ->nrbufs without the inode lock first. This function
1389 * is speculative anyway, so missing one is ok.
1391 if (pipe->nrbufs < PIPE_BUFFERS)
1395 mutex_lock(&pipe->inode->i_mutex);
1397 while (pipe->nrbufs >= PIPE_BUFFERS) {
1398 if (!pipe->readers) {
1399 send_sig(SIGPIPE, current, 0);
1403 if (flags & SPLICE_F_NONBLOCK) {
1407 if (signal_pending(current)) {
1411 pipe->waiting_writers++;
1413 pipe->waiting_writers--;
1416 mutex_unlock(&pipe->inode->i_mutex);
1421 * Link contents of ipipe to opipe.
1423 static int link_pipe(struct pipe_inode_info *ipipe,
1424 struct pipe_inode_info *opipe,
1425 size_t len, unsigned int flags)
1427 struct pipe_buffer *ibuf, *obuf;
1428 int ret = 0, i = 0, nbuf;
1431 * Potential ABBA deadlock, work around it by ordering lock
1432 * grabbing by inode address. Otherwise two different processes
1433 * could deadlock (one doing tee from A -> B, the other from B -> A).
1435 inode_double_lock(ipipe->inode, opipe->inode);
1438 if (!opipe->readers) {
1439 send_sig(SIGPIPE, current, 0);
1446 * If we have iterated over all input buffers or run out of
1447 * output room, break.
1449 if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
1452 ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
1453 nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
1456 * Get a reference to this pipe buffer,
1457 * so we can copy the contents over.
1459 ibuf->ops->get(ipipe, ibuf);
1461 obuf = opipe->bufs + nbuf;
1465 * Don't inherit the gift flag, we need to
1466 * prevent multiple steals of this page.
1468 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1470 if (obuf->len > len)
1479 inode_double_unlock(ipipe->inode, opipe->inode);
1482 * If we put data in the output pipe, wake up any potential readers.
1486 if (waitqueue_active(&opipe->wait))
1487 wake_up_interruptible(&opipe->wait);
1488 kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
1495 * This is a tee(1) implementation that works on pipes. It doesn't copy
1496 * any data, it simply references the 'in' pages on the 'out' pipe.
1497 * The 'flags' used are the SPLICE_F_* variants, currently the only
1498 * applicable one is SPLICE_F_NONBLOCK.
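 *
 * A hedged userspace sketch of the tee(2) syscall this backs: data on
 * stdin (which must be a pipe) is duplicated into a second pipe and
 * then also spliced to stdout, roughly what tee(1) would do without
 * copying through user space. Error handling is trimmed and
 * out_pipe_fd is a hypothetical write end of another pipe.
 *
 *	ssize_t n = tee(STDIN_FILENO, out_pipe_fd, 65536, 0);
 *	if (n > 0)
 *		splice(STDIN_FILENO, NULL, STDOUT_FILENO, NULL, n,
 *		       SPLICE_F_MOVE);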
1500 static long do_tee(struct file *in, struct file *out, size_t len,
1503 struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
1504 struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
1508 * Duplicate the contents of ipipe to opipe without actually copying the data.
1511 if (ipipe && opipe && ipipe != opipe) {
1513 * Keep going, unless we encounter an error. The ipipe/opipe
1514 * ordering doesn't really matter.
1516 ret = link_ipipe_prep(ipipe, flags);
1518 ret = link_opipe_prep(opipe, flags);
1520 ret = link_pipe(ipipe, opipe, len, flags);
1521 if (!ret && (flags & SPLICE_F_NONBLOCK))
1530 asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
1539 in = fget_light(fdin, &fput_in);
1541 if (in->f_mode & FMODE_READ) {
1543 struct file *out = fget_light(fdout, &fput_out);
1546 if (out->f_mode & FMODE_WRITE)
1547 error = do_tee(in, out, len, flags);
1548 fput_light(out, fput_out);
1551 fput_light(in, fput_in);