2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
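/*
 * Illustrative sketch only, not part of this file: the typical userspace use
 * of the splice() syscall implemented below is to shuffle data through a pipe
 * without copying it through user memory, e.g. file -> pipe -> socket.
 * Assumes a libc that exposes the splice() wrapper (otherwise use
 * syscall(__NR_splice, ...)); error handling is abbreviated and sock_fd is
 * assumed to be a connected socket.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int file_to_socket(int file_fd, int sock_fd, size_t len)
 *	{
 *		int pfd[2];
 *		ssize_t n;
 *
 *		if (pipe(pfd) < 0)
 *			return -1;
 *		while (len) {
 *			// file -> pipe (may return less than asked for)
 *			n = splice(file_fd, NULL, pfd[1], NULL, len,
 *				   SPLICE_F_MOVE | SPLICE_F_MORE);
 *			if (n <= 0)
 *				break;
 *			len -= n;
 *			// pipe -> socket, drain what we just queued
 *			while (n > 0) {
 *				ssize_t out = splice(pfd[0], NULL, sock_fd,
 *						     NULL, n, SPLICE_F_MOVE);
 *				if (out <= 0) {
 *					close(pfd[0]);
 *					close(pfd[1]);
 *					return -1;
 *				}
 *				n -= out;
 *			}
 *		}
 *		close(pfd[0]);
 *		close(pfd[1]);
 *		return 0;
 *	}
 */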
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
	struct page *page = buf->page;
	struct address_space *mapping;
	mapping = page_mapping(page);
	WARN_ON(!PageUptodate(page));
	 * At least for ext2 with nobh option, we need to wait on
	 * writeback completing on this page, since we'll remove it
	 * from the pagecache. Otherwise truncate won't wait on the
	 * page, allowing the disk blocks to be reused by someone else
	 * before we actually wrote our data to them. fs corruption ensues.
	wait_on_page_writeback(page);
	if (PagePrivate(page))
		try_to_release_page(page, GFP_KERNEL);
	 * If we succeeded in removing the mapping, set LRU flag
	if (remove_mapping(mapping, page)) {
		buf->flags |= PIPE_BUF_FLAG_LRU;
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 * Check whether the contents of buf are OK to access. Since the content
 * is a page cache page, IO may be in flight.
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
	struct page *page = buf->page;
	if (!PageUptodate(page)) {
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		if (!page->mapping) {
		 * Uh oh, read-error from disk.
		if (!PageUptodate(page)) {
		 * Page is ok after all, we are done.
static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
 * splice_to_pipe - fill passed data into a pipe
 * @pipe: pipe to fill
 * @spd contains a map of pages and len/offset tuples, along with
 * the struct pipe_buf_operations associated with these pages. This
 * function will link that data to the pipe.
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;
	mutex_lock(&pipe->inode->i_mutex);
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;
			if (!--spd->nr_pages)
			if (pipe->nrbufs < PIPE_BUFFERS)
		if (spd->flags & SPLICE_F_NONBLOCK) {
		if (signal_pending(current)) {
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe->waiting_writers++;
		pipe->waiting_writers--;
	mutex_unlock(&pipe->inode->i_mutex);
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	while (page_nr < spd_pages)
		page_cache_release(spd->pages[page_nr++]);
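/*
 * Worked example of the ring arithmetic used above (illustrative; assumes
 * PIPE_BUFFERS == 16, its value at the time of writing):
 *
 *	curbuf = 14, nrbufs = 3
 *	newbuf = (14 + 3) & (16 - 1) = 17 & 15 = 1
 *
 * i.e. the slot index wraps around the ring transparently; the reader side
 * consumes from curbuf and advances it with the same "& (PIPE_BUFFERS - 1)"
 * mask.
 */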
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	pgoff_t index, end_index;
	struct splice_pipe_desc spd = {
		.ops = &page_cache_pipe_buf_ops,
	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;
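	/*
	 * Worked example of the arithmetic above (illustrative; assumes
	 * PAGE_CACHE_SIZE == 4096): *ppos = 5000, len = 10000 gives
	 * index = 1, loff = 904 and nr_pages = (10000 + 904 + 4095) >> 12 = 3,
	 * i.e. the request straddles pages 1..3 and starts 904 bytes into the
	 * first of them.
	 */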
	 * Lookup the (hopefully) full range of pages we need.
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
	index += spd.nr_pages;
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	if (spd.nr_pages < nr_pages)
		page_cache_readahead_ondemand(mapping, &in->f_ra, in,
				NULL, index, nr_pages - spd.nr_pages);
	while (spd.nr_pages < nr_pages) {
		 * Page could be there, find_get_pages_contig() breaks on
		page = find_get_page(mapping, index);
			 * page didn't exist, allocate one.
			page = page_cache_alloc_cold(mapping);
			error = add_to_page_cache_lru(page, mapping, index,
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
		pages[spd.nr_pages++] = page;
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;
		 * this_len is the max we'll use from this page
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];
		if (PageReadahead(page))
			page_cache_readahead_ondemand(mapping, &in->f_ra, in,
					page, index, nr_pages - page_nr);
		 * If the page isn't uptodate, we may need to start io on it
		if (!PageUptodate(page)) {
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			if (flags & SPLICE_F_NONBLOCK) {
				if (TestSetPageLocked(page))
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			if (!page->mapping) {
			 * page was already under io and is now done, great
			if (PageUptodate(page)) {
			 * need to read in the page
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				if (error == AOP_TRUNCATED_PAGE)
		 * i_size must be checked after PageUptodate.
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		if (end_index == index) {
			 * max good bytes in this page
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			 * force quit after adding this page
			this_len = min(this_len, plen - loff);
		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);
	in->f_ra.prev_index = index;
	return splice_to_pipe(pipe, &spd);
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @ppos: position in @in
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 * Will read pages from given file and fill them into a pipe. Can be
 * used as long as the address_space operations for the source implements
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
	left = isize - *ppos;
	if (unlikely(left < len))
	while (len && !spliced) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
		if (flags & SPLICE_F_NONBLOCK) {
EXPORT_SYMBOL(generic_file_splice_read);
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	ret = buf->ops->confirm(pipe, buf);
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
	ret = file->f_op->sendpage(file, buf->page, buf->offset,
				   sd->len, &pos, more);
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
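/*
 * Illustrative userspace counterpart (not part of this file): a caller that
 * wants the move optimization described above simply passes SPLICE_F_MOVE
 * when splicing from a pipe into a file; the kernel falls back to copying
 * whenever the page cannot be stolen. Assumes a libc splice() wrapper and a
 * pipe read end in pfd_read:
 *
 *	ssize_t n = splice(pfd_read, NULL, file_fd, &file_off, 65536,
 *			   SPLICE_F_MOVE);
 */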
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	 * make sure the data in this buffer is uptodate
	ret = buf->ops->confirm(pipe, buf);
	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;
	page = find_lock_page(mapping, index);
		page = page_cache_alloc_cold(mapping);
		 * This will also lock the page
		ret = add_to_page_cache_lru(page, mapping, index,
	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
		loff_t isize = i_size_read(mapping->host);
		if (ret != AOP_TRUNCATED_PAGE)
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size. Trim these off again.
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);
	if (buf->page != page) {
		 * Careful, ->map() uses KM_USER0!
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);
		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
	 * Partial write has happened, so 'ret' is already initialized to the
	 * number of bytes written; there is nothing we have to do here.
	 * Return the number of bytes written and mark page as
	 * accessed, we are now done!
	mark_page_accessed(page);
	page_cache_release(page);
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 * This function does little more than loop over the pipe and call
 * @actor to do the actual moving of a single struct pipe_buffer to
 * the desired destination. See pipe_to_file, pipe_to_sendpage, or
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
	int ret, do_wakeup, err;
		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
		const struct pipe_buf_operations *ops = buf->ops;
		if (sd->len > sd->total_len)
			sd->len = sd->total_len;
		err = actor(pipe, buf, sd);
			if (!ret && err != -ENODATA)
		sd->total_len -= err;
			ops->release(pipe, buf);
			pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
		if (!pipe->waiting_writers) {
		if (sd->flags & SPLICE_F_NONBLOCK) {
		if (signal_pending(current)) {
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	if (waitqueue_active(&pipe->wait))
		wake_up_interruptible(&pipe->wait);
	kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
EXPORT_SYMBOL(__splice_from_pipe);
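/*
 * Minimal sketch of a custom actor for __splice_from_pipe() (illustrative
 * only; count_actor and counter are made up for this example, and the
 * splice_desc 'u.data' cookie is assumed to be free for the caller's use).
 * The actor is handed one pipe_buffer at a time together with the
 * splice_desc and returns how many bytes it consumed:
 *
 *	static int count_actor(struct pipe_inode_info *pipe,
 *			       struct pipe_buffer *buf, struct splice_desc *sd)
 *	{
 *		int ret = buf->ops->confirm(pipe, buf);
 *		if (ret)
 *			return ret;
 *		// "consume" sd->len bytes without touching the data
 *		*(size_t *)sd->u.data += sd->len;
 *		return sd->len;
 *	}
 *
 * It would then be driven the same way pipe_to_file() is:
 *
 *	size_t counter = 0;
 *	struct splice_desc sd = {
 *		.total_len = len,
 *		.flags = flags,
 *		.pos = 0,
 *		.u.data = &counter,
 *	};
 *	ret = __splice_from_pipe(pipe, &sd, count_actor);
 */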
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe: pipe to splice from
 * @out: file to splice to
 * @ppos: position in @out
 * @len: how many bytes to splice
 * @flags: splice modifier flags
 * @actor: handler that splices the data
 * See __splice_from_pipe. This function locks the input and output inodes,
 * otherwise it's identical to __splice_from_pipe().
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
	struct inode *inode = out->f_mapping->host;
	struct splice_desc sd = {
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquisition here.
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, &sd, actor);
	inode_double_unlock(inode, pipe->inode);
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
	err = remove_suid(out->f_path.dentry);
	ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
		unsigned long nr_pages;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		 * If file or inode is SYNC and we actually wrote some data,
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
EXPORT_SYMBOL(generic_file_splice_write_nolock);
 * generic_file_splice_write - splice data from a pipe to a file
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	err = should_remove_suid(out->f_path.dentry);
		mutex_lock(&inode->i_mutex);
		err = __remove_suid(out->f_path.dentry, err);
		mutex_unlock(&inode->i_mutex);
	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
		unsigned long nr_pages;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		 * If file or inode is SYNC and we actually wrote some data,
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
EXPORT_SYMBOL(generic_file_splice_write);
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 * Will send @len bytes from the pipe to a network socket. No data copying
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
EXPORT_SYMBOL(generic_splice_sendpage);
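/*
 * Illustrative note (not part of this file): when splicing to a socket in
 * several chunks, userspace can set SPLICE_F_MORE on all but the last call,
 * so pipe_to_sendpage() can tell the network layer that more data follows
 * (similar to MSG_MORE), e.g.:
 *
 *	splice(pfd, NULL, sock_fd, NULL, chunk, SPLICE_F_MORE);	// more follows
 *	splice(pfd, NULL, sock_fd, NULL, last_chunk, 0);	// final chunk
 */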
 * Attempt to initiate a splice from pipe to file.
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
	if (unlikely(!out->f_op || !out->f_op->splice_write))
	if (unlikely(!(out->f_mode & FMODE_WRITE)))
	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
	ret = security_file_permission(out, MAY_WRITE);
	if (unlikely(ret < 0))
	return out->f_op->splice_write(pipe, out, ppos, len, flags);
 * Attempt to initiate a splice from a file to a pipe.
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
	if (unlikely(!in->f_op || !in->f_op->splice_read))
	if (unlikely(!(in->f_mode & FMODE_READ)))
	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
	ret = security_file_permission(in, MAY_READ);
	if (unlikely(ret < 0))
	return in->f_op->splice_read(in, ppos, pipe, len, flags);
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in: file to splice from
 * @sd: actor information on where to splice to
 * @actor: handles the data splicing
 * This is a special case helper to splice directly between two
 * points, without requiring an explicit pipe. Internally an allocated
 * pipe is cached in the process, and reused during the life time of
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
	struct pipe_inode_info *pipe;
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	i_mode = in->f_path.dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		current->splice_pipe = pipe;
	len = sd->total_len;
	 * Don't block on output, we have to drain the direct pipe.
	sd->flags &= ~SPLICE_F_NONBLOCK;
		loff_t pos = sd->pos;
		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
		sd->total_len = read_len;
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0))
	pipe->nrbufs = pipe->curbuf = 0;
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;
			buf->ops->release(pipe, buf);
	pipe->nrbufs = pipe->curbuf = 0;
	 * If we transferred some data, return the number of bytes:
EXPORT_SYMBOL(splice_direct_to_actor);
static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
	struct file *file = sd->u.file;
	return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
 * do_splice_direct - splices data directly between two files
 * @in: file to splice from
 * @ppos: input file offset
 * @out: file to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 * For use by do_sendfile(). splice can easily emulate sendfile, but
 * doing it in the application would incur an extra system call
 * (splice in + splice out, as compared to just sendfile()). So this helper
 * can splice directly through a process-private pipe.
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
	struct splice_desc sd = {
	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
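/*
 * Illustrative comparison (not part of this file): what do_splice_direct()
 * saves userspace from doing by hand. Without it, emulating sendfile() with
 * splice() needs an explicit pipe and two system calls per chunk (error
 * handling omitted):
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(in_fd, &in_off, pfd[1], NULL, len, 0);
 *	if (n > 0)
 *		splice(pfd[0], NULL, out_fd, NULL, n, 0);
 *
 * With do_splice_direct(), sendfile(out_fd, in_fd, &in_off, len) reuses a
 * per-process internal pipe instead.
 */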
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
static inline struct pipe_inode_info *pipe_info(struct inode *inode)
	if (S_ISFIFO(inode->i_mode))
		return inode->i_pipe;
 * Determine where to splice to/from.
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	pipe = pipe_info(in->f_path.dentry->d_inode);
		if (out->f_op->llseek == no_llseek)
		if (copy_from_user(&offset, off_out, sizeof(loff_t)))
		ret = do_splice_from(pipe, out, off, len, flags);
		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
	pipe = pipe_info(out->f_path.dentry->d_inode);
		if (in->f_op->llseek == no_llseek)
		if (copy_from_user(&offset, off_in, sizeof(loff_t)))
		ret = do_splice_to(in, off, pipe, len, flags);
		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * a single pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
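/*
 * Worked example of the mapping below (illustrative; assumes PAGE_SIZE ==
 * 4096): an iovec of { .iov_base = (void *)0x1000f00, .iov_len = 8192 } gives
 * off = 0xf00 = 3840, npages = (3840 + 8192 + 4095) >> 12 = 3, and the three
 * partial_page entries cover 256, 4096 and 3840 bytes respectively.
 */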
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
	int buffers = 0, error = 0;
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	down_read(&current->mm->mmap_sem);
		unsigned long off, npages;
		 * Get user address base and length for this iovec.
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
		 * Sanity check this iovec. 0 read succeeds.
		if (unlikely(!base))
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		off = (unsigned long) base & ~PAGE_MASK;
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		if (aligned && (off || len & ~PAGE_MASK))
		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;
		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);
		if (unlikely(error <= 0))
		 * Fill this contiguous range into the partial page map.
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);
			partial[buffers].offset = off;
			partial[buffers].len = plen;
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		if (error < npages || buffers == PIPE_BUFFERS)
	up_read(&current->mm->mmap_sem);
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
	ret = buf->ops->confirm(pipe, buf);
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
		buf->ops->unmap(pipe, buf, src);
	 * No dice, use slow non-atomic map and copy
	src = buf->ops->map(pipe, buf, 0);
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
	sd->u.userptr += ret;
	buf->ops->unmap(pipe, buf, src);
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	pipe = pipe_info(file->f_path.dentry->d_inode);
	mutex_lock(&pipe->inode->i_mutex);
		 * Get user address base and length for this iovec.
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
		 * Sanity check this iovec. 0 read succeeds.
		if (unlikely(!base)) {
		sd.u.userptr = base;
		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
	mutex_unlock(&pipe->inode->i_mutex);
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.ops = &user_page_pipe_buf_ops,
	pipe = pipe_info(file->f_path.dentry->d_inode);
	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;
	return splice_to_pipe(pipe, &spd);
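/*
 * Illustrative userspace sketch (not part of this file): gifting a page-
 * aligned buffer into a pipe with vmsplice(). With SPLICE_F_GIFT the pages
 * may later be stolen by a SPLICE_F_MOVE splice out of the pipe, so the
 * buffer must not be reused afterwards. Assumes a libc vmsplice() wrapper;
 * aligned_buf, buf_len and pipe_write_fd are placeholders.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/uio.h>
 *
 *	struct iovec iov = {
 *		.iov_base = aligned_buf,	// page-aligned, multiple of page size
 *		.iov_len  = buf_len,
 *	};
 *	ssize_t n = vmsplice(pipe_write_fd, &iov, 1, SPLICE_F_GIFT);
 */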
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user pages and fill them into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restrictions on both ends of the pipe).
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
	if (unlikely(nr_segs > UIO_MAXIOV))
	else if (unlikely(!nr_segs))
	file = fget_light(fd, &fput);
		if (file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(file, iov, nr_segs, flags);
		else if (file->f_mode & FMODE_READ)
			error = vmsplice_to_user(file, iov, nr_segs, flags);
		fput_light(file, fput);
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
	struct file *in, *out;
	int fput_in, fput_out;
	in = fget_light(fd_in, &fput_in);
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
				fput_light(out, fput_out);
		fput_light(in, fput_in);
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	mutex_lock(&pipe->inode->i_mutex);
	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
	mutex_unlock(&pipe->inode->i_mutex);
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	if (pipe->nrbufs < PIPE_BUFFERS)
	mutex_lock(&pipe->inode->i_mutex);
	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
		if (flags & SPLICE_F_NONBLOCK) {
		if (signal_pending(current)) {
		pipe->waiting_writers++;
		pipe->waiting_writers--;
	mutex_unlock(&pipe->inode->i_mutex);
 * Link contents of ipipe to opipe.
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	inode_double_lock(ipipe->inode, opipe->inode);
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		ibuf->ops->get(ipipe, ibuf);
		obuf = opipe->bufs + nbuf;
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
		if (obuf->len > len)
	inode_double_unlock(ipipe->inode, opipe->inode);
	 * If we put data in the output pipe, wakeup any potential readers.
	if (waitqueue_active(&opipe->wait))
		wake_up_interruptible(&opipe->wait);
	kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
static long do_tee(struct file *in, struct file *out, size_t len,
	struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
	struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
	 * Duplicate the contents of ipipe to opipe without actually
	if (ipipe && opipe && ipipe != opipe) {
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		ret = link_ipipe_prep(ipipe, flags);
			ret = link_opipe_prep(opipe, flags);
				ret = link_pipe(ipipe, opipe, len, flags);
				if (!ret && (flags & SPLICE_F_NONBLOCK))
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
	in = fget_light(fdin, &fput_in);
		if (in->f_mode & FMODE_READ) {
			struct file *out = fget_light(fdout, &fput_out);
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
		fput_light(in, fput_in);
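/*
 * Illustrative userspace sketch (not part of this file): the intended use of
 * sys_tee() is duplicating a pipe's contents without consuming them, e.g. a
 * tee(1)-like filter that forwards stdin to stdout while also logging it to
 * a file. Assumes stdin and stdout are pipes, a libc with tee()/splice()
 * wrappers, and log_fd as a placeholder; error handling is abbreviated.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <unistd.h>
 *
 *	static int tee_stdin_to_stdout_and_file(int log_fd)
 *	{
 *		for (;;) {
 *			// duplicate stdin's pipe contents to stdout's pipe
 *			ssize_t n = tee(STDIN_FILENO, STDOUT_FILENO, INT_MAX, 0);
 *			if (n < 0)
 *				return -1;
 *			if (n == 0)
 *				return 0;	// writer closed the pipe
 *			// consume what tee() duplicated by splicing it to the log
 *			while (n > 0) {
 *				ssize_t m = splice(STDIN_FILENO, NULL, log_fd,
 *						   NULL, n, SPLICE_F_MOVE);
 *				if (m <= 0)
 *					return -1;
 *				n -= m;
 *			}
 *		}
 *	}
 */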