2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files and fixing the initial implementation
15 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
20 #include <linux/file.h>
21 #include <linux/pagemap.h>
22 #include <linux/pipe_fs_i.h>
23 #include <linux/mm_inline.h>
24 #include <linux/swap.h>
25 #include <linux/writeback.h>
26 #include <linux/buffer_head.h>
27 #include <linux/module.h>
28 #include <linux/syscalls.h>
31 * Passed to the actors
 *
 * NOTE(review): these are the fields of struct splice_desc; the struct
 * opener/closer lines are elided from this excerpt -- confirm against the
 * full file.
34 unsigned int len, total_len; /* current and remaining length */
35 unsigned int flags; /* splice flags */
36 struct file *file; /* file to read/write */
37 loff_t pos; /* file position */
41 * Attempt to steal a page from a pipe buffer. This should perhaps go into
42 * a vm helper function, it's already simplified quite a bit by the
43 * addition of remove_mapping(). If success is returned, the caller may
44 * attempt to reuse this page for another destination.
 *
 * NOTE(review): several original lines (function braces, error returns)
 * are elided from this excerpt.
46 static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
47 struct pipe_buffer *buf)
49 struct page *page = buf->page;
50 struct address_space *mapping = page_mapping(page);
 /* The pipe buffer page must arrive locked and uptodate. */
52 WARN_ON(!PageLocked(page));
53 WARN_ON(!PageUptodate(page));
 /*
 * Drop fs-private state (e.g. buffer heads) first, otherwise the
 * page cannot be detached from its mapping.
 */
55 if (PagePrivate(page))
56 try_to_release_page(page, mapping_gfp_mask(mapping));
 /* Detach from the page cache; failure means others still use it. */
58 if (!remove_mapping(mapping, page))
 /*
 * Pull the page off the zone LRU under lru_lock so it is fully
 * owned by the caller afterwards.
 */
62 struct zone *zone = page_zone(page);
64 spin_lock_irq(&zone->lru_lock);
65 BUG_ON(!PageLRU(page));
67 del_page_from_lru(zone, page);
68 spin_unlock_irq(&zone->lru_lock);
/*
 * ->release() for page-cache-backed pipe buffers: drop the page reference
 * the buffer held on its page.
 */
74 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
75 struct pipe_buffer *buf)
77 page_cache_release(buf->page);
/*
 * ->map() for page-cache-backed pipe buffers: return a kernel-mapped
 * address for the buffer's page, or ERR_PTR(-ENODATA) if the page is not
 * uptodate (e.g. read IO failed or the page was truncated).
 *
 * NOTE(review): lines elided here -- presumably the page is locked before
 * the uptodate check; confirm against the full file.
 */
81 static void *page_cache_pipe_buf_map(struct file *file,
82 struct pipe_inode_info *info,
83 struct pipe_buffer *buf)
85 struct page *page = buf->page;
89 if (!PageUptodate(page)) {
96 return ERR_PTR(-ENODATA);
 /* Success: hand back a kmap'ed address; undone by ->unmap(). */
99 return kmap(buf->page);
/*
 * ->unmap() counterpart to page_cache_pipe_buf_map(): unlock the page.
 * NOTE(review): the matching kunmap() appears to be on a line elided from
 * this excerpt -- confirm.
 */
102 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
103 struct pipe_buffer *buf)
105 unlock_page(buf->page);
/*
 * Operations for pipe buffers whose pages come from the page cache
 * (as installed by move_to_pipe() below).
 */
109 static struct pipe_buf_operations page_cache_pipe_buf_ops = {
111 .map = page_cache_pipe_buf_map,
112 .unmap = page_cache_pipe_buf_unmap,
113 .release = page_cache_pipe_buf_release,
114 .steal = page_cache_pipe_buf_steal,
118 * Pipe output worker. This sets up our pipe format with the page cache
119 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 *
 * NOTE(review): many original lines (loop structure, break/error paths,
 * closing braces) are elided from this excerpt.
121 static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
122 int nr_pages, unsigned long offset,
123 unsigned long len, unsigned int flags)
125 struct pipe_inode_info *info;
126 int ret, do_wakeup, i;
 /* All pipe state below is protected by the pipe mutex. */
132 mutex_lock(PIPE_MUTEX(*inode));
134 info = inode->i_pipe;
 /* No readers left: writing to the pipe raises SIGPIPE, like write(2). */
138 if (!PIPE_READERS(*inode)) {
139 send_sig(SIGPIPE, current, 0);
 /* Room in the pipe: install the next page as a pipe buffer. */
146 if (bufs < PIPE_BUFFERS) {
147 int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
148 struct pipe_buffer *buf = info->bufs + newbuf;
149 struct page *page = pages[i++];
150 unsigned long this_len;
 /* First chunk may start mid-page; cap at end of page. */
152 this_len = PAGE_CACHE_SIZE - offset;
157 buf->offset = offset;
159 buf->ops = &page_cache_pipe_buf_ops;
160 info->nrbufs = ++bufs;
170 if (bufs < PIPE_BUFFERS)
 /* Pipe full and caller asked not to block. */
176 if (flags & SPLICE_F_NONBLOCK) {
182 if (signal_pending(current)) {
 /* Let readers drain the pipe before we sleep waiting for room. */
189 wake_up_interruptible_sync(PIPE_WAIT(*inode));
190 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
195 PIPE_WAITING_WRITERS(*inode)++;
197 PIPE_WAITING_WRITERS(*inode)--;
200 mutex_unlock(PIPE_MUTEX(*inode));
 /* Wake readers for the data we queued (done after dropping the mutex). */
203 wake_up_interruptible(PIPE_WAIT(*inode));
204 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
 /* Drop references on any pages we never managed to splice in. */
208 page_cache_release(pages[i++]);
/*
 * Look up (or create and start IO on) the page-cache pages covering
 * [f_pos, f_pos + len) of 'in', then splice them into 'pipe' via
 * move_to_pipe(). At most PIPE_BUFFERS pages per call.
 *
 * NOTE(review): several original lines are elided from this excerpt.
 */
213 static int __generic_file_splice_read(struct file *in, struct inode *pipe,
214 size_t len, unsigned int flags)
216 struct address_space *mapping = in->f_mapping;
217 unsigned int offset, nr_pages;
218 struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
 /* Split f_pos into a page index and an intra-page offset. */
223 index = in->f_pos >> PAGE_CACHE_SHIFT;
224 offset = in->f_pos & ~PAGE_CACHE_MASK;
225 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
227 if (nr_pages > PIPE_BUFFERS)
228 nr_pages = PIPE_BUFFERS;
231 * initiate read-ahead on this page range
233 do_page_cache_readahead(mapping, in, index, nr_pages);
236 * Get as many pages from the page cache as possible..
237 * Start IO on the page cache entries we create (we
238 * can assume that any pre-existing ones we find have
239 * already had IO started on them).
241 i = find_get_pages(mapping, index, nr_pages, pages);
244 * common case - we found all pages and they are contiguous,
 * skip the hole-filling below entirely.
247 if (i && (pages[i - 1]->index == index + i - 1))
251 * fill shadow[] with pages at the right locations, so we only
 * need to create the pages whose slots stayed NULL.
254 memset(shadow, 0, nr_pages * sizeof(struct page *));
255 for (j = 0; j < i; j++)
256 shadow[pages[j]->index - index] = pages[j];
259 * now fill in the holes
261 for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
268 * no page there, look one up / create it
270 page = find_or_create_page(mapping, pidx,
271 mapping_gfp_mask(mapping));
 /* Freshly created pages need IO started on them. */
275 if (PageUptodate(page))
278 error = mapping->a_ops->readpage(in, page);
280 if (unlikely(error)) {
281 page_cache_release(page);
 /* Error path: drop every page reference gathered in shadow[]. */
289 for (i = 0; i < nr_pages; i++) {
291 page_cache_release(shadow[i]);
 /* Copy the now-contiguous page set back into pages[]. */
296 memcpy(pages, shadow, i * sizeof(struct page *));
299 * Now we splice them into the pipe..
302 return move_to_pipe(pipe, pages, i, offset, len, flags);
306 * generic_file_splice_read - splice data from file to a pipe
307 * @in: file to splice from
308 * @pipe: pipe to splice to
309 * @len: number of bytes to splice
310 * @flags: splice modifier flags
312 * Will read pages from given file and fill them into a pipe.
 *
 * NOTE(review): the retry/accounting loop around the call below is elided
 * from this excerpt.
315 ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
316 size_t len, unsigned int flags)
324 ret = __generic_file_splice_read(in, pipe, len, flags);
 /* In blocking mode, keep going until len is satisfied or EOF/error. */
333 if (!(flags & SPLICE_F_NONBLOCK))
345 EXPORT_SYMBOL(generic_file_splice_read);
348 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using the file's ->sendpage() hook. Used as the move_from_pipe() actor
 * for splicing pipe data to a socket.
351 static int pipe_to_sendpage(struct pipe_inode_info *info,
352 struct pipe_buffer *buf, struct splice_desc *sd)
354 struct file *file = sd->file;
355 loff_t pos = sd->pos;
362 * sub-optimal, but we are limited by the pipe ->map. we don't
363 * need a kmap'ed buffer here, we just want to make sure we
364 * have the page pinned if the pipe page originates from the
 * page cache.
367 ptr = buf->ops->map(file, info, buf);
371 offset = pos & ~PAGE_CACHE_MASK;
 /* Hint MSG_MORE to the socket if more pipe data is still to come. */
372 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
374 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
376 buf->ops->unmap(info, buf);
384 * This is a little more tricky than the file -> pipe splicing. There are
385 * basically three cases:
387 * - Destination page already exists in the address space and there
388 * are users of it. For that case we have no other option that
389 * copying the data. Tough luck.
390 * - Destination page already exists in the address space, but there
391 * are no users of it. Make sure it's uptodate, then drop it. Fall
392 * through to last case.
393 * - Destination page does not exist, we can add the pipe page to
394 * the page cache and avoid the copy.
396 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
397 * sd->flags), we attempt to migrate pages from the pipe to the output
398 * file address space page cache. This is possible if no one else has
399 * the pipe page referenced outside of the pipe and page cache. If
400 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
401 * a new page in the output file page cache and fill/dirty that.
 *
 * NOTE(review): goto targets, retry labels and several error paths are
 * elided from this excerpt.
403 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
404 struct splice_desc *sd)
406 struct file *file = sd->file;
407 struct address_space *mapping = file->f_mapping;
415 * after this, page will be locked and unmapped
417 src = buf->ops->map(file, info, buf);
421 index = sd->pos >> PAGE_CACHE_SHIFT;
422 offset = sd->pos & ~PAGE_CACHE_MASK;
426 * reuse buf page, if SPLICE_F_MOVE is set
428 if (sd->flags & SPLICE_F_MOVE) {
430 * If steal succeeds, buf->page is now pruned from the vm
431 * side (LRU and page cache) and we can reuse it.
433 if (buf->ops->steal(info, buf))
 /* Insert the stolen pipe page directly into the destination cache. */
438 if (add_to_page_cache_lru(page, mapping, index,
439 mapping_gfp_mask(mapping)))
 /* Copy path: get (or create) the destination page-cache page. */
444 page = find_or_create_page(mapping, index,
445 mapping_gfp_mask(mapping));
450 * If the page is uptodate, it is also locked. If it isn't
451 * uptodate, we can mark it uptodate if we are filling the
452 * full page. Otherwise we need to read it in first...
454 if (!PageUptodate(page)) {
455 if (sd->len < PAGE_CACHE_SIZE) {
456 ret = mapping->a_ops->readpage(file, page);
462 if (!PageUptodate(page)) {
464 * page got invalidated, repeat
466 if (!page->mapping) {
468 page_cache_release(page);
 /* Filling the whole page: safe to mark it uptodate ourselves. */
475 WARN_ON(!PageLocked(page));
476 SetPageUptodate(page);
 /* AOP_TRUNCATED_PAGE from the aops means: drop the page and retry. */
481 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
482 if (ret == AOP_TRUNCATED_PAGE) {
483 page_cache_release(page);
489 char *dst = kmap_atomic(page, KM_USER0);
491 memcpy(dst + offset, src + buf->offset, sd->len);
492 flush_dcache_page(page);
493 kunmap_atomic(dst, KM_USER0);
496 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
497 if (ret == AOP_TRUNCATED_PAGE) {
498 page_cache_release(page);
 /* Throttle the writer if we are dirtying pages too fast. */
503 balance_dirty_pages_ratelimited(mapping);
506 page_cache_release(page);
509 buf->ops->unmap(info, buf);
/*
 * Actor signature used by move_from_pipe(): consume one pipe buffer
 * according to the splice_desc. See pipe_to_file/pipe_to_sendpage.
 */
513 typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
514 struct splice_desc *);
517 * Pipe input worker. Most of this logic works like a regular pipe, the
518 * key here is the 'actor' worker passed in that actually moves the data
519 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * NOTE(review): loop structure, 'actor' parameter declaration and several
 * break/error paths are elided from this excerpt.
521 static ssize_t move_from_pipe(struct inode *inode, struct file *out,
522 size_t len, unsigned int flags,
525 struct pipe_inode_info *info;
526 int ret, do_wakeup, err;
527 struct splice_desc sd;
 /* Pipe state below is protected by the pipe mutex. */
537 mutex_lock(PIPE_MUTEX(*inode));
539 info = inode->i_pipe;
541 int bufs = info->nrbufs;
544 int curbuf = info->curbuf;
545 struct pipe_buffer *buf = info->bufs + curbuf;
546 struct pipe_buf_operations *ops = buf->ops;
 /* Never hand the actor more than the caller asked for. */
549 if (sd.len > sd.total_len)
550 sd.len = sd.total_len;
552 err = actor(info, buf, &sd);
 /* -ENODATA is a soft condition from the actor, not a hard error. */
554 if (!ret && err != -ENODATA)
 /* Partial consume: advance within the buffer. */
561 buf->offset += sd.len;
 /* Buffer fully consumed: release it and advance the ring. */
565 ops->release(info, buf);
566 curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
567 info->curbuf = curbuf;
568 info->nrbufs = --bufs;
573 sd.total_len -= sd.len;
 /* Pipe empty: stop at EOF if no writers, or if nobody will refill it. */
580 if (!PIPE_WRITERS(*inode))
582 if (!PIPE_WAITING_WRITERS(*inode)) {
587 if (flags & SPLICE_F_NONBLOCK) {
593 if (signal_pending(current)) {
 /* Wake writers to refill the pipe before we sleep. */
600 wake_up_interruptible_sync(PIPE_WAIT(*inode));
601 kill_fasync(PIPE_FASYNC_WRITERS(*inode),SIGIO,POLL_OUT);
608 mutex_unlock(PIPE_MUTEX(*inode));
 /* Wake writers for the room we freed (after dropping the mutex). */
611 wake_up_interruptible(PIPE_WAIT(*inode));
612 kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 /* Update the output file size under the destination inode's i_mutex. */
615 mutex_lock(&out->f_mapping->host->i_mutex);
617 mutex_unlock(&out->f_mapping->host->i_mutex);
623 * generic_file_splice_write - splice data from a pipe to a file
 * @inode: pipe inode to read from
625 * @out: file to write to
626 * @len: number of bytes to splice
627 * @flags: splice modifier flags
629 * Will either move or copy pages (determined by @flags options) from
630 * the given pipe inode to the given file.
633 ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
634 size_t len, unsigned int flags)
636 struct address_space *mapping = out->f_mapping;
637 ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);
640 * if file or inode is SYNC and we actually wrote some data, sync it
 *
 * NOTE(review): the "&& ret > 0" half of this condition appears to be on
 * a line elided from this excerpt -- confirm against the full file.
642 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
644 struct inode *inode = mapping->host;
 /* generic_osync_inode() requires i_mutex held. */
647 mutex_lock(&inode->i_mutex);
648 err = generic_osync_inode(mapping->host, mapping,
649 OSYNC_METADATA|OSYNC_DATA);
650 mutex_unlock(&inode->i_mutex);
659 EXPORT_SYMBOL(generic_file_splice_write);
662 * generic_splice_sendpage - splice data from a pipe to a socket
 * @inode: pipe inode to read from
664 * @out: socket to write to
665 * @len: number of bytes to splice
666 * @flags: splice modifier flags
668 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved: pages are handed to the socket via ->sendpage().
672 ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
673 size_t len, unsigned int flags)
675 return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
678 EXPORT_SYMBOL(generic_splice_sendpage);
681 * Attempt to initiate a splice from pipe to file.
 *
 * Validates that 'out' supports ->splice_write(), is open for writing,
 * and passes rw_verify_area(), then delegates to the file's hook.
683 static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
689 if (!out->f_op || !out->f_op->splice_write)
692 if (!(out->f_mode & FMODE_WRITE))
696 ret = rw_verify_area(WRITE, out, &pos, len);
697 if (unlikely(ret < 0))
700 return out->f_op->splice_write(pipe, out, len, flags);
704 * Attempt to initiate a splice from a file to a pipe.
 *
 * Validates that 'in' supports ->splice_read(), is open for reading, and
 * passes rw_verify_area(); clamps the request to the bytes remaining
 * before EOF, then delegates to the file's hook.
706 static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
709 loff_t pos, isize, left;
712 if (!in->f_op || !in->f_op->splice_read)
715 if (!(in->f_mode & FMODE_READ))
719 ret = rw_verify_area(READ, in, &pos, len);
720 if (unlikely(ret < 0))
 /* Nothing to read at or past EOF. */
723 isize = i_size_read(in->f_mapping->host);
724 if (unlikely(in->f_pos >= isize))
727 left = isize - in->f_pos;
731 return in->f_op->splice_read(in, pipe, len, flags);
735 * Determine where to splice to/from.
 *
 * Exactly one of 'in'/'out' must be a pipe; dispatch to the matching
 * helper. NOTE(review): the is-a-pipe checks are on lines elided from
 * this excerpt.
737 static long do_splice(struct file *in, struct file *out, size_t len,
 /* 'in' is the pipe: splice pipe -> file. */
742 pipe = in->f_dentry->d_inode;
744 return do_splice_from(pipe, out, len, flags);
 /* 'out' is the pipe: splice file -> pipe. */
746 pipe = out->f_dentry->d_inode;
748 return do_splice_to(in, pipe, len, flags);
/*
 * splice(2) syscall entry: look up both fds, check FMODE_READ on the
 * input and FMODE_WRITE on the output, then hand off to do_splice().
 * fget_light/fput_light avoid refcounting for single-threaded callers.
 */
753 asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
756 struct file *in, *out;
757 int fput_in, fput_out;
763 in = fget_light(fdin, &fput_in);
765 if (in->f_mode & FMODE_READ) {
766 out = fget_light(fdout, &fput_out);
768 if (out->f_mode & FMODE_WRITE)
769 error = do_splice(in, out, len, flags);
770 fput_light(out, fput_out);
774 fput_light(in, fput_in);