/*
 * Source snapshot: commit 9bfd6af0cf452a4a558ea4b88c9044777d924502
 * [safe/jmp/linux-2.6] / fs / splice.c
 */
1 /*
2  * "splice": joining two ropes together by interweaving their strands.
3  *
4  * This is the "extended pipe" functionality, where a pipe is used as
5  * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6  * buffer that you can use to transfer data from one end to the other.
7  *
8  * The traditional unix read/write is extended with a "splice()" operation
9  * that transfers data buffers to or from a pipe buffer.
10  *
11  * Named by Larry McVoy, original implementation from Linus, extended by
12  * Jens to support splicing to files and fixing the initial implementation
13  * bugs.
14  *
15  * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
16  * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
17  *
18  */
19 #include <linux/fs.h>
20 #include <linux/file.h>
21 #include <linux/pagemap.h>
22 #include <linux/pipe_fs_i.h>
23 #include <linux/mm_inline.h>
24 #include <linux/swap.h>
25 #include <linux/writeback.h>
26 #include <linux/buffer_head.h>
27 #include <linux/module.h>
28 #include <linux/syscalls.h>
29
/*
 * Passed to the actors (pipe_to_file / pipe_to_sendpage): describes one
 * pipe -> destination transfer in progress.
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags (SPLICE_F_*) */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
39
40 /*
41  * Attempt to steal a page from a pipe buffer. This should perhaps go into
42  * a vm helper function, it's already simplified quite a bit by the
43  * addition of remove_mapping(). If success is returned, the caller may
44  * attempt to reuse this page for another destination.
45  */
46 static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
47                                      struct pipe_buffer *buf)
48 {
49         struct page *page = buf->page;
50         struct address_space *mapping = page_mapping(page);
51
52         WARN_ON(!PageLocked(page));
53         WARN_ON(!PageUptodate(page));
54
55         /*
56          * At least for ext2 with nobh option, we need to wait on writeback
57          * completing on this page, since we'll remove it from the pagecache.
58          * Otherwise truncate wont wait on the page, allowing the disk
59          * blocks to be reused by someone else before we actually wrote our
60          * data to them. fs corruption ensues.
61          */
62         wait_on_page_writeback(page);
63
64         if (PagePrivate(page))
65                 try_to_release_page(page, mapping_gfp_mask(mapping));
66
67         if (!remove_mapping(mapping, page))
68                 return 1;
69
70         buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
71         return 0;
72 }
73
74 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
75                                         struct pipe_buffer *buf)
76 {
77         page_cache_release(buf->page);
78         buf->page = NULL;
79         buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
80 }
81
/*
 * Map a page-cache backed pipe buffer into kernel address space so the
 * caller can read its contents.
 *
 * The page may have been handed to the pipe while its read I/O was still
 * in flight, so if it is not uptodate we take the page lock and re-check:
 *  - a NULL page->mapping means the page was truncated/unhashed while it
 *    sat in the pipe -> -ENODATA (0-byte splice if this is the first page);
 *  - still !PageUptodate under the lock -> read error from disk, -EIO.
 * Otherwise the read completed in the meantime and we fall through.
 *
 * Returns a kmap()'ed address on success (paired with ->unmap), or an
 * ERR_PTR() on failure.
 */
static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * uh oh, read-error from disk
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * page is ok afterall, fall through to mapping
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}
120
121 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
122                                       struct pipe_buffer *buf)
123 {
124         kunmap(buf->page);
125 }
126
/*
 * Buffer operations for pipe buffers whose pages come from (or may be
 * moved into) the page cache.  can_merge is 0: unlike anonymous pipe
 * pages, these cannot be appended to in place by a later write.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
134
135 /*
136  * Pipe output worker. This sets up our pipe format with the page cache
137  * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
138  */
139 static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
140                             int nr_pages, unsigned long offset,
141                             unsigned long len, unsigned int flags)
142 {
143         struct pipe_inode_info *info;
144         int ret, do_wakeup, i;
145
146         ret = 0;
147         do_wakeup = 0;
148         i = 0;
149
150         mutex_lock(PIPE_MUTEX(*inode));
151
152         info = inode->i_pipe;
153         for (;;) {
154                 int bufs;
155
156                 if (!PIPE_READERS(*inode)) {
157                         send_sig(SIGPIPE, current, 0);
158                         if (!ret)
159                                 ret = -EPIPE;
160                         break;
161                 }
162
163                 bufs = info->nrbufs;
164                 if (bufs < PIPE_BUFFERS) {
165                         int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
166                         struct pipe_buffer *buf = info->bufs + newbuf;
167                         struct page *page = pages[i++];
168                         unsigned long this_len;
169
170                         this_len = PAGE_CACHE_SIZE - offset;
171                         if (this_len > len)
172                                 this_len = len;
173
174                         buf->page = page;
175                         buf->offset = offset;
176                         buf->len = this_len;
177                         buf->ops = &page_cache_pipe_buf_ops;
178                         info->nrbufs = ++bufs;
179                         do_wakeup = 1;
180
181                         ret += this_len;
182                         len -= this_len;
183                         offset = 0;
184                         if (!--nr_pages)
185                                 break;
186                         if (!len)
187                                 break;
188                         if (bufs < PIPE_BUFFERS)
189                                 continue;
190
191                         break;
192                 }
193
194                 if (flags & SPLICE_F_NONBLOCK) {
195                         if (!ret)
196                                 ret = -EAGAIN;
197                         break;
198                 }
199
200                 if (signal_pending(current)) {
201                         if (!ret)
202                                 ret = -ERESTARTSYS;
203                         break;
204                 }
205
206                 if (do_wakeup) {
207                         smp_mb();
208                         if (waitqueue_active(PIPE_WAIT(*inode)))
209                                 wake_up_interruptible_sync(PIPE_WAIT(*inode));
210                         kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
211                                     POLL_IN);
212                         do_wakeup = 0;
213                 }
214
215                 PIPE_WAITING_WRITERS(*inode)++;
216                 pipe_wait(inode);
217                 PIPE_WAITING_WRITERS(*inode)--;
218         }
219
220         mutex_unlock(PIPE_MUTEX(*inode));
221
222         if (do_wakeup) {
223                 smp_mb();
224                 if (waitqueue_active(PIPE_WAIT(*inode)))
225                         wake_up_interruptible(PIPE_WAIT(*inode));
226                 kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
227         }
228
229         while (i < nr_pages)
230                 page_cache_release(pages[i++]);
231
232         return ret;
233 }
234
/*
 * Read up to 'len' bytes from 'in' (starting at in->f_pos) into the page
 * cache and link the resulting pages into the pipe via move_to_pipe().
 *
 * Returns the number of bytes queued into the pipe, 0 if no pages could
 * be obtained, or a negative error propagated from move_to_pipe().
 */
static int __generic_file_splice_read(struct file *in, struct inode *pipe,
				      size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i;

	/* split f_pos into a page index and an offset within that page */
	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	/* the pipe holds at most PIPE_BUFFERS buffers at once */
	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * initiate read-ahead on this page range. however, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * now fill in the holes
	 */
	for (i = 0; i < nr_pages; i++, index++) {
		/*
		 * no page there, look one up / create it
		 */
		page = find_or_create_page(mapping, index,
						   mapping_gfp_mask(mapping));
		if (!page)
			break;

		if (PageUptodate(page))
			unlock_page(page);
		else {
			/*
			 * Start I/O on the (locked) page.  NOTE(review):
			 * the page is handed to the pipe without waiting
			 * for the read to finish - presumably ->readpage
			 * unlocks on completion (standard a_ops contract)
			 * and the pipe buffer ->map re-checks
			 * PageUptodate later; confirm.
			 */
			int error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}
		}
		pages[i] = page;
	}

	/* hand whatever we gathered (i pages) to the pipe */
	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return 0;
}
290
291 /**
292  * generic_file_splice_read - splice data from file to a pipe
293  * @in:         file to splice from
294  * @pipe:       pipe to splice to
295  * @len:        number of bytes to splice
296  * @flags:      splice modifier flags
297  *
298  * Will read pages from given file and fill them into a pipe.
299  *
300  */
301 ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
302                                  size_t len, unsigned int flags)
303 {
304         ssize_t spliced;
305         int ret;
306
307         ret = 0;
308         spliced = 0;
309         while (len) {
310                 ret = __generic_file_splice_read(in, pipe, len, flags);
311
312                 if (ret <= 0)
313                         break;
314
315                 in->f_pos += ret;
316                 len -= ret;
317                 spliced += ret;
318
319                 if (!(flags & SPLICE_F_NONBLOCK))
320                         continue;
321                 ret = -EAGAIN;
322                 break;
323         }
324
325         if (spliced)
326                 return spliced;
327
328         return ret;
329 }
330
331 EXPORT_SYMBOL(generic_file_splice_read);
332
333 /*
334  * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
335  * using sendpage().
336  */
337 static int pipe_to_sendpage(struct pipe_inode_info *info,
338                             struct pipe_buffer *buf, struct splice_desc *sd)
339 {
340         struct file *file = sd->file;
341         loff_t pos = sd->pos;
342         unsigned int offset;
343         ssize_t ret;
344         void *ptr;
345         int more;
346
347         /*
348          * sub-optimal, but we are limited by the pipe ->map. we don't
349          * need a kmap'ed buffer here, we just want to make sure we
350          * have the page pinned if the pipe page originates from the
351          * page cache
352          */
353         ptr = buf->ops->map(file, info, buf);
354         if (IS_ERR(ptr))
355                 return PTR_ERR(ptr);
356
357         offset = pos & ~PAGE_CACHE_MASK;
358         more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
359
360         ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
361
362         buf->ops->unmap(info, buf);
363         if (ret == sd->len)
364                 return 0;
365
366         return -EIO;
367 }
368
369 /*
370  * This is a little more tricky than the file -> pipe splicing. There are
371  * basically three cases:
372  *
373  *      - Destination page already exists in the address space and there
374  *        are users of it. For that case we have no other option that
375  *        copying the data. Tough luck.
376  *      - Destination page already exists in the address space, but there
377  *        are no users of it. Make sure it's uptodate, then drop it. Fall
378  *        through to last case.
379  *      - Destination page does not exist, we can add the pipe page to
380  *        the page cache and avoid the copy.
381  *
382  * If asked to move pages to the output file (SPLICE_F_MOVE is set in
383  * sd->flags), we attempt to migrate pages from the pipe to the output
384  * file address space page cache. This is possible if no one else has
385  * the pipe page referenced outside of the pipe and page cache. If
386  * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
387  * a new page in the output file page cache and fill/dirty that.
388  */
389 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
390                         struct splice_desc *sd)
391 {
392         struct file *file = sd->file;
393         struct address_space *mapping = file->f_mapping;
394         gfp_t gfp_mask = mapping_gfp_mask(mapping);
395         unsigned int offset;
396         struct page *page;
397         pgoff_t index;
398         char *src;
399         int ret;
400
401         /*
402          * make sure the data in this buffer is uptodate
403          */
404         src = buf->ops->map(file, info, buf);
405         if (IS_ERR(src))
406                 return PTR_ERR(src);
407
408         index = sd->pos >> PAGE_CACHE_SHIFT;
409         offset = sd->pos & ~PAGE_CACHE_MASK;
410
411         /*
412          * reuse buf page, if SPLICE_F_MOVE is set
413          */
414         if (sd->flags & SPLICE_F_MOVE) {
415                 /*
416                  * If steal succeeds, buf->page is now pruned from the vm
417                  * side (LRU and page cache) and we can reuse it.
418                  */
419                 if (buf->ops->steal(info, buf))
420                         goto find_page;
421
422                 /*
423                  * this will also set the page locked
424                  */
425                 page = buf->page;
426                 if (add_to_page_cache(page, mapping, index, gfp_mask))
427                         goto find_page;
428
429                 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
430                         lru_cache_add(page);
431         } else {
432 find_page:
433                 ret = -ENOMEM;
434                 page = find_or_create_page(mapping, index, gfp_mask);
435                 if (!page)
436                         goto out_nomem;
437
438                 /*
439                  * If the page is uptodate, it is also locked. If it isn't
440                  * uptodate, we can mark it uptodate if we are filling the
441                  * full page. Otherwise we need to read it in first...
442                  */
443                 if (!PageUptodate(page)) {
444                         if (sd->len < PAGE_CACHE_SIZE) {
445                                 ret = mapping->a_ops->readpage(file, page);
446                                 if (unlikely(ret))
447                                         goto out;
448
449                                 lock_page(page);
450
451                                 if (!PageUptodate(page)) {
452                                         /*
453                                          * page got invalidated, repeat
454                                          */
455                                         if (!page->mapping) {
456                                                 unlock_page(page);
457                                                 page_cache_release(page);
458                                                 goto find_page;
459                                         }
460                                         ret = -EIO;
461                                         goto out;
462                                 }
463                         } else {
464                                 WARN_ON(!PageLocked(page));
465                                 SetPageUptodate(page);
466                         }
467                 }
468         }
469
470         ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
471         if (ret == AOP_TRUNCATED_PAGE) {
472                 page_cache_release(page);
473                 goto find_page;
474         } else if (ret)
475                 goto out;
476
477         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
478                 char *dst = kmap_atomic(page, KM_USER0);
479
480                 memcpy(dst + offset, src + buf->offset, sd->len);
481                 flush_dcache_page(page);
482                 kunmap_atomic(dst, KM_USER0);
483         }
484
485         ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
486         if (ret == AOP_TRUNCATED_PAGE) {
487                 page_cache_release(page);
488                 goto find_page;
489         } else if (ret)
490                 goto out;
491
492         mark_page_accessed(page);
493         balance_dirty_pages_ratelimited(mapping);
494 out:
495         if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
496                 page_cache_release(page);
497                 unlock_page(page);
498         }
499 out_nomem:
500         buf->ops->unmap(info, buf);
501         return ret;
502 }
503
/* Actor signature: move one pipe buffer's worth of data to sd->file. */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * Consumes up to 'len' bytes from the pipe on 'inode', feeding each
 * buffer to 'actor' with an updated splice_desc.  Returns the number of
 * bytes moved, or a negative error if nothing was moved.  out->f_pos is
 * advanced by the amount moved.
 */
static ssize_t move_from_pipe(struct inode *inode, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	struct pipe_inode_info *info;
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	/* describe the transfer for the actor */
	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	mutex_lock(PIPE_MUTEX(*inode));

	info = inode->i_pipe;
	for (;;) {
		int bufs = info->nrbufs;

		if (bufs) {
			int curbuf = info->curbuf;
			struct pipe_buffer *buf = info->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* never hand the actor more than was asked for */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(info, buf, &sd);
			if (err) {
				/* -ENODATA means "nothing here", not failure */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			/* buffer fully consumed: release it and advance */
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(info, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				info->curbuf = curbuf;
				info->nrbufs = --bufs;
				do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		/* more buffers queued: keep draining before sleeping */
		if (bufs)
			continue;
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			/* no writer about to refill: return what we have */
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake sleeping writers before we block on an empty pipe */
		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(PIPE_WAIT(*inode)))
				wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode),SIGIO,POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(inode);
	}

	mutex_unlock(PIPE_MUTEX(*inode));

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(PIPE_WAIT(*inode)))
			wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}

	/* publish the new file position under the destination inode lock */
	mutex_lock(&out->f_mapping->host->i_mutex);
	out->f_pos = sd.pos;
	mutex_unlock(&out->f_mapping->host->i_mutex);
	return ret;

}
616
617 /**
618  * generic_file_splice_write - splice data from a pipe to a file
619  * @inode:      pipe inode
620  * @out:        file to write to
621  * @len:        number of bytes to splice
622  * @flags:      splice modifier flags
623  *
624  * Will either move or copy pages (determined by @flags options) from
625  * the given pipe inode to the given file.
626  *
627  */
628 ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
629                                   size_t len, unsigned int flags)
630 {
631         struct address_space *mapping = out->f_mapping;
632         ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);
633
634         /*
635          * if file or inode is SYNC and we actually wrote some data, sync it
636          */
637         if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
638             && ret > 0) {
639                 struct inode *inode = mapping->host;
640                 int err;
641
642                 mutex_lock(&inode->i_mutex);
643                 err = generic_osync_inode(mapping->host, mapping,
644                                                 OSYNC_METADATA|OSYNC_DATA);
645                 mutex_unlock(&inode->i_mutex);
646
647                 if (err)
648                         ret = err;
649         }
650
651         return ret;
652 }
653
654 EXPORT_SYMBOL(generic_file_splice_write);
655
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @inode:	pipe inode
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
				size_t len, unsigned int flags)
{
	/* thin wrapper: move_from_pipe() with the sendpage actor */
	return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
674
675 /*
676  * Attempt to initiate a splice from pipe to file.
677  */
678 static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
679                            unsigned int flags)
680 {
681         loff_t pos;
682         int ret;
683
684         if (!out->f_op || !out->f_op->splice_write)
685                 return -EINVAL;
686
687         if (!(out->f_mode & FMODE_WRITE))
688                 return -EBADF;
689
690         pos = out->f_pos;
691         ret = rw_verify_area(WRITE, out, &pos, len);
692         if (unlikely(ret < 0))
693                 return ret;
694
695         return out->f_op->splice_write(pipe, out, len, flags);
696 }
697
698 /*
699  * Attempt to initiate a splice from a file to a pipe.
700  */
701 static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
702                          unsigned int flags)
703 {
704         loff_t pos, isize, left;
705         int ret;
706
707         if (!in->f_op || !in->f_op->splice_read)
708                 return -EINVAL;
709
710         if (!(in->f_mode & FMODE_READ))
711                 return -EBADF;
712
713         pos = in->f_pos;
714         ret = rw_verify_area(READ, in, &pos, len);
715         if (unlikely(ret < 0))
716                 return ret;
717
718         isize = i_size_read(in->f_mapping->host);
719         if (unlikely(in->f_pos >= isize))
720                 return 0;
721         
722         left = isize - in->f_pos;
723         if (left < len)
724                 len = left;
725
726         return in->f_op->splice_read(in, pipe, len, flags);
727 }
728
729 /*
730  * Determine where to splice to/from.
731  */
732 static long do_splice(struct file *in, struct file *out, size_t len,
733                       unsigned int flags)
734 {
735         struct inode *pipe;
736
737         pipe = in->f_dentry->d_inode;
738         if (pipe->i_pipe)
739                 return do_splice_from(pipe, out, len, flags);
740
741         pipe = out->f_dentry->d_inode;
742         if (pipe->i_pipe)
743                 return do_splice_to(in, pipe, len, flags);
744
745         return -EINVAL;
746 }
747
748 asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
749 {
750         long error;
751         struct file *in, *out;
752         int fput_in, fput_out;
753
754         if (unlikely(!len))
755                 return 0;
756
757         error = -EBADF;
758         in = fget_light(fdin, &fput_in);
759         if (in) {
760                 if (in->f_mode & FMODE_READ) {
761                         out = fget_light(fdout, &fput_out);
762                         if (out) {
763                                 if (out->f_mode & FMODE_WRITE)
764                                         error = do_splice(in, out, len, flags);
765                                 fput_light(out, fput_out);
766                         }
767                 }
768
769                 fput_light(in, fput_in);
770         }
771
772         return error;
773 }