/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and to fix the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
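
/*
 * Illustrative userspace sketch (not part of this file; it assumes the
 * 4-argument sys_splice() defined at the bottom is wired up as syscall
 * __NR_splice): move data from an open file into a pipe, then from that
 * pipe out to a socket, without copying it through user memory.
 *
 *	int p[2];
 *
 *	pipe(p);
 *	// file -> pipe: the source fd is a regular file, the sink is p[1]
 *	syscall(__NR_splice, file_fd, p[1], 65536, 0);
 *	// pipe -> socket: the source fd is p[0], the sink is a socket
 *	syscall(__NR_splice, p[0], sock_fd, 65536, 0);
 *
 * The SPLICE_F_* flags (e.g. SPLICE_F_MOVE, SPLICE_F_NONBLOCK, SPLICE_F_MORE)
 * are the modifier flags tested throughout this file.
 */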
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>

/*
 * Passed to the actors
 */
struct splice_desc {
        unsigned int len, total_len;    /* current and remaining length */
        unsigned int flags;             /* splice flags */
        struct file *file;              /* file to read/write */
        loff_t pos;                     /* file position */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function; it's already simplified quite a bit by the
 * addition of remove_mapping(). If 0 is returned (success), the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        struct address_space *mapping = page_mapping(page);

        WARN_ON(!PageLocked(page));
        WARN_ON(!PageUptodate(page));

        if (PagePrivate(page))
                try_to_release_page(page, mapping_gfp_mask(mapping));

        if (!remove_mapping(mapping, page))
                return 1;

        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);

                spin_lock_irq(&zone->lru_lock);
                BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru(zone, page);
                spin_unlock_irq(&zone->lru_lock);
        }

        return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
                                        struct pipe_buffer *buf)
{
        page_cache_release(buf->page);
        buf->page = NULL;
}

static void *page_cache_pipe_buf_map(struct file *file,
                                     struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;

        lock_page(page);

        if (!PageUptodate(page)) {
                unlock_page(page);
                return ERR_PTR(-EIO);
        }

        if (!page->mapping) {
                unlock_page(page);
                return ERR_PTR(-ENODATA);
        }

        return kmap(buf->page);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
                                      struct pipe_buffer *buf)
{
        unlock_page(buf->page);
        kunmap(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
        .can_merge = 0,
        .map = page_cache_pipe_buf_map,
        .unmap = page_cache_pipe_buf_unmap,
        .release = page_cache_pipe_buf_release,
        .steal = page_cache_pipe_buf_steal,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
                            int nr_pages, unsigned long offset,
                            unsigned long len, unsigned int flags)
{
        struct pipe_inode_info *info;
        int ret, do_wakeup, i;

        ret = 0;
        do_wakeup = 0;
        i = 0;

        mutex_lock(PIPE_MUTEX(*inode));

        info = inode->i_pipe;
        for (;;) {
                int bufs;

                if (!PIPE_READERS(*inode)) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                bufs = info->nrbufs;
                if (bufs < PIPE_BUFFERS) {
                        int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
                        struct pipe_buffer *buf = info->bufs + newbuf;
                        struct page *page = pages[i++];
                        unsigned long this_len;

                        this_len = PAGE_CACHE_SIZE - offset;
                        if (this_len > len)
                                this_len = len;

                        buf->page = page;
                        buf->offset = offset;
                        buf->len = this_len;
                        buf->ops = &page_cache_pipe_buf_ops;
                        info->nrbufs = ++bufs;
                        do_wakeup = 1;

                        ret += this_len;
                        len -= this_len;
                        offset = 0;
                        if (!--nr_pages)
                                break;
                        if (!len)
                                break;
                        if (bufs < PIPE_BUFFERS)
                                continue;

                        break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        wake_up_interruptible_sync(PIPE_WAIT(*inode));
                        kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
                                    POLL_IN);
                        do_wakeup = 0;
                }

                PIPE_WAITING_WRITERS(*inode)++;
                pipe_wait(inode);
                PIPE_WAITING_WRITERS(*inode)--;
        }

        mutex_unlock(PIPE_MUTEX(*inode));

        if (do_wakeup) {
                wake_up_interruptible(PIPE_WAIT(*inode));
                kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
        }

        while (i < nr_pages)
                page_cache_release(pages[i++]);

        return ret;
}
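
/*
 * A worked example of the ring indexing used above (PIPE_BUFFERS == 16 is
 * assumed purely for illustration): with curbuf == 14 and nrbufs == 3, the
 * next free slot is
 *
 *	newbuf = (14 + 3) & (16 - 1) = 17 & 15 = 1
 *
 * i.e. the slot index wraps around the bufs[] array, so the pipe behaves
 * as a circular buffer of page-sized segments.
 */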

static int __generic_file_splice_read(struct file *in, struct inode *pipe,
                                      size_t len, unsigned int flags)
{
        struct address_space *mapping = in->f_mapping;
        unsigned int offset, nr_pages;
        struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
        struct page *page;
        pgoff_t index, pidx;
        int i, j;

        index = in->f_pos >> PAGE_CACHE_SHIFT;
        offset = in->f_pos & ~PAGE_CACHE_MASK;
        nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (nr_pages > PIPE_BUFFERS)
                nr_pages = PIPE_BUFFERS;
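
        /*
         * Worked example of the index/offset math above (assuming 4 KiB
         * pages, so PAGE_CACHE_SHIFT == 12): for f_pos == 5000 and
         * len == 10000,
         *
         *	index    = 5000 >> 12                 = 1
         *	offset   = 5000 & 4095                = 904
         *	nr_pages = (10000 + 904 + 4095) >> 12 = 3
         *
         * i.e. the request touches pages 1..3 and starts 904 bytes into
         * the first of them.
         */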

        /*
         * initiate read-ahead on this page range
         */
        do_page_cache_readahead(mapping, in, index, nr_pages);

        /*
         * Get as many pages from the page cache as possible..
         * Start IO on the page cache entries we create (we
         * can assume that any pre-existing ones we find have
         * already had IO started on them).
         */
        i = find_get_pages(mapping, index, nr_pages, pages);

        /*
         * common case - we found all pages and they are contiguous,
         * kick them off
         */
        if (i && (pages[i - 1]->index == index + i - 1))
                goto splice_them;

        /*
         * fill shadow[] with pages at the right locations, so we only
         * have to fill holes
         */
        memset(shadow, 0, nr_pages * sizeof(struct page *));
        for (j = 0; j < i; j++)
                shadow[pages[j]->index - index] = pages[j];

        /*
         * now fill in the holes
         */
        for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
                int error;

                if (shadow[i])
                        continue;

                /*
                 * no page there, look one up / create it
                 */
                page = find_or_create_page(mapping, pidx,
                                           mapping_gfp_mask(mapping));
                if (!page)
                        break;

                if (PageUptodate(page))
                        unlock_page(page);
                else {
                        error = mapping->a_ops->readpage(in, page);

                        if (unlikely(error)) {
                                page_cache_release(page);
                                break;
                        }
                }
                shadow[i] = page;
        }

        if (!i) {
                for (i = 0; i < nr_pages; i++) {
                        if (shadow[i])
                                page_cache_release(shadow[i]);
                }
                return 0;
        }

        memcpy(pages, shadow, i * sizeof(struct page *));

        /*
         * Now we splice them into the pipe..
         */
splice_them:
        return move_to_pipe(pipe, pages, i, offset, len, flags);
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:         file to splice from
 * @pipe:       pipe to splice to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Will read pages from the given file and fill them into a pipe.
 *
 */
ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
                                 size_t len, unsigned int flags)
{
        ssize_t spliced;
        int ret;

        ret = 0;
        spliced = 0;
        while (len) {
                ret = __generic_file_splice_read(in, pipe, len, flags);

                if (ret <= 0)
                        break;

                in->f_pos += ret;
                len -= ret;
                spliced += ret;

                if (!(flags & SPLICE_F_NONBLOCK))
                        continue;
                ret = -EAGAIN;
                break;
        }

        if (spliced)
                return spliced;

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);
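
/*
 * A minimal wiring sketch (illustrative, not from this file): a filesystem
 * whose I/O goes through the page cache can hook up splice support by
 * pointing its file_operations at the generic helpers; "example_fops" is a
 * hypothetical name:
 *
 *	struct file_operations example_fops = {
 *		...
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 *
 * do_splice_to() and do_splice_from() below call these through
 * f_op->splice_read and f_op->splice_write.
 */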

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
                            struct pipe_buffer *buf, struct splice_desc *sd)
{
        struct file *file = sd->file;
        loff_t pos = sd->pos;
        unsigned int offset;
        ssize_t ret;
        void *ptr;
        int more;

        /*
         * Sub-optimal, but we are limited by the pipe ->map. We don't
         * need a kmap'ed buffer here, we just want to make sure we
         * have the page pinned if the pipe page originates from the
         * page cache.
         */
        ptr = buf->ops->map(file, info, buf);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        offset = pos & ~PAGE_CACHE_MASK;
        more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

        ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

        buf->ops->unmap(info, buf);
        if (ret == sd->len)
                return 0;

        return -EIO;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *      - Destination page already exists in the address space and there
 *        are users of it. For that case we have no other option than
 *        copying the data. Tough luck.
 *      - Destination page already exists in the address space, but there
 *        are no users of it. Make sure it's uptodate, then drop it. Fall
 *        through to last case.
 *      - Destination page does not exist, we can add the pipe page to
 *        the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        struct file *file = sd->file;
        struct address_space *mapping = file->f_mapping;
        unsigned int offset;
        struct page *page;
        pgoff_t index;
        char *src;
        int ret, stolen;

        /*
         * after this, page will be locked and mapped
         */
        src = buf->ops->map(file, info, buf);
        if (IS_ERR(src))
                return PTR_ERR(src);

        index = sd->pos >> PAGE_CACHE_SHIFT;
        offset = sd->pos & ~PAGE_CACHE_MASK;
        stolen = 0;

        /*
         * reuse buf page, if SPLICE_F_MOVE is set
         */
        if (sd->flags & SPLICE_F_MOVE) {
                /*
                 * If steal succeeds, buf->page is now pruned from the vm
                 * side (LRU and page cache) and we can reuse it.
                 */
                if (buf->ops->steal(info, buf))
                        goto find_page;

                page = buf->page;
                stolen = 1;
                if (add_to_page_cache_lru(page, mapping, index,
                                                mapping_gfp_mask(mapping)))
                        goto find_page;
        } else {
find_page:
                ret = -ENOMEM;
                page = find_or_create_page(mapping, index,
                                                mapping_gfp_mask(mapping));
                if (!page)
                        goto out;

                /*
                 * If the page is uptodate, it is also locked. If it isn't
                 * uptodate, we can mark it uptodate if we are filling the
                 * full page. Otherwise we need to read it in first...
                 */
                if (!PageUptodate(page)) {
                        if (sd->len < PAGE_CACHE_SIZE) {
                                ret = mapping->a_ops->readpage(file, page);
                                if (unlikely(ret))
                                        goto out;

                                lock_page(page);

                                if (!PageUptodate(page)) {
                                        /*
                                         * page got invalidated, repeat
                                         */
                                        if (!page->mapping) {
                                                unlock_page(page);
                                                page_cache_release(page);
                                                goto find_page;
                                        }
                                        ret = -EIO;
                                        goto out;
                                }
                        } else {
                                WARN_ON(!PageLocked(page));
                                SetPageUptodate(page);
                        }
                }
        }

        ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        if (!stolen) {
                char *dst = kmap_atomic(page, KM_USER0);

                memcpy(dst + offset, src + buf->offset, sd->len);
                flush_dcache_page(page);
                kunmap_atomic(dst, KM_USER0);
        }

        ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        balance_dirty_pages_ratelimited(mapping);
out:
        if (!stolen) {
                page_cache_release(page);
                unlock_page(page);
        }
        buf->ops->unmap(info, buf);
        return ret;
}

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
                           struct splice_desc *);
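
/*
 * A hypothetical actor, as a sketch of the contract move_from_pipe() expects
 * (illustrative only, not part of this file): an actor consumes sd->len bytes
 * from 'buf' at file position sd->pos and returns 0 on success or a negative
 * error code. This one would simply discard the data:
 *
 *	static int pipe_to_null(struct pipe_inode_info *info,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		return 0;	// pretend sd->len bytes were consumed
 *	}
 *
 * move_from_pipe() below then advances buf->offset/buf->len and sd->pos by
 * sd->len for every successful actor call.
 */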

/*
 * Pipe input worker. Most of this logic works like a regular pipe; the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct inode *inode, struct file *out,
                              size_t len, unsigned int flags,
                              splice_actor *actor)
{
        struct pipe_inode_info *info;
        int ret, do_wakeup, err;
        struct splice_desc sd;

        ret = 0;
        do_wakeup = 0;

        sd.total_len = len;
        sd.flags = flags;
        sd.file = out;
        sd.pos = out->f_pos;

        mutex_lock(PIPE_MUTEX(*inode));

        info = inode->i_pipe;
        for (;;) {
                int bufs = info->nrbufs;

                if (bufs) {
                        int curbuf = info->curbuf;
                        struct pipe_buffer *buf = info->bufs + curbuf;
                        struct pipe_buf_operations *ops = buf->ops;

                        sd.len = buf->len;
                        if (sd.len > sd.total_len)
                                sd.len = sd.total_len;

                        err = actor(info, buf, &sd);
                        if (err) {
                                if (!ret && err != -ENODATA)
                                        ret = err;

                                break;
                        }

                        ret += sd.len;
                        buf->offset += sd.len;
                        buf->len -= sd.len;
                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(info, buf);
                                curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
                                info->curbuf = curbuf;
                                info->nrbufs = --bufs;
                                do_wakeup = 1;
                        }

                        sd.pos += sd.len;
                        sd.total_len -= sd.len;
                        if (!sd.total_len)
                                break;
                }

                if (bufs)
                        continue;
                if (!PIPE_WRITERS(*inode))
                        break;
                if (!PIPE_WAITING_WRITERS(*inode)) {
                        if (ret)
                                break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        wake_up_interruptible_sync(PIPE_WAIT(*inode));
                        kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
                        do_wakeup = 0;
                }

                pipe_wait(inode);
        }

        mutex_unlock(PIPE_MUTEX(*inode));

        if (do_wakeup) {
                wake_up_interruptible(PIPE_WAIT(*inode));
                kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
        }

        mutex_lock(&out->f_mapping->host->i_mutex);
        out->f_pos = sd.pos;
        mutex_unlock(&out->f_mapping->host->i_mutex);
        return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @inode:      pipe inode
 * @out:        file to write to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
                                  size_t len, unsigned int flags)
{
        struct address_space *mapping = out->f_mapping;
        ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);

        /*
         * if file or inode is SYNC and we actually wrote some data, sync it
         */
        if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
            && ret > 0) {
                struct inode *inode = mapping->host;
                int err;

                mutex_lock(&inode->i_mutex);
                err = generic_osync_inode(mapping->host, mapping,
                                                OSYNC_METADATA|OSYNC_DATA);
                mutex_unlock(&inode->i_mutex);

                if (err)
                        ret = err;
        }

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @inode:      pipe inode
 * @out:        socket to write to
 * @len:        number of bytes to splice
 * @flags:      splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
                                size_t len, unsigned int flags)
{
        return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
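
/*
 * As with the file helpers above, this is meant to be plugged into a
 * file_operations splice_write hook; a socket file whose f_op->sendpage is
 * implemented (e.g. TCP) could, as a sketch, use:
 *
 *	.splice_write	= generic_splice_sendpage,
 *
 * pipe_to_sendpage() then hands each pipe page to f_op->sendpage without
 * copying the data.
 */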

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
                           unsigned int flags)
{
        loff_t pos;
        int ret;

        if (!out->f_op || !out->f_op->splice_write)
                return -EINVAL;

        if (!(out->f_mode & FMODE_WRITE))
                return -EBADF;

        pos = out->f_pos;
        ret = rw_verify_area(WRITE, out, &pos, len);
        if (unlikely(ret < 0))
                return ret;

        return out->f_op->splice_write(pipe, out, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
                         unsigned int flags)
{
        loff_t pos, isize, left;
        int ret;

        if (!in->f_op || !in->f_op->splice_read)
                return -EINVAL;

        if (!(in->f_mode & FMODE_READ))
                return -EBADF;

        pos = in->f_pos;
        ret = rw_verify_area(READ, in, &pos, len);
        if (unlikely(ret < 0))
                return ret;

        isize = i_size_read(in->f_mapping->host);
        if (unlikely(in->f_pos >= isize))
                return 0;

        left = isize - in->f_pos;
        if (left < len)
                len = left;

        return in->f_op->splice_read(in, pipe, len, flags);
}

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, struct file *out, size_t len,
                      unsigned int flags)
{
        struct inode *pipe;

        pipe = in->f_dentry->d_inode;
        if (pipe->i_pipe)
                return do_splice_from(pipe, out, len, flags);

        pipe = out->f_dentry->d_inode;
        if (pipe->i_pipe)
                return do_splice_to(in, pipe, len, flags);

        return -EINVAL;
}
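
/*
 * The dispatch above means the pipe may sit on either side of the call
 * (a worked example, with hypothetical descriptor names):
 *
 *	sys_splice(pipe_rd_fd, file_fd, len, flags)
 *		-> do_splice_from(): the pipe is the source, data goes to the file
 *	sys_splice(file_fd, pipe_wr_fd, len, flags)
 *		-> do_splice_to(): the file is the source, data goes to the pipe
 *
 * If neither descriptor refers to a pipe, -EINVAL is returned.
 */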

asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
{
        long error;
        struct file *in, *out;
        int fput_in, fput_out;

        if (unlikely(!len))
                return 0;

        error = -EBADF;
        in = fget_light(fdin, &fput_in);
        if (in) {
                if (in->f_mode & FMODE_READ) {
                        out = fget_light(fdout, &fput_out);
                        if (out) {
                                if (out->f_mode & FMODE_WRITE)
                                        error = do_splice(in, out, len, flags);
                                fput_light(out, fput_out);
                        }
                }

                fput_light(in, fput_in);
        }

        return error;
}