[PATCH] introduce a "kernel-internal pipe object" abstraction
fs/splice.c
/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and to fix the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
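/*
 * Example userspace usage (an illustrative sketch only): with the
 * sys_splice() entry point defined at the bottom of this file, data can be
 * pumped from a file into a pipe and on from the pipe to a socket without
 * ever being copied into user memory. A raw syscall(2) wrapper and the
 * __NR_splice number are assumed here, since no libc wrapper exists, and
 * the socket side requires a ->splice_write hook (error handling omitted):
 *
 *	int pfd[2];
 *	long n;
 *
 *	pipe(pfd);
 *	n = syscall(__NR_splice, file_fd, pfd[1], 65536, 0);	/· file -> pipe ·/
 *	if (n > 0)
 *		syscall(__NR_splice, pfd[0], sock_fd, (size_t) n, 0);	/· pipe -> socket ·/
 */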
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>

/*
 * Passed to the actors
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function; it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageLocked(page));
	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}

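/*
 * Drop the pipe buffer's reference on its page cache page and clear the
 * stolen/LRU state so the slot can be reused.
 */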
static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * uh oh, read-error from disk
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * page is ok after all, fall through to mapping
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

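/*
 * Buffer operations for pipe buffers backed by page cache pages: ->map and
 * ->unmap make the data addressable for an actor, ->release drops our page
 * reference, and ->steal tries to take the page out of the page cache so
 * it can be reused for a zero-copy move.
 */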
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t move_to_pipe(struct pipe_inode_info *pipe, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		bufs = pipe->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			pipe->nrbufs = ++bufs;
			do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (bufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}

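/*
 * Look up (or create and start reading) the pages backing this range of
 * the file, then hand however many we found to move_to_pipe().
 */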
static int
__generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int offset, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index;
	int i;

	index = in->f_pos >> PAGE_CACHE_SHIFT;
	offset = in->f_pos & ~PAGE_CACHE_MASK;
	nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead for a single page at a non-zero offset (we are likely
	 * doing a small chunk splice and the page is already there).
	 */
	if (!offset || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * now fill in the holes
	 */
	for (i = 0; i < nr_pages; i++, index++) {
		/*
		 * no page there, look one up / create it
		 */
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			break;

		if (PageUptodate(page))
			unlock_page(page);
		else {
			int error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}
		}
		pages[i] = page;
	}

	if (i)
		return move_to_pipe(pipe, pages, i, offset, len, flags);

	return 0;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 *
 */
ssize_t generic_file_splice_read(struct file *in, struct pipe_inode_info *pipe,
				 size_t len, unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, pipe, len, flags);

		if (ret <= 0)
			break;

		in->f_pos += ret;
		len -= ret;
		spliced += ret;

		if (!(flags & SPLICE_F_NONBLOCK))
			continue;
		ret = -EAGAIN;
		break;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage().
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	unsigned int offset;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * sub-optimal, but we are limited by the pipe ->map. we don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	offset = pos & ~PAGE_CACHE_MASK;
	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos, more);

	buf->ops->unmap(info, buf);
	if (ret == sd->len)
		return 0;

	return -EIO;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no option other than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	/*
	 * reuse buf page, if SPLICE_F_MOVE is set
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		/*
		 * this will also set the page locked
		 */
		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		ret = -ENOMEM;
		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page)
			goto out_nomem;

		/*
		 * If the page is uptodate, it is also locked. If it isn't
		 * uptodate, we can mark it uptodate if we are filling the
		 * full page. Otherwise we need to read it in first...
		 */
		if (!PageUptodate(page)) {
			if (sd->len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * page got invalidated, repeat
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else {
				WARN_ON(!PageLocked(page));
				SetPageUptodate(page);
			}
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, sd->len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		/*
		 * Unlock before dropping our reference, in case ours is
		 * the last reference left on the page.
		 */
		unlock_page(page);
		page_cache_release(page);
	}
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}

typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe; the
 * key here is the 'actor' worker passed in, which actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t move_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		int bufs = pipe->nrbufs;

		if (bufs) {
			int curbuf = pipe->curbuf;
			struct pipe_buffer *buf = pipe->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->curbuf = curbuf;
				pipe->nrbufs = --bufs;
				do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (bufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	mutex_lock(&out->f_mapping->host->i_mutex);
	out->f_pos = sd.pos;
	mutex_unlock(&out->f_mapping->host->i_mutex);
	return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = move_from_pipe(pipe, out, len, flags, pipe_to_file);

	/*
	 * if file or inode is SYNC and we actually wrote some data, sync it
	 */
	if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
	    && ret > 0) {
		struct inode *inode = mapping->host;
		int err;

		mutex_lock(&inode->i_mutex);
		err = generic_osync_inode(mapping->host, mapping,
					  OSYNC_METADATA|OSYNC_DATA);
		mutex_unlock(&inode->i_mutex);

		if (err)
			ret = err;
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(pipe, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   size_t len, unsigned int flags)
{
	loff_t pos;
	int ret;

	if (!out->f_op || !out->f_op->splice_write)
		return -EINVAL;

	if (!(out->f_mode & FMODE_WRITE))
		return -EBADF;

	pos = out->f_pos;
	ret = rw_verify_area(WRITE, out, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, struct pipe_inode_info *pipe,
			 size_t len, unsigned int flags)
{
	loff_t pos, isize, left;
	int ret;

	if (!in->f_op || !in->f_op->splice_read)
		return -EINVAL;

	if (!(in->f_mode & FMODE_READ))
		return -EBADF;

	pos = in->f_pos;
	ret = rw_verify_area(READ, in, &pos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(in->f_pos >= isize))
		return 0;

	left = isize - in->f_pos;
	if (left < len)
		len = left;

	return in->f_op->splice_read(in, pipe, len, flags);
}

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, struct file *out, size_t len,
		      unsigned int flags)
{
	struct pipe_inode_info *pipe;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe)
		return do_splice_from(pipe, out, len, flags);

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe)
		return do_splice_to(in, pipe, len, flags);

	return -EINVAL;
}

asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fdout, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}