fuse: add helper for asynchronous writes
fs/fuse/file.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static const struct file_operations fuse_direct_io_file_operations;

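/* Send an OPEN or OPENDIR request to userspace and wait for the reply */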
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
                          struct fuse_open_out *outargp)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_open_in inarg;
        struct fuse_req *req;
        int err;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
        if (!fc->atomic_o_trunc)
                inarg.flags &= ~O_TRUNC;
        req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(*outargp);
        req->out.args[0].value = outargp;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);

        return err;
}

struct fuse_file *fuse_file_alloc(void)
{
        struct fuse_file *ff;
        ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
        if (ff) {
                ff->reserved_req = fuse_request_alloc();
                if (!ff->reserved_req) {
                        kfree(ff);
                        ff = NULL;
                } else {
                        INIT_LIST_HEAD(&ff->write_entry);
                        atomic_set(&ff->count, 0);
                }
        }
        return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
        fuse_request_free(ff->reserved_req);
        kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
        atomic_inc(&ff->count);
        return ff;
}

static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
        dput(req->dentry);
        mntput(req->vfsmount);
        fuse_put_request(fc, req);
}

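/*
 * Drop a reference to the file.  When the last reference goes away the
 * reserved RELEASE request is sent in the background and the fuse_file
 * is freed.
 */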
static void fuse_file_put(struct fuse_file *ff)
{
        if (atomic_dec_and_test(&ff->count)) {
                struct fuse_req *req = ff->reserved_req;
                struct fuse_conn *fc = get_fuse_conn(req->dentry->d_inode);
                req->end = fuse_release_end;
                request_send_background(fc, req);
                kfree(ff);
        }
}

void fuse_finish_open(struct inode *inode, struct file *file,
                      struct fuse_file *ff, struct fuse_open_out *outarg)
{
        if (outarg->open_flags & FOPEN_DIRECT_IO)
                file->f_op = &fuse_direct_io_file_operations;
        if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
                invalidate_inode_pages2(inode->i_mapping);
        ff->fh = outarg->fh;
        file->private_data = fuse_file_get(ff);
}

int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_open_out outarg;
        struct fuse_file *ff;
        int err;

        /* VFS checks this, but only _after_ ->open() */
        if (file->f_flags & O_DIRECT)
                return -EINVAL;

        err = generic_file_open(inode, file);
        if (err)
                return err;

        ff = fuse_file_alloc();
        if (!ff)
                return -ENOMEM;

        err = fuse_send_open(inode, file, isdir, &outarg);
        if (err)
                fuse_file_free(ff);
        else {
                if (isdir)
                        outarg.open_flags &= ~FOPEN_DIRECT_IO;
                fuse_finish_open(inode, file, ff, &outarg);
        }

        return err;
}

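/* Fill in the reserved request for a later RELEASE or RELEASEDIR */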
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
        struct fuse_req *req = ff->reserved_req;
        struct fuse_release_in *inarg = &req->misc.release_in;

        inarg->fh = ff->fh;
        inarg->flags = flags;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = nodeid;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_release_in);
        req->in.args[0].value = inarg;
}

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
        struct fuse_file *ff = file->private_data;
        if (ff) {
                struct fuse_conn *fc = get_fuse_conn(inode);

                fuse_release_fill(ff, get_node_id(inode), file->f_flags,
                                  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

                /* Hold vfsmount and dentry until release is finished */
                ff->reserved_req->vfsmount = mntget(file->f_path.mnt);
                ff->reserved_req->dentry = dget(file->f_path.dentry);

                spin_lock(&fc->lock);
                list_del(&ff->write_entry);
                spin_unlock(&fc->lock);
                /*
                 * Normally this will send the RELEASE request,
                 * however if some asynchronous READ or WRITE requests
                 * are outstanding, the sending will be delayed
                 */
                fuse_file_put(ff);
        }

        /* Return value is ignored by VFS */
        return 0;
}

static int fuse_open(struct inode *inode, struct file *file)
{
        return fuse_open_common(inode, file, 0);
}

static int fuse_release(struct inode *inode, struct file *file)
{
        return fuse_release_common(inode, file, 0);
}

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
static u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
        u32 *k = fc->scramble_key;
        u64 v = (unsigned long) id;
        u32 v0 = v;
        u32 v1 = v >> 32;
        u32 sum = 0;
        int i;

        for (i = 0; i < 32; i++) {
                v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
                sum += 0x9E3779B9;
                v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
        }

        return (u64) v0 + ((u64) v1 << 32);
}

static int fuse_flush(struct file *file, fl_owner_t id)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_flush_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if (fc->no_flush)
                return 0;

        req = fuse_get_req_nofail(fc, file);
        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.lock_owner = fuse_lock_owner_id(fc, id);
        req->in.h.opcode = FUSE_FLUSH;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->force = 1;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                fc->no_flush = 1;
                err = 0;
        }
        return err;
}

int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
                      int isdir)
{
        struct inode *inode = de->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_req *req;
        struct fuse_fsync_in inarg;
        int err;

        if (is_bad_inode(inode))
                return -EIO;

        if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        memset(&inarg, 0, sizeof(inarg));
        inarg.fh = ff->fh;
        inarg.fsync_flags = datasync ? 1 : 0;
        req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS) {
                if (isdir)
                        fc->no_fsyncdir = 1;
                else
                        fc->no_fsync = 1;
                err = 0;
        }
        return err;
}

static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
        return fuse_fsync_common(file, de, datasync, 0);
}

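/* Fill in a READ request; the reply data is returned in the request's pages */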
void fuse_read_fill(struct fuse_req *req, struct fuse_file *ff,
                    struct inode *inode, loff_t pos, size_t count, int opcode)
{
        struct fuse_read_in *inarg = &req->misc.read_in;

        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(struct fuse_read_in);
        req->in.args[0].value = inarg;
        req->out.argpages = 1;
        req->out.argvar = 1;
        req->out.numargs = 1;
        req->out.args[0].size = count;
}

static size_t fuse_send_read(struct fuse_req *req, struct file *file,
                             struct inode *inode, loff_t pos, size_t count)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        fuse_read_fill(req, ff, inode, pos, count, FUSE_READ);
        request_send(fc, req);
        return req->out.args[0].size;
}

static int fuse_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        req = fuse_get_req(fc);
        err = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        req->out.page_zeroing = 1;
        req->num_pages = 1;
        req->pages[0] = page;
        fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err)
                SetPageUptodate(page);
        fuse_invalidate_attr(inode); /* atime changed */
 out:
        unlock_page(page);
        return err;
}

static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;

        fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (!req->out.h.error)
                        SetPageUptodate(page);
                else
                        SetPageError(page);
                unlock_page(page);
        }
        if (req->ff)
                fuse_file_put(req->ff);
        fuse_put_request(fc, req);
}

static void fuse_send_readpages(struct fuse_req *req, struct fuse_file *ff,
                                struct inode *inode)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        loff_t pos = page_offset(req->pages[0]);
        size_t count = req->num_pages << PAGE_CACHE_SHIFT;
        req->out.page_zeroing = 1;
        fuse_read_fill(req, ff, inode, pos, count, FUSE_READ);
        if (fc->async_read) {
                req->ff = fuse_file_get(ff);
                req->end = fuse_readpages_end;
                request_send_background(fc, req);
        } else {
                request_send(fc, req);
                fuse_readpages_end(fc, req);
        }
}

struct fuse_fill_data {
        struct fuse_req *req;
        struct fuse_file *ff;
        struct inode *inode;
};

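/*
 * Add a page to the current request.  If the request is already full or
 * the page is not contiguous with the previous one, send the request and
 * start a new one.
 */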
static int fuse_readpages_fill(void *_data, struct page *page)
{
        struct fuse_fill_data *data = _data;
        struct fuse_req *req = data->req;
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);

        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
             (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
             req->pages[req->num_pages - 1]->index + 1 != page->index)) {
                fuse_send_readpages(req, data->ff, inode);
                data->req = req = fuse_get_req(fc);
                if (IS_ERR(req)) {
                        unlock_page(page);
                        return PTR_ERR(req);
                }
        }
        req->pages[req->num_pages] = page;
        req->num_pages++;
        return 0;
}

static int fuse_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_fill_data data;
        int err;

        err = -EIO;
        if (is_bad_inode(inode))
                goto out;

        data.ff = file->private_data;
        data.inode = inode;
        data.req = fuse_get_req(fc);
        err = PTR_ERR(data.req);
        if (IS_ERR(data.req))
                goto out;

        err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
        if (!err) {
                if (data.req->num_pages)
                        fuse_send_readpages(data.req, data.ff, inode);
                else
                        fuse_put_request(fc, data.req);
        }
out:
        return err;
}

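/*
 * Fill in a WRITE request.  The 'writepage' flag marks a delayed write
 * from the page cache (FUSE_WRITE_CACHE).
 */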
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
                            struct inode *inode, loff_t pos, size_t count,
                            int writepage)
{
        struct fuse_write_in *inarg = &req->misc.write.in;
        struct fuse_write_out *outarg = &req->misc.write.out;

        memset(inarg, 0, sizeof(struct fuse_write_in));
        inarg->fh = ff->fh;
        inarg->offset = pos;
        inarg->size = count;
        inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
        req->in.h.opcode = FUSE_WRITE;
        req->in.h.nodeid = get_node_id(inode);
        req->in.argpages = 1;
        req->in.numargs = 2;
        req->in.args[0].size = sizeof(struct fuse_write_in);
        req->in.args[0].value = inarg;
        req->in.args[1].size = count;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(struct fuse_write_out);
        req->out.args[0].value = outarg;
}

static size_t fuse_send_write(struct fuse_req *req, struct file *file,
                              struct inode *inode, loff_t pos, size_t count)
{
        struct fuse_conn *fc = get_fuse_conn(inode);
        fuse_write_fill(req, file->private_data, inode, pos, count, 0);
        request_send(fc, req);
        return req->misc.write.out.size;
}

static int fuse_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;

        *pagep = __grab_cache_page(mapping, index);
        if (!*pagep)
                return -ENOMEM;
        return 0;
}

static int fuse_buffered_write(struct file *file, struct inode *inode,
                               loff_t pos, unsigned count, struct page *page)
{
        int err;
        size_t nres;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        req->num_pages = 1;
        req->pages[0] = page;
        req->page_offset = offset;
        nres = fuse_send_write(req, file, inode, pos, count);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err && !nres)
                err = -EIO;
        if (!err) {
                pos += nres;
                spin_lock(&fc->lock);
                fi->attr_version = ++fc->attr_version;
                if (pos > inode->i_size)
                        i_size_write(inode, pos);
                spin_unlock(&fc->lock);

                if (count == PAGE_CACHE_SIZE)
                        SetPageUptodate(page);
        }
        fuse_invalidate_attr(inode);
        return err ? err : nres;
}

static int fuse_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        int res = 0;

        if (copied)
                res = fuse_buffered_write(file, inode, pos, copied, page);

        unlock_page(page);
        page_cache_release(page);
        return res;
}

static void fuse_release_user_pages(struct fuse_req *req, int write)
{
        unsigned i;

        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
                if (write)
                        set_page_dirty_lock(page);
                put_page(page);
        }
}

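/* Pin the pages of the userspace buffer so the request can use them directly */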
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
                               unsigned nbytes, int write)
{
        unsigned long user_addr = (unsigned long) buf;
        unsigned offset = user_addr & ~PAGE_MASK;
        int npages;

        /* This doesn't work with nfsd */
        if (!current->mm)
                return -EPERM;

        nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
        npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
        npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm, user_addr, npages, write,
                                0, req->pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (npages < 0)
                return npages;

        req->num_pages = npages;
        req->page_offset = offset;
        return 0;
}

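/*
 * Synchronous direct I/O: transfer the user buffer in max_read/max_write
 * sized chunks, one request at a time, bypassing the page cache.
 */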
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos, int write)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        size_t nmax = write ? fc->max_write : fc->max_read;
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;

        if (is_bad_inode(inode))
                return -EIO;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        while (count) {
                size_t nres;
                size_t nbytes = min(count, nmax);
                int err = fuse_get_user_pages(req, buf, nbytes, !write);
                if (err) {
                        res = err;
                        break;
                }
                nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
                nbytes = min(count, nbytes);
                if (write)
                        nres = fuse_send_write(req, file, inode, pos, nbytes);
                else
                        nres = fuse_send_read(req, file, inode, pos, nbytes);
                fuse_release_user_pages(req, !write);
                if (req->out.h.error) {
                        if (!res)
                                res = req->out.h.error;
                        break;
                } else if (nres > nbytes) {
                        res = -EIO;
                        break;
                }
                count -= nres;
                res += nres;
                pos += nres;
                buf += nres;
                if (nres != nbytes)
                        break;
                if (count) {
                        fuse_put_request(fc, req);
                        req = fuse_get_req(fc);
                        if (IS_ERR(req))
                                break;
                }
        }
        fuse_put_request(fc, req);
        if (res > 0) {
                if (write) {
                        spin_lock(&fc->lock);
                        if (pos > inode->i_size)
                                i_size_write(inode, pos);
                        spin_unlock(&fc->lock);
                }
                *ppos = pos;
        }
        fuse_invalidate_attr(inode);

        return res;
}

static ssize_t fuse_direct_read(struct file *file, char __user *buf,
                                     size_t count, loff_t *ppos)
{
        return fuse_direct_io(file, buf, count, ppos, 0);
}

static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        ssize_t res;
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
                res = fuse_direct_io(file, buf, count, ppos, 1);
        mutex_unlock(&inode->i_mutex);
        return res;
}

static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        if ((vma->vm_flags & VM_SHARED)) {
                if ((vma->vm_flags & VM_WRITE))
                        return -ENODEV;
                else
                        vma->vm_flags &= ~VM_MAYWRITE;
        }
        return generic_file_mmap(file, vma);
}

static int fuse_set_page_dirty(struct page *page)
{
        printk("fuse_set_page_dirty: should not happen\n");
        dump_stack();
        return 0;
}

static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
                                  struct file_lock *fl)
{
        switch (ffl->type) {
        case F_UNLCK:
                break;

        case F_RDLCK:
        case F_WRLCK:
                if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
                    ffl->end < ffl->start)
                        return -EIO;

                fl->fl_start = ffl->start;
                fl->fl_end = ffl->end;
                fl->fl_pid = ffl->pid;
                break;

        default:
                return -EIO;
        }
        fl->fl_type = ffl->type;
        return 0;
}

static void fuse_lk_fill(struct fuse_req *req, struct file *file,
                         const struct file_lock *fl, int opcode, pid_t pid,
                         int flock)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_file *ff = file->private_data;
        struct fuse_lk_in *arg = &req->misc.lk_in;

        arg->fh = ff->fh;
        arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
        arg->lk.start = fl->fl_start;
        arg->lk.end = fl->fl_end;
        arg->lk.type = fl->fl_type;
        arg->lk.pid = pid;
        if (flock)
                arg->lk_flags |= FUSE_LK_FLOCK;
        req->in.h.opcode = opcode;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
}

static int fuse_getlk(struct file *file, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_lk_out outarg;
        int err;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (!err)
                err = convert_fuse_file_lock(&outarg.lk, fl);

        return err;
}

static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
        pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
        int err;

        /* Unlock on close is handled by the flush method */
        if (fl->fl_flags & FL_CLOSE)
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);

        fuse_lk_fill(req, file, fl, opcode, pid, flock);
        request_send(fc, req);
        err = req->out.h.error;
        /* locking is restartable */
        if (err == -EINTR)
                err = -ERESTARTSYS;
        fuse_put_request(fc, req);
        return err;
}

static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (cmd == F_GETLK) {
                if (fc->no_lock) {
                        posix_test_lock(file, fl);
                        err = 0;
                } else
                        err = fuse_getlk(file, fl);
        } else {
                if (fc->no_lock)
                        err = posix_lock_file_wait(file, fl);
                else
                        err = fuse_setlk(file, fl, 0);
        }
        return err;
}

static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;

        if (fc->no_lock) {
                err = flock_lock_file_wait(file, fl);
        } else {
                /* emulate flock with POSIX locks */
                fl->fl_owner = (fl_owner_t) file;
                err = fuse_setlk(file, fl, 1);
        }

        return err;
}

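/* Map a file block to a block on the underlying block device (FUSE_BMAP) */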
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
        struct fuse_bmap_in inarg;
        struct fuse_bmap_out outarg;
        int err;

        if (!inode->i_sb->s_bdev || fc->no_bmap)
                return 0;

        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return 0;

        memset(&inarg, 0, sizeof(inarg));
        inarg.block = block;
        inarg.blocksize = inode->i_sb->s_blocksize;
        req->in.h.opcode = FUSE_BMAP;
        req->in.h.nodeid = get_node_id(inode);
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(inarg);
        req->in.args[0].value = &inarg;
        req->out.numargs = 1;
        req->out.args[0].size = sizeof(outarg);
        req->out.args[0].value = &outarg;
        request_send(fc, req);
        err = req->out.h.error;
        fuse_put_request(fc, req);
        if (err == -ENOSYS)
                fc->no_bmap = 1;

        return err ? 0 : outarg.block;
}

static const struct file_operations fuse_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .write          = do_sync_write,
        .aio_write      = generic_file_aio_write,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        .splice_read    = generic_file_splice_read,
};

static const struct file_operations fuse_direct_io_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = fuse_direct_read,
        .write          = fuse_direct_write,
        .open           = fuse_open,
        .flush          = fuse_flush,
        .release        = fuse_release,
        .fsync          = fuse_fsync,
        .lock           = fuse_file_lock,
        .flock          = fuse_file_flock,
        /* no mmap and splice_read */
};

static const struct address_space_operations fuse_file_aops = {
        .readpage       = fuse_readpage,
        .write_begin    = fuse_write_begin,
        .write_end      = fuse_write_end,
        .readpages      = fuse_readpages,
        .set_page_dirty = fuse_set_page_dirty,
        .bmap           = fuse_bmap,
};

void fuse_init_file_inode(struct inode *inode)
{
        inode->i_fop = &fuse_file_operations;
        inode->i_data.a_ops = &fuse_file_aops;
}