Btrfs: Fix extent replacement race
fs/btrfs/file.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                                         int write_bytes,
                                         struct page **prepared_pages,
                                         const char __user *buf)
{
        long page_fault = 0;
        int i;
        int offset = pos & (PAGE_CACHE_SIZE - 1);

        for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
                struct page *page = prepared_pages[i];
                fault_in_pages_readable(buf, count);

                /* Copy data from userspace to the current page */
                kmap(page);
                page_fault = __copy_from_user(page_address(page) + offset,
                                              buf, count);
                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
                kunmap(page);
                buf += count;
                write_bytes -= count;

                if (page_fault)
                        break;
        }
        return page_fault ? -EFAULT : 0;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
        size_t i;
        for (i = 0; i < num_pages; i++) {
                if (!pages[i])
                        break;
                /* page checked is some magic around finding pages that
                 * have been modified without going through btrfs_set_page_dirty;
                 * clear it here
                 */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
                page_cache_release(pages[i]);
        }
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct file *file,
                                   struct page **pages,
                                   size_t num_pages,
                                   loff_t pos,
                                   size_t write_bytes)
{
        int err = 0;
        int i;
        struct inode *inode = fdentry(file)->d_inode;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
        loff_t isize = i_size_read(inode);

        start_pos = pos & ~((u64)root->sectorsize - 1);
        num_bytes = (write_bytes + pos - start_pos +
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

        end_of_last_block = start_pos + num_bytes - 1;
        btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
        for (i = 0; i < num_pages; i++) {
                struct page *p = pages[i];
                SetPageUptodate(p);
                ClearPageChecked(p);
                set_page_dirty(p);
        }
        if (end_pos > isize) {
                i_size_write(inode, end_pos);
                /* we've only changed i_size in ram, and we haven't updated
                 * the disk i_size.  There is no need to log the inode
                 * at this time.
                 */
        }
        return err;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned)
{
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        u64 len = end - start + 1;
        int ret;
        int testend = 1;
        unsigned long flags;
        int compressed = 0;

        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
        while (1) {
                if (!split)
                        split = alloc_extent_map(GFP_NOFS);
                if (!split2)
                        split2 = alloc_extent_map(GFP_NOFS);

                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
                        write_unlock(&em_tree->lock);
                        break;
                }
                flags = em->flags;
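                /*
                 * pinned extents still have ordered IO in flight against
                 * them.  When skip_pinned is set, leave them in the cache
                 * and narrow the drop range so it skips over them.
                 */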
                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        if (em->start <= start &&
                            (!testend || em->start + em->len >= start + len)) {
                                free_extent_map(em);
                                write_unlock(&em_tree->lock);
                                break;
                        }
                        if (start < em->start) {
                                len = em->start - start;
                        } else {
                                len = start + len - (em->start + em->len);
                                start = em->start + em->len;
                        }
                        free_extent_map(em);
                        write_unlock(&em_tree->lock);
                        continue;
                }
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);

                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        split->orig_start = em->orig_start;
                        split->block_start = em->block_start;

                        if (compressed)
                                split->block_len = em->block_len;
                        else
                                split->block_len = split->len;

                        split->bdev = em->bdev;
                        split->flags = flags;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = split2;
                        split2 = NULL;
                }
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    testend && em->start + em->len > start + len) {
                        u64 diff = start + len - em->start;

                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
                        split->flags = flags;

                        if (compressed) {
                                split->block_len = em->block_len;
                                split->block_start = em->block_start;
                                split->orig_start = em->orig_start;
                        } else {
                                split->block_len = split->len;
                                split->block_start = em->block_start + diff;
                                split->orig_start = split->start;
                        }

                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = NULL;
                }
                write_unlock(&em_tree->lock);

                /* once for us */
                free_extent_map(em);
                /* once for the tree */
                free_extent_map(em);
        }
        if (split)
                free_extent_map(split);
        if (split2)
                free_extent_map(split2);
        return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a disk byte
 * offset that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct inode *inode,
                       u64 start, u64 end, u64 locked_end,
                       u64 inline_limit, u64 *hint_byte, int drop_cache)
{
        u64 extent_end = 0;
        u64 search_start = start;
        u64 ram_bytes = 0;
        u64 disk_bytenr = 0;
        u64 orig_locked_end = locked_end;
        u8 compression;
        u8 encryption;
        u16 other_encoding = 0;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *extent;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_file_extent_item old;
        int keep;
        int slot;
        int bookend;
        int found_type = 0;
        int found_extent;
        int found_inline;
        int recow;
        int ret;

        inline_limit = 0;
        if (drop_cache)
                btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        while (1) {
                recow = 0;
                btrfs_release_path(root, path);
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               search_start, -1);
                if (ret < 0)
                        goto out;
                if (ret > 0) {
                        if (path->slots[0] == 0) {
                                ret = 0;
                                goto out;
                        }
                        path->slots[0]--;
                }
next_slot:
                keep = 0;
                bookend = 0;
                found_extent = 0;
                found_inline = 0;
                compression = 0;
                encryption = 0;
                extent = NULL;
                leaf = path->nodes[0];
                slot = path->slots[0];
                ret = 0;
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
                    key.offset >= end) {
                        goto out;
                }
                if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
                    key.objectid != inode->i_ino) {
                        goto out;
                }
                if (recow) {
                        search_start = max(key.offset, start);
                        continue;
                }
                if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
                        extent = btrfs_item_ptr(leaf, slot,
                                                struct btrfs_file_extent_item);
                        found_type = btrfs_file_extent_type(leaf, extent);
                        compression = btrfs_file_extent_compression(leaf,
                                                                    extent);
                        encryption = btrfs_file_extent_encryption(leaf,
                                                                  extent);
                        other_encoding = btrfs_file_extent_other_encoding(leaf,
                                                                  extent);
                        if (found_type == BTRFS_FILE_EXTENT_REG ||
                            found_type == BTRFS_FILE_EXTENT_PREALLOC) {
                                extent_end =
                                     btrfs_file_extent_disk_bytenr(leaf,
                                                                   extent);
                                if (extent_end)
                                        *hint_byte = extent_end;

                                extent_end = key.offset +
                                     btrfs_file_extent_num_bytes(leaf, extent);
                                ram_bytes = btrfs_file_extent_ram_bytes(leaf,
                                                                extent);
                                found_extent = 1;
                        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                                found_inline = 1;
                                extent_end = key.offset +
                                     btrfs_file_extent_inline_len(leaf, extent);
                        }
                } else {
                        extent_end = search_start;
                }

                /* we found nothing we can drop */
                if ((!found_extent && !found_inline) ||
                    search_start >= extent_end) {
                        int nextret;
                        u32 nritems;
                        nritems = btrfs_header_nritems(leaf);
                        if (slot >= nritems - 1) {
                                nextret = btrfs_next_leaf(root, path);
                                if (nextret)
                                        goto out;
                                recow = 1;
                        } else {
                                path->slots[0]++;
                        }
                        goto next_slot;
                }

                if (end <= extent_end && start >= key.offset && found_inline)
                        *hint_byte = EXTENT_MAP_INLINE;

                if (found_extent) {
                        read_extent_buffer(leaf, &old, (unsigned long)extent,
                                           sizeof(old));
                }

                if (end < extent_end && end >= key.offset) {
                        bookend = 1;
                        if (found_inline && start <= key.offset)
                                keep = 1;
                }

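                /*
                 * "bookend" means part of the old extent survives past the
                 * end of the drop range.  Lock that tail in the io tree
                 * before taking an extra reference on the old disk extent;
                 * if the try-lock fails, drop the btree path first so we
                 * can block on the extent lock without deadlocking, then
                 * restart the search.
                 */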
                if (bookend && found_extent) {
                        if (locked_end < extent_end) {
                                ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
                                                locked_end, extent_end - 1,
                                                GFP_NOFS);
                                if (!ret) {
                                        btrfs_release_path(root, path);
                                        lock_extent(&BTRFS_I(inode)->io_tree,
                                                locked_end, extent_end - 1,
                                                GFP_NOFS);
                                        locked_end = extent_end;
                                        continue;
                                }
                                locked_end = extent_end;
                        }
                        disk_bytenr = le64_to_cpu(old.disk_bytenr);
                        if (disk_bytenr != 0) {
                                ret = btrfs_inc_extent_ref(trans, root,
                                           disk_bytenr,
                                           le64_to_cpu(old.disk_num_bytes), 0,
                                           root->root_key.objectid,
                                           key.objectid, key.offset -
                                           le64_to_cpu(old.offset));
                                BUG_ON(ret);
                        }
                }

                if (found_inline) {
                        u64 mask = root->sectorsize - 1;
                        search_start = (extent_end + mask) & ~mask;
                } else
                        search_start = extent_end;

                /* truncate existing extent */
                if (start > key.offset) {
                        u64 new_num;
                        u64 old_num;
                        keep = 1;
                        WARN_ON(start & (root->sectorsize - 1));
                        if (found_extent) {
                                new_num = start - key.offset;
                                old_num = btrfs_file_extent_num_bytes(leaf,
                                                                      extent);
                                *hint_byte =
                                        btrfs_file_extent_disk_bytenr(leaf,
                                                                      extent);
                                if (btrfs_file_extent_disk_bytenr(leaf,
                                                                  extent)) {
                                        inode_sub_bytes(inode, old_num -
                                                        new_num);
                                }
                                btrfs_set_file_extent_num_bytes(leaf,
                                                        extent, new_num);
                                btrfs_mark_buffer_dirty(leaf);
                        } else if (key.offset < inline_limit &&
                                   (end > extent_end) &&
                                   (inline_limit < extent_end)) {
                                u32 new_size;
                                new_size = btrfs_file_extent_calc_inline_size(
                                                   inline_limit - key.offset);
                                inode_sub_bytes(inode, extent_end -
                                                inline_limit);
                                btrfs_set_file_extent_ram_bytes(leaf, extent,
                                                        new_size);
                                if (!compression && !encryption) {
                                        btrfs_truncate_item(trans, root, path,
                                                            new_size, 1);
                                }
                        }
                }
                /* delete the entire extent */
                if (!keep) {
                        if (found_inline)
                                inode_sub_bytes(inode, extent_end -
                                                key.offset);
                        ret = btrfs_del_item(trans, root, path);
                        /* TODO update progress marker and return */
                        BUG_ON(ret);
                        extent = NULL;
                        btrfs_release_path(root, path);
                        /* the extent will be freed later */
                }
                if (bookend && found_inline && start <= key.offset) {
                        u32 new_size;
                        new_size = btrfs_file_extent_calc_inline_size(
                                                   extent_end - end);
                        inode_sub_bytes(inode, end - key.offset);
                        btrfs_set_file_extent_ram_bytes(leaf, extent,
                                                        new_size);
                        if (!compression && !encryption)
                                ret = btrfs_truncate_item(trans, root, path,
                                                          new_size, 0);
                        BUG_ON(ret);
                }
                /* create bookend, splitting the extent in two */
                if (bookend && found_extent) {
                        struct btrfs_key ins;
                        ins.objectid = inode->i_ino;
                        ins.offset = end;
                        btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

                        btrfs_release_path(root, path);
                        path->leave_spinning = 1;
                        ret = btrfs_insert_empty_item(trans, root, path, &ins,
                                                      sizeof(*extent));
                        BUG_ON(ret);

                        leaf = path->nodes[0];
                        extent = btrfs_item_ptr(leaf, path->slots[0],
                                                struct btrfs_file_extent_item);
                        write_extent_buffer(leaf, &old,
                                            (unsigned long)extent, sizeof(old));

                        btrfs_set_file_extent_compression(leaf, extent,
                                                          compression);
                        btrfs_set_file_extent_encryption(leaf, extent,
                                                         encryption);
                        btrfs_set_file_extent_other_encoding(leaf, extent,
                                                             other_encoding);
                        btrfs_set_file_extent_offset(leaf, extent,
                                    le64_to_cpu(old.offset) + end - key.offset);
                        WARN_ON(le64_to_cpu(old.num_bytes) <
                                (extent_end - end));
                        btrfs_set_file_extent_num_bytes(leaf, extent,
                                                        extent_end - end);

                        /*
                         * set the ram bytes to the size of the full extent
                         * before splitting.  This is a worst case flag,
                         * but it's the best we can do because we don't know
                         * how splitting affects compression
                         */
                        btrfs_set_file_extent_ram_bytes(leaf, extent,
                                                        ram_bytes);
                        btrfs_set_file_extent_type(leaf, extent, found_type);

                        btrfs_unlock_up_safe(path, 1);
                        btrfs_mark_buffer_dirty(path->nodes[0]);
                        btrfs_set_lock_blocking(path->nodes[0]);

                        path->leave_spinning = 0;
                        btrfs_release_path(root, path);
                        if (disk_bytenr != 0)
                                inode_add_bytes(inode, extent_end - end);
                }

                if (found_extent && !keep) {
                        u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

                        if (old_disk_bytenr != 0) {
                                inode_sub_bytes(inode,
                                                le64_to_cpu(old.num_bytes));
                                ret = btrfs_free_extent(trans, root,
                                                old_disk_bytenr,
                                                le64_to_cpu(old.disk_num_bytes),
                                                0, root->root_key.objectid,
                                                key.objectid, key.offset -
                                                le64_to_cpu(old.offset));
                                BUG_ON(ret);
                                *hint_byte = old_disk_bytenr;
                        }
                }

                if (search_start >= end) {
                        ret = 0;
                        goto out;
                }
        }
out:
        btrfs_free_path(path);
        if (locked_end > orig_locked_end) {
                unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
                              locked_end - 1, GFP_NOFS);
        }
        return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
                            u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 extent_end;

        if (slot < 0 || slot >= btrfs_header_nritems(leaf))
                return 0;

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
                return 0;

        fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
            btrfs_file_extent_compression(leaf, fi) ||
            btrfs_file_extent_encryption(leaf, fi) ||
            btrfs_file_extent_other_encoding(leaf, fi))
                return 0;

        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        if ((*start && *start != key.offset) || (*end && *end != extent_end))
                return 0;

        *start = key.offset;
        *end = extent_end;
        return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct inode *inode, u64 start, u64 end)
{
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        u64 bytenr;
        u64 num_bytes;
        u64 extent_end;
        u64 orig_offset;
        u64 other_start;
        u64 other_end;
        u64 split = start;
        u64 locked_end = end;
        int extent_type;
        int split_end = 1;
        int ret;

        btrfs_drop_extent_cache(inode, start, end - 1, 0);

        path = btrfs_alloc_path();
        BUG_ON(!path);
again:
        key.objectid = inode->i_ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        if (split == start)
                key.offset = split;
        else
                key.offset = split - 1;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0 && path->slots[0] > 0)
                path->slots[0]--;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        BUG_ON(key.objectid != inode->i_ino ||
               key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        extent_type = btrfs_file_extent_type(leaf, fi);
        BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
        extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
        BUG_ON(key.offset > start || extent_end < end);

        bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
        orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);

        if (key.offset == start)
                split = end;

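        /*
         * 'split' is the offset being cut at on this pass.  A write that
         * only covers the middle of the preallocated extent comes through
         * here twice: once for the front boundary and once, via 'again',
         * for the back boundary.
         */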
        if (key.offset == start && extent_end == end) {
                int del_nr = 0;
                int del_slot = 0;
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
                                     bytenr, &other_start, &other_end)) {
                        extent_end = other_end;
                        del_slot = path->slots[0] + 1;
                        del_nr++;
                        ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                                0, root->root_key.objectid,
                                                inode->i_ino, orig_offset);
                        BUG_ON(ret);
                }
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
                                     bytenr, &other_start, &other_end)) {
                        key.offset = other_start;
                        del_slot = path->slots[0];
                        del_nr++;
                        ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                                0, root->root_key.objectid,
                                                inode->i_ino, orig_offset);
                        BUG_ON(ret);
                }
                split_end = 0;
                if (del_nr == 0) {
                        btrfs_set_file_extent_type(leaf, fi,
                                                   BTRFS_FILE_EXTENT_REG);
                        goto done;
                }

                fi = btrfs_item_ptr(leaf, del_slot - 1,
                                    struct btrfs_file_extent_item);
                btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
                btrfs_set_file_extent_num_bytes(leaf, fi,
                                                extent_end - key.offset);
                btrfs_mark_buffer_dirty(leaf);

                ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
                BUG_ON(ret);
                goto release;
        } else if (split == start) {
                if (locked_end < extent_end) {
                        ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
                                        locked_end, extent_end - 1, GFP_NOFS);
                        if (!ret) {
                                btrfs_release_path(root, path);
                                lock_extent(&BTRFS_I(inode)->io_tree,
                                        locked_end, extent_end - 1, GFP_NOFS);
                                locked_end = extent_end;
                                goto again;
                        }
                        locked_end = extent_end;
                }
                btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
        } else {
                BUG_ON(key.offset != start);
                key.offset = split;
                btrfs_set_file_extent_offset(leaf, fi, key.offset -
                                             orig_offset);
                btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
                btrfs_set_item_key_safe(trans, root, path, &key);
                extent_end = split;
        }

        if (extent_end == end) {
                split_end = 0;
                extent_type = BTRFS_FILE_EXTENT_REG;
        }
        if (extent_end == end && split == start) {
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
                                     bytenr, &other_start, &other_end)) {
                        path->slots[0]++;
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        key.offset = split;
                        btrfs_set_item_key_safe(trans, root, path, &key);
                        btrfs_set_file_extent_offset(leaf, fi, key.offset -
                                                     orig_offset);
                        btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        other_end - split);
                        goto done;
                }
        }
        if (extent_end == end && split == end) {
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
                                     bytenr, &other_start, &other_end)) {
                        path->slots[0]--;
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
                                                        other_start);
                        goto done;
                }
        }

        btrfs_mark_buffer_dirty(leaf);

        ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                   root->root_key.objectid,
                                   inode->i_ino, orig_offset);
        BUG_ON(ret);
        btrfs_release_path(root, path);

        key.offset = start;
        ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
        BUG_ON(ret);

        leaf = path->nodes[0];
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        btrfs_set_file_extent_generation(leaf, fi, trans->transid);
        btrfs_set_file_extent_type(leaf, fi, extent_type);
        btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
        btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
        btrfs_set_file_extent_offset(leaf, fi, key.offset - orig_offset);
        btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
        btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
        btrfs_set_file_extent_compression(leaf, fi, 0);
        btrfs_set_file_extent_encryption(leaf, fi, 0);
        btrfs_set_file_extent_other_encoding(leaf, fi, 0);
done:
        btrfs_mark_buffer_dirty(leaf);

release:
        btrfs_release_path(root, path);
        if (split_end && split == start) {
                split = end;
                goto again;
        }
        if (locked_end > end) {
                unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
                              GFP_NOFS);
        }
        btrfs_free_path(path);
        return 0;
}

/*
 * this gets pages into the page cache and locks them down; it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         unsigned long last_index, size_t write_bytes)
{
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
        int err = 0;
        u64 start_pos;
        u64 last_pos;

        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

        if (start_pos > inode->i_size) {
                err = btrfs_cont_expand(inode, start_pos);
                if (err)
                        return err;
        }

        memset(pages, 0, num_pages * sizeof(struct page *));
again:
        for (i = 0; i < num_pages; i++) {
                pages[i] = grab_cache_page(inode->i_mapping, index + i);
                if (!pages[i]) {
                        err = -ENOMEM;
                        BUG_ON(1);
                }
                wait_on_page_writeback(pages[i]);
        }
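        /*
         * if the write starts below i_size, make sure there is no ordered
         * extent (IO still completing) over the range.  If there is one,
         * drop the extent lock and the pages, wait for it to finish and
         * start over.
         */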
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
                lock_extent(&BTRFS_I(inode)->io_tree,
                            start_pos, last_pos - 1, GFP_NOFS);
                ordered = btrfs_lookup_first_ordered_extent(inode,
                                                            last_pos - 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
                        unlock_extent(&BTRFS_I(inode)->io_tree,
                                      start_pos, last_pos - 1, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
                        }
                        btrfs_wait_ordered_range(inode, start_pos,
                                                 last_pos - start_pos);
                        goto again;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);

                clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
                                  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
                                  GFP_NOFS);
                unlock_extent(&BTRFS_I(inode)->io_tree,
                              start_pos, last_pos - 1, GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
                clear_page_dirty_for_io(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
}

static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        loff_t pos;
        loff_t start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
        int ret = 0;
        struct inode *inode = fdentry(file)->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
        int nrptrs;
        struct page *pinned[2];
        unsigned long first_index;
        unsigned long last_index;
        int will_write;

        will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
                      (file->f_flags & O_DIRECT));

        nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
                     PAGE_CACHE_SIZE / (sizeof(struct page *)));
        pinned[0] = NULL;
        pinned[1] = NULL;

        pos = *ppos;
        start_pos = pos;

        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out_nolock;
        if (count == 0)
                goto out_nolock;

        err = file_remove_suid(file);
        if (err)
                goto out_nolock;
        file_update_time(file);

        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);

        mutex_lock(&inode->i_mutex);
        BTRFS_I(inode)->sequence++;
        first_index = pos >> PAGE_CACHE_SHIFT;
        last_index = (pos + count) >> PAGE_CACHE_SHIFT;

        /*
         * there are lots of better ways to do this, but this code
         * makes sure the first and last page in the file range are
         * up to date and ready for cow
         */
        if ((pos & (PAGE_CACHE_SIZE - 1))) {
                pinned[0] = grab_cache_page(inode->i_mapping, first_index);
                if (!PageUptodate(pinned[0])) {
                        ret = btrfs_readpage(NULL, pinned[0]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[0]);
                } else {
                        unlock_page(pinned[0]);
                }
        }
        if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
                pinned[1] = grab_cache_page(inode->i_mapping, last_index);
                if (!PageUptodate(pinned[1])) {
                        ret = btrfs_readpage(NULL, pinned[1]);
                        BUG_ON(ret);
                        wait_on_page_locked(pinned[1]);
                } else {
                        unlock_page(pinned[1]);
                }
        }

        while (count > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t write_bytes = min(count, nrptrs *
                                        (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
                                        PAGE_CACHE_SHIFT;

                WARN_ON(num_pages > nrptrs);
                memset(pages, 0, sizeof(struct page *) * nrptrs);

                ret = btrfs_check_data_free_space(root, inode, write_bytes);
                if (ret)
                        goto out;

                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, last_index,
                                    write_bytes);
                if (ret) {
                        btrfs_free_reserved_data_space(root, inode,
                                                       write_bytes);
                        goto out;
                }

                ret = btrfs_copy_from_user(pos, num_pages,
                                           write_bytes, pages, buf);
                if (ret) {
                        btrfs_free_reserved_data_space(root, inode,
                                                       write_bytes);
                        btrfs_drop_pages(pages, num_pages);
                        goto out;
                }

                ret = dirty_and_release_pages(NULL, root, file, pages,
                                              num_pages, pos, write_bytes);
                btrfs_drop_pages(pages, num_pages);
                if (ret) {
                        btrfs_free_reserved_data_space(root, inode,
                                                       write_bytes);
                        goto out;
                }

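                /*
                 * O_SYNC, O_DIRECT and synchronous inodes kick off
                 * writeback on the just-dirtied range right away;
                 * everything else only throttles against the normal
                 * dirty page and btree balancing limits.
                 */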
                if (will_write) {
                        btrfs_fdatawrite_range(inode->i_mapping, pos,
                                               pos + write_bytes - 1,
                                               WB_SYNC_ALL);
                } else {
                        balance_dirty_pages_ratelimited_nr(inode->i_mapping,
                                                           num_pages);
                        if (num_pages <
                            (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                                btrfs_btree_balance_dirty(root, 1);
                        btrfs_throttle(root);
                }

                buf += write_bytes;
                count -= write_bytes;
                pos += write_bytes;
                num_written += write_bytes;

                cond_resched();
        }
out:
        mutex_unlock(&inode->i_mutex);
        if (ret)
                err = ret;

out_nolock:
        kfree(pages);
        if (pinned[0])
                page_cache_release(pinned[0]);
        if (pinned[1])
                page_cache_release(pinned[1]);
        *ppos = pos;

        /*
         * we want to make sure fsync finds this change
         * but we haven't joined a transaction running right now.
         *
         * Later on, someone is sure to update the inode and get the
         * real transid recorded.
         *
         * We set last_trans now to the fs_info generation + 1,
         * this will either be one more than the running transaction
         * or the generation used for the next transaction if there isn't
         * one running right now.
         */
        BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

        if (num_written > 0 && will_write) {
                struct btrfs_trans_handle *trans;

                err = btrfs_wait_ordered_range(inode, start_pos, num_written);
                if (err)
                        num_written = err;

                if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                        trans = btrfs_start_transaction(root, 1);
                        ret = btrfs_log_dentry_safe(trans, root,
                                                    file->f_dentry);
                        if (ret == 0) {
                                ret = btrfs_sync_log(trans, root);
                                if (ret == 0)
                                        btrfs_end_transaction(trans, root);
                                else
                                        btrfs_commit_transaction(trans, root);
                        } else {
                                btrfs_commit_transaction(trans, root);
                        }
                }
                if (file->f_flags & O_DIRECT) {
                        invalidate_mapping_pages(inode->i_mapping,
                              start_pos >> PAGE_CACHE_SHIFT,
                             (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
                }
        }
        current->backing_dev_info = NULL;
        return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
        /*
         * ordered_data_close is set by setattr when we are about to truncate
         * a file from a non-zero size to a zero size.  This tries to
         * flush down new bytes that may have been written if the
         * application were using truncate to replace a file in place.
         */
        if (BTRFS_I(inode)->ordered_data_close) {
                BTRFS_I(inode)->ordered_data_close = 0;
                btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                        filemap_flush(inode->i_mapping);
        }
        if (filp->private_data)
                btrfs_ioctl_trans_end(filp);
        return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
        struct inode *inode = dentry->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        struct btrfs_trans_handle *trans;

        /*
         * check the transaction that last modified this inode
         * and see if it's already been committed
         */
        if (!BTRFS_I(inode)->last_trans)
                goto out;

        mutex_lock(&root->fs_info->trans_mutex);
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
                BTRFS_I(inode)->last_trans = 0;
                mutex_unlock(&root->fs_info->trans_mutex);
                goto out;
        }
        mutex_unlock(&root->fs_info->trans_mutex);

        root->log_batch++;
        filemap_fdatawrite(inode->i_mapping);
        btrfs_wait_ordered_range(inode, 0, (u64)-1);
        root->log_batch++;

        if (datasync && !(inode->i_state & I_DIRTY_PAGES))
                goto out;
        /*
         * ok we haven't committed the transaction yet, let's do a commit
         */
        if (file && file->private_data)
                btrfs_ioctl_trans_end(file);

        trans = btrfs_start_transaction(root, 1);
        if (!trans) {
                ret = -ENOMEM;
                goto out;
        }

        ret = btrfs_log_dentry_safe(trans, root, dentry);
        if (ret < 0)
                goto out;

        /* we've logged all the items and now have a consistent
         * version of the file in the log.  It is possible that
         * someone will come in and modify the file, but that's
         * fine because the log is consistent on disk, and we
         * have references to all of the file's extents
         *
         * It is possible that someone will come in and log the
         * file again, but that will end up using the synchronization
         * inside btrfs_sync_log to keep things safe.
         */
        mutex_unlock(&dentry->d_inode->i_mutex);

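        /*
         * a positive return from btrfs_log_dentry_safe means the inode
         * could not be logged through the tree log, so fall back to a
         * full transaction commit.
         */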
        if (ret > 0) {
                ret = btrfs_commit_transaction(trans, root);
        } else {
                ret = btrfs_sync_log(trans, root);
                if (ret == 0)
                        ret = btrfs_end_transaction(trans, root);
                else
                        ret = btrfs_commit_transaction(trans, root);
        }
        mutex_lock(&dentry->d_inode->i_mutex);
out:
        return ret > 0 ? EIO : ret;
}

static struct vm_operations_struct btrfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
        vma->vm_ops = &btrfs_file_vm_ops;
        file_accessed(filp);
        return 0;
}

struct file_operations btrfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .splice_read    = generic_file_splice_read,
        .write          = btrfs_file_write,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
        .fsync          = btrfs_sync_file,
        .unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_ioctl,
#endif
};