Btrfs: Fix add_extent_mapping to check for duplicates across the whole range
author		Chris Mason <chris.mason@oracle.com>
		Wed, 20 Aug 2008 12:51:50 +0000 (08:51 -0400)
committer	Chris Mason <chris.mason@oracle.com>
		Thu, 25 Sep 2008 15:04:06 +0000 (11:04 -0400)
add_extent_mapping was allowing the insertion of overlapping extents.  This
never used to happen because the only extents inserted were those read from
disk, and those never overlap.

But with the data=ordered code, the disk and in-memory representations of the
file are no longer the same.  add_extent_mapping now uses lookup_extent_mapping
to make sure a new extent does not overlap any part of an existing mapping
before it inserts, and returns -EEXIST if it does.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
fs/btrfs/extent_map.c
fs/btrfs/inode.c

diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 954b047..78ced11 100644
@@ -207,7 +207,14 @@ int add_extent_mapping(struct extent_map_tree *tree,
        int ret = 0;
        struct extent_map *merge = NULL;
        struct rb_node *rb;
+       struct extent_map *exist;
 
+       exist = lookup_extent_mapping(tree, em->start, em->len);
+       if (exist) {
+               free_extent_map(exist);
+               ret = -EEXIST;
+               goto out;
+       }
        assert_spin_locked(&tree->lock);
        rb = tree_insert(&tree->map, em->start, &em->rb_node);
        if (rb) {
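
With this check in place, add_extent_mapping() returns -EEXIST whenever any
part of [em->start, em->start + em->len) is already mapped, not only when
another extent shares the same start offset.  The sketch below is not part of
the patch: the wrapper name is hypothetical, but the helpers and the locking
model (tree->lock held around the insert) are the ones used in the hunk above.
It shows how a caller might fall back to the mapping that already exists.

/*
 * Hypothetical caller-side helper, for illustration only.
 * lookup_extent_mapping() returns a referenced extent_map for any
 * overlap of the range, and free_extent_map() drops a reference.
 */
static struct extent_map *insert_or_reuse_mapping(struct extent_map_tree *tree,
						  struct extent_map *em)
{
	struct extent_map *existing;
	int ret;

	spin_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	if (ret == -EEXIST) {
		/* the range is already (partly) mapped; reuse what is there */
		existing = lookup_extent_mapping(tree, em->start, em->len);
		free_extent_map(em);
		em = existing;
	}
	spin_unlock(&tree->lock);
	return em;
}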
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6510789..6c77804 100644
@@ -641,8 +641,9 @@ int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
                if (ret == -ENOENT || ret == -EFBIG)
                        ret = 0;
                csum = 0;
-               printk("no csum found for inode %lu start %Lu\n", inode->i_ino,
-                      start);
+               if (printk_ratelimit())
+                       printk("no csum found for inode %lu start %Lu\n", inode->i_ino,
+                              start);
                goto out;
        }
        read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
@@ -1653,8 +1654,20 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
                btrfs_truncate_page(inode->i_mapping, inode->i_size);
 
                hole_size = block_end - hole_start;
-               btrfs_wait_ordered_range(inode, hole_start, hole_size);
-               lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+               while(1) {
+                       struct btrfs_ordered_extent *ordered;
+                       btrfs_wait_ordered_range(inode, hole_start, hole_size);
+
+                       lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
+                       ordered = btrfs_lookup_ordered_extent(inode, hole_start);
+                       if (ordered) {
+                               unlock_extent(io_tree, hole_start,
+                                             block_end - 1, GFP_NOFS);
+                               btrfs_put_ordered_extent(ordered);
+                       } else {
+                               break;
+                       }
+               }
 
                trans = btrfs_start_transaction(root, 1);
                btrfs_set_trans_block_group(trans, inode);
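
The loop added to btrfs_setattr() above follows the pattern the ordered-data
code uses to take an extent lock over a range with no ordered I/O pending:
wait for ordered extents, lock the range, then re-check in case a new ordered
extent was created before the lock was taken.  The helper below is
hypothetical and only illustrates that pattern; the btrfs_*ordered* and
*_extent calls are the ones used in the hunk.

/*
 * Illustrative only: repeat the wait -> lock -> recheck sequence until
 * no ordered extent remains inside [start, end] (end is inclusive,
 * as in lock_extent()).
 */
static void lock_range_and_flush_ordered(struct inode *inode,
					 struct extent_io_tree *io_tree,
					 u64 start, u64 end)
{
	struct btrfs_ordered_extent *ordered;

	while (1) {
		/* flush and wait for any ordered I/O over the range */
		btrfs_wait_ordered_range(inode, start, end - start + 1);

		lock_extent(io_tree, start, end, GFP_NOFS);

		/* a new ordered extent may have appeared before we locked */
		ordered = btrfs_lookup_ordered_extent(inode, start);
		if (!ordered)
			break;

		/* drop the lock and our reference, then try again */
		unlock_extent(io_tree, start, end, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}
}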