drop unused dentry argument to ->fsync
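The change named in the title removes the unused struct dentry * parameter from the ->fsync file operation; btrfs implements that hook in btrfs_sync_file(), later in this file and outside the hunks shown here. As a rough sketch of the prototype before and after (an illustration of the API change, not a hunk from this diff):

    /* before: every ->fsync implementation received a dentry it ignored */
    int (*fsync)(struct file *file, struct dentry *dentry, int datasync);

    /* after: only the file and the datasync flag are passed */
    int (*fsync)(struct file *file, int datasync);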
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e5ffb66..787b50a 100644
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/smp_lock.h>
 #include <linux/backing-dev.h>
 #include <linux/mpage.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/statfs.h>
 #include <linux/compat.h>
-#include <linux/version.h>
+#include <linux/slab.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
 #include "ioctl.h"
 #include "print-tree.h"
+#include "tree-log.h"
+#include "locking.h"
 #include "compat.h"
 
 
-static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
-                               struct page **prepared_pages,
-                               const char __user * buf)
+/* simple helper to fault in pages and copy.  This should go away
+ * and be replaced with calls into generic code.
+ */
+static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
+                                        int write_bytes,
+                                        struct page **prepared_pages,
+                                        struct iov_iter *i)
 {
-       long page_fault = 0;
-       int i;
+       size_t copied;
+       int pg = 0;
        int offset = pos & (PAGE_CACHE_SIZE - 1);
 
-       for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
+       while (write_bytes > 0) {
                size_t count = min_t(size_t,
                                     PAGE_CACHE_SIZE - offset, write_bytes);
-               struct page *page = prepared_pages[i];
-               fault_in_pages_readable(buf, count);
+               struct page *page = prepared_pages[pg];
+again:
+               if (unlikely(iov_iter_fault_in_readable(i, count)))
+                       return -EFAULT;
 
                /* Copy data from userspace to the current page */
-               kmap(page);
-               page_fault = __copy_from_user(page_address(page) + offset,
-                                             buf, count);
+               copied = iov_iter_copy_from_user(page, i, offset, count);
+
                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
-               kunmap(page);
-               buf += count;
-               write_bytes -= count;
+               iov_iter_advance(i, copied);
+               write_bytes -= copied;
 
-               if (page_fault)
-                       break;
+               if (unlikely(copied == 0)) {
+                       count = min_t(size_t, PAGE_CACHE_SIZE - offset,
+                                     iov_iter_single_seg_count(i));
+                       goto again;
+               }
+
+               if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
+                       offset += copied;
+               } else {
+                       pg++;
+                       offset = 0;
+               }
        }
-       return page_fault ? -EFAULT : 0;
+       return 0;
 }
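/*
 * Editor's sketch, not part of the diff: how the aio_write path is expected
 * to hand an iov_iter to btrfs_copy_from_user() above.  This assumes the
 * pre-3.16 iov_iter_init() helper declared in <linux/fs.h>; the wrapper name
 * example_feed_iter and its argument list are purely illustrative.
 */
static int example_feed_iter(loff_t pos, int num_pages, int write_bytes,
                             struct page **prepared_pages,
                             const struct iovec *iov, unsigned long nr_segs,
                             size_t count)
{
        struct iov_iter i;

        /* start the iterator at byte 0 of the user iovec array */
        iov_iter_init(&i, iov, nr_segs, count, 0);

        /*
         * btrfs_copy_from_user() advances the iterator as it copies, so a
         * short copy simply faults the next segment in and retries from the
         * current position.
         */
        return btrfs_copy_from_user(pos, num_pages, write_bytes,
                                    prepared_pages, &i);
}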
 
-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+/*
+ * unlocks pages after btrfs_file_write is done with them
+ */
+static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
        size_t i;
        for (i = 0; i < num_pages; i++) {
                if (!pages[i])
                        break;
+               /* page checked is some magic around finding pages that
+                * have been modified without going through btrfs_set_page_dirty.
+                * Clear it here.
+                */
                ClearPageChecked(pages[i]);
                unlock_page(pages[i]);
                mark_page_accessed(pages[i]);
@@ -82,150 +104,15 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
        }
 }
 
-static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root, struct inode *inode,
-                               u64 offset, size_t size,
-                               struct page **pages, size_t page_offset,
-                               int num_pages)
-{
-       struct btrfs_key key;
-       struct btrfs_path *path;
-       struct extent_buffer *leaf;
-       char *kaddr;
-       unsigned long ptr;
-       struct btrfs_file_extent_item *ei;
-       struct page *page;
-       u32 datasize;
-       int err = 0;
-       int ret;
-       int i;
-       ssize_t cur_size;
-
-       path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
-
-       btrfs_set_trans_block_group(trans, inode);
-
-       key.objectid = inode->i_ino;
-       key.offset = offset;
-       btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
-
-       ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
-       if (ret < 0) {
-               err = ret;
-               goto fail;
-       }
-       if (ret == 1) {
-               struct btrfs_key found_key;
-
-               if (path->slots[0] == 0)
-                       goto insert;
-
-               path->slots[0]--;
-               leaf = path->nodes[0];
-               btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-
-               if (found_key.objectid != inode->i_ino)
-                       goto insert;
-
-               if (found_key.type != BTRFS_EXTENT_DATA_KEY)
-                       goto insert;
-               ei = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-
-               if (btrfs_file_extent_type(leaf, ei) !=
-                   BTRFS_FILE_EXTENT_INLINE) {
-                       goto insert;
-               }
-               btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-               ret = 0;
-       }
-       if (ret == 0) {
-               u32 found_size;
-               u64 found_end;
-
-               leaf = path->nodes[0];
-               ei = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-
-               if (btrfs_file_extent_type(leaf, ei) !=
-                   BTRFS_FILE_EXTENT_INLINE) {
-                       err = ret;
-                       btrfs_print_leaf(root, leaf);
-                       printk("found wasn't inline offset %Lu inode %lu\n",
-                              offset, inode->i_ino);
-                       goto fail;
-               }
-               found_size = btrfs_file_extent_inline_len(leaf,
-                                         btrfs_item_nr(leaf, path->slots[0]));
-               found_end = key.offset + found_size;
-
-               if (found_end < offset + size) {
-                       btrfs_release_path(root, path);
-                       ret = btrfs_search_slot(trans, root, &key, path,
-                                               offset + size - found_end, 1);
-                       BUG_ON(ret != 0);
-
-                       ret = btrfs_extend_item(trans, root, path,
-                                               offset + size - found_end);
-                       if (ret) {
-                               err = ret;
-                               goto fail;
-                       }
-                       leaf = path->nodes[0];
-                       ei = btrfs_item_ptr(leaf, path->slots[0],
-                                           struct btrfs_file_extent_item);
-                       inode->i_blocks += (offset + size - found_end) >> 9;
-               }
-               if (found_end < offset) {
-                       ptr = btrfs_file_extent_inline_start(ei) + found_size;
-                       memset_extent_buffer(leaf, 0, ptr, offset - found_end);
-               }
-       } else {
-insert:
-               btrfs_release_path(root, path);
-               datasize = offset + size - key.offset;
-               inode->i_blocks += datasize >> 9;
-               datasize = btrfs_file_extent_calc_inline_size(datasize);
-               ret = btrfs_insert_empty_item(trans, root, path, &key,
-                                             datasize);
-               if (ret) {
-                       err = ret;
-                       printk("got bad ret %d\n", ret);
-                       goto fail;
-               }
-               leaf = path->nodes[0];
-               ei = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-               btrfs_set_file_extent_generation(leaf, ei, trans->transid);
-               btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
-       }
-       ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;
-
-       cur_size = size;
-       i = 0;
-       while (size > 0) {
-               page = pages[i];
-               kaddr = kmap_atomic(page, KM_USER0);
-               cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
-               write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
-               kunmap_atomic(kaddr, KM_USER0);
-               page_offset = 0;
-               ptr += cur_size;
-               size -= cur_size;
-               if (i >= num_pages) {
-                       printk("i %d num_pages %d\n", i, num_pages);
-               }
-               i++;
-       }
-       btrfs_mark_buffer_dirty(leaf);
-fail:
-       btrfs_free_path(path);
-       return err;
-}
-
-static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
+/*
+ * after copy_from_user, pages need to be dirtied and we need to make
+ * sure holes are created between the current EOF and the start of
+ * any next extents (if required).
+ *
+ * this also makes the decision about creating an inline extent vs
+ * doing real data extents, marking pages dirty and delalloc as required.
+ */
+static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct file *file,
                                   struct page **pages,
@@ -236,14 +123,10 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
        int err = 0;
        int i;
        struct inode *inode = fdentry(file)->d_inode;
-       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-       u64 hint_byte;
        u64 num_bytes;
        u64 start_pos;
        u64 end_of_last_block;
        u64 end_pos = pos + write_bytes;
-       u64 inline_size;
-       int did_inline = 0;
        loff_t isize = i_size_read(inode);
 
        start_pos = pos & ~((u64)root->sectorsize - 1);
@@ -251,109 +134,32 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
                    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
        end_of_last_block = start_pos + num_bytes - 1;
+       err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
+                                       NULL);
+       BUG_ON(err);
 
-       lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans) {
-               err = -ENOMEM;
-               goto out_unlock;
-       }
-       btrfs_set_trans_block_group(trans, inode);
-       hint_byte = 0;
-
-       if ((end_of_last_block & 4095) == 0) {
-               printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
-       }
-       set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
-       /* FIXME...EIEIO, ENOSPC and more */
-       /* insert any holes we need to create */
-       if (isize < start_pos) {
-               u64 last_pos_in_file;
-               u64 hole_size;
-               u64 mask = root->sectorsize - 1;
-               last_pos_in_file = (isize + mask) & ~mask;
-               hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
-               if (hole_size > 0) {
-                       btrfs_wait_ordered_range(inode, last_pos_in_file,
-                                                last_pos_in_file + hole_size);
-                       mutex_lock(&BTRFS_I(inode)->extent_mutex);
-                       err = btrfs_drop_extents(trans, root, inode,
-                                                last_pos_in_file,
-                                                last_pos_in_file + hole_size,
-                                                last_pos_in_file,
-                                                &hint_byte);
-                       if (err)
-                               goto failed;
-
-                       err = btrfs_insert_file_extent(trans, root,
-                                                      inode->i_ino,
-                                                      last_pos_in_file,
-                                                      0, 0, hole_size, 0);
-                       btrfs_drop_extent_cache(inode, last_pos_in_file,
-                                       last_pos_in_file + hole_size -1);
-                       mutex_unlock(&BTRFS_I(inode)->extent_mutex);
-                       btrfs_check_file(root, inode);
-               }
-               if (err)
-                       goto failed;
-       }
-
-       /*
-        * either allocate an extent for the new bytes or setup the key
-        * to show we are doing inline data in the extent
-        */
-       inline_size = end_pos;
-       if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
-           inline_size > root->fs_info->max_inline ||
-           (inline_size & (root->sectorsize -1)) == 0 ||
-           inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
-               /* check for reserved extents on each page, we don't want
-                * to reset the delalloc bit on things that already have
-                * extents reserved.
-                */
-               set_extent_delalloc(io_tree, start_pos,
-                                   end_of_last_block, GFP_NOFS);
-               for (i = 0; i < num_pages; i++) {
-                       struct page *p = pages[i];
-                       SetPageUptodate(p);
-                       ClearPageChecked(p);
-                       set_page_dirty(p);
-               }
-       } else {
-               u64 aligned_end;
-               /* step one, delete the existing extents in this range */
-               aligned_end = (pos + write_bytes + root->sectorsize - 1) &
-                       ~((u64)root->sectorsize - 1);
-               mutex_lock(&BTRFS_I(inode)->extent_mutex);
-               err = btrfs_drop_extents(trans, root, inode, start_pos,
-                                        aligned_end, aligned_end, &hint_byte);
-               if (err)
-                       goto failed;
-               if (isize > inline_size)
-                       inline_size = min_t(u64, isize, aligned_end);
-               inline_size -= start_pos;
-               err = insert_inline_extent(trans, root, inode, start_pos,
-                                          inline_size, pages, 0, num_pages);
-               btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
-               BUG_ON(err);
-               mutex_unlock(&BTRFS_I(inode)->extent_mutex);
-               did_inline = 1;
+       for (i = 0; i < num_pages; i++) {
+               struct page *p = pages[i];
+               SetPageUptodate(p);
+               ClearPageChecked(p);
+               set_page_dirty(p);
        }
        if (end_pos > isize) {
                i_size_write(inode, end_pos);
-               if (did_inline)
-                       BTRFS_I(inode)->disk_i_size = end_pos;
-               btrfs_update_inode(trans, root, inode);
+               /* we've only changed i_size in ram, and we haven't updated
+                * the disk i_size.  There is no need to log the inode
+                * at this time.
+                */
        }
-failed:
-       err = btrfs_end_transaction_throttle(trans, root);
-out_unlock:
-       unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-       return err;
+       return 0;
 }
 
-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
+/*
+ * this drops all the extents in the cache that intersect the range
+ * [start, end].  Existing extents are split as required.
+ */
+int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+                           int skip_pinned)
 {
        struct extent_map *em;
        struct extent_map *split = NULL;
@@ -362,42 +168,58 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
        u64 len = end - start + 1;
        int ret;
        int testend = 1;
+       unsigned long flags;
+       int compressed = 0;
 
        WARN_ON(end < start);
        if (end == (u64)-1) {
                len = (u64)-1;
                testend = 0;
        }
-       while(1) {
+       while (1) {
                if (!split)
                        split = alloc_extent_map(GFP_NOFS);
                if (!split2)
                        split2 = alloc_extent_map(GFP_NOFS);
 
-               spin_lock(&em_tree->lock);
+               write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, start, len);
                if (!em) {
-                       spin_unlock(&em_tree->lock);
+                       write_unlock(&em_tree->lock);
                        break;
                }
-               if (test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
-                       printk(KERN_CRIT "inode %lu trying to drop pinned "
-                              "extent start %llu end %llu, em [%llu %llu]\n",
-                              inode->i_ino,
-                              (unsigned long long)start,
-                              (unsigned long long)end,
-                              (unsigned long long)em->start,
-                              (unsigned long long)em->len);
+               flags = em->flags;
+               if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+                       if (testend && em->start + em->len >= start + len) {
+                               free_extent_map(em);
+                               write_unlock(&em_tree->lock);
+                               break;
+                       }
+                       start = em->start + em->len;
+                       if (testend)
+                               len = start + len - (em->start + em->len);
+                       free_extent_map(em);
+                       write_unlock(&em_tree->lock);
+                       continue;
                }
+               compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
+               clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                remove_extent_mapping(em_tree, em);
 
                if (em->block_start < EXTENT_MAP_LAST_BYTE &&
                    em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
+                       split->orig_start = em->orig_start;
                        split->block_start = em->block_start;
+
+                       if (compressed)
+                               split->block_len = em->block_len;
+                       else
+                               split->block_len = split->len;
+
                        split->bdev = em->bdev;
-                       split->flags = em->flags;
+                       split->flags = flags;
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
@@ -411,16 +233,24 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
                        split->start = start + len;
                        split->len = em->start + em->len - (start + len);
                        split->bdev = em->bdev;
-                       split->flags = em->flags;
+                       split->flags = flags;
 
-                       split->block_start = em->block_start + diff;
+                       if (compressed) {
+                               split->block_len = em->block_len;
+                               split->block_start = em->block_start;
+                               split->orig_start = em->orig_start;
+                       } else {
+                               split->block_len = split->len;
+                               split->block_start = em->block_start + diff;
+                               split->orig_start = split->start;
+                       }
 
                        ret = add_extent_mapping(em_tree, split);
                        BUG_ON(ret);
                        free_extent_map(split);
                        split = NULL;
                }
-               spin_unlock(&em_tree->lock);
+               write_unlock(&em_tree->lock);
 
                /* once for us */
                free_extent_map(em);
@@ -434,80 +264,6 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
        return 0;
 }
 
-int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
-{
-       return 0;
-#if 0
-       struct btrfs_path *path;
-       struct btrfs_key found_key;
-       struct extent_buffer *leaf;
-       struct btrfs_file_extent_item *extent;
-       u64 last_offset = 0;
-       int nritems;
-       int slot;
-       int found_type;
-       int ret;
-       int err = 0;
-       u64 extent_end = 0;
-
-       path = btrfs_alloc_path();
-       ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
-                                      last_offset, 0);
-       while(1) {
-               nritems = btrfs_header_nritems(path->nodes[0]);
-               if (path->slots[0] >= nritems) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret)
-                               goto out;
-                       nritems = btrfs_header_nritems(path->nodes[0]);
-               }
-               slot = path->slots[0];
-               leaf = path->nodes[0];
-               btrfs_item_key_to_cpu(leaf, &found_key, slot);
-               if (found_key.objectid != inode->i_ino)
-                       break;
-               if (found_key.type != BTRFS_EXTENT_DATA_KEY)
-                       goto out;
-
-               if (found_key.offset < last_offset) {
-                       WARN_ON(1);
-                       btrfs_print_leaf(root, leaf);
-                       printk("inode %lu found offset %Lu expected %Lu\n",
-                              inode->i_ino, found_key.offset, last_offset);
-                       err = 1;
-                       goto out;
-               }
-               extent = btrfs_item_ptr(leaf, slot,
-                                       struct btrfs_file_extent_item);
-               found_type = btrfs_file_extent_type(leaf, extent);
-               if (found_type == BTRFS_FILE_EXTENT_REG) {
-                       extent_end = found_key.offset +
-                            btrfs_file_extent_num_bytes(leaf, extent);
-               } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
-                       struct btrfs_item *item;
-                       item = btrfs_item_nr(leaf, slot);
-                       extent_end = found_key.offset +
-                            btrfs_file_extent_inline_len(leaf, item);
-                       extent_end = (extent_end + root->sectorsize - 1) &
-                               ~((u64)root->sectorsize -1 );
-               }
-               last_offset = extent_end;
-               path->slots[0]++;
-       }
-       if (0 && last_offset < inode->i_size) {
-               WARN_ON(1);
-               btrfs_print_leaf(root, leaf);
-               printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
-                      last_offset, inode->i_size);
-               err = 1;
-
-       }
-out:
-       btrfs_free_path(path);
-       return err;
-#endif
-}
-
 /*
  * this is very complex, but the basic idea is to drop all extents
  * in the range start - end.  hint_block is filled in with a block number
@@ -517,281 +273,498 @@ out:
  * it is either truncated or split.  Anything entirely inside the range
  * is deleted from the tree.
  */
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
-                      struct btrfs_root *root, struct inode *inode,
-                      u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
+int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
+                      u64 start, u64 end, u64 *hint_byte, int drop_cache)
 {
-       u64 extent_end = 0;
-       u64 search_start = start;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf;
-       struct btrfs_file_extent_item *extent;
+       struct btrfs_file_extent_item *fi;
        struct btrfs_path *path;
        struct btrfs_key key;
-       struct btrfs_file_extent_item old;
-       int keep;
-       int slot;
-       int bookend;
-       int found_type;
-       int found_extent;
-       int found_inline;
+       struct btrfs_key new_key;
+       u64 search_start = start;
+       u64 disk_bytenr = 0;
+       u64 num_bytes = 0;
+       u64 extent_offset = 0;
+       u64 extent_end = 0;
+       int del_nr = 0;
+       int del_slot = 0;
+       int extent_type;
        int recow;
        int ret;
 
-       btrfs_drop_extent_cache(inode, start, end - 1);
+       if (drop_cache)
+               btrfs_drop_extent_cache(inode, start, end - 1, 0);
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
-       while(1) {
+
+       while (1) {
                recow = 0;
-               btrfs_release_path(root, path);
                ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                               search_start, -1);
                if (ret < 0)
-                       goto out;
-               if (ret > 0) {
-                       if (path->slots[0] == 0) {
-                               ret = 0;
-                               goto out;
-                       }
-                       path->slots[0]--;
+                       break;
+               if (ret > 0 && path->slots[0] > 0 && search_start == start) {
+                       leaf = path->nodes[0];
+                       btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+                       if (key.objectid == inode->i_ino &&
+                           key.type == BTRFS_EXTENT_DATA_KEY)
+                               path->slots[0]--;
                }
+               ret = 0;
 next_slot:
-               keep = 0;
-               bookend = 0;
-               found_extent = 0;
-               found_inline = 0;
-               extent = NULL;
                leaf = path->nodes[0];
-               slot = path->slots[0];
-               ret = 0;
-               btrfs_item_key_to_cpu(leaf, &key, slot);
-               if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
-                   key.offset >= end) {
-                       goto out;
-               }
-               if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
-                   key.objectid != inode->i_ino) {
-                       goto out;
-               }
-               if (recow) {
-                       search_start = key.offset;
-                       continue;
-               }
-               if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
-                       extent = btrfs_item_ptr(leaf, slot,
-                                               struct btrfs_file_extent_item);
-                       found_type = btrfs_file_extent_type(leaf, extent);
-                       if (found_type == BTRFS_FILE_EXTENT_REG) {
-                               extent_end =
-                                    btrfs_file_extent_disk_bytenr(leaf,
-                                                                  extent);
-                               if (extent_end)
-                                       *hint_byte = extent_end;
-
-                               extent_end = key.offset +
-                                    btrfs_file_extent_num_bytes(leaf, extent);
-                               found_extent = 1;
-                       } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
-                               struct btrfs_item *item;
-                               item = btrfs_item_nr(leaf, slot);
-                               found_inline = 1;
-                               extent_end = key.offset +
-                                    btrfs_file_extent_inline_len(leaf, item);
+               if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+                       BUG_ON(del_nr > 0);
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               break;
+                       if (ret > 0) {
+                               ret = 0;
+                               break;
                        }
+                       leaf = path->nodes[0];
+                       recow = 1;
+               }
+
+               btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+               if (key.objectid > inode->i_ino ||
+                   key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+                       break;
+
+               fi = btrfs_item_ptr(leaf, path->slots[0],
+                                   struct btrfs_file_extent_item);
+               extent_type = btrfs_file_extent_type(leaf, fi);
+
+               if (extent_type == BTRFS_FILE_EXTENT_REG ||
+                   extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
+                       disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+                       num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+                       extent_offset = btrfs_file_extent_offset(leaf, fi);
+                       extent_end = key.offset +
+                               btrfs_file_extent_num_bytes(leaf, fi);
+               } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+                       extent_end = key.offset +
+                               btrfs_file_extent_inline_len(leaf, fi);
                } else {
+                       WARN_ON(1);
                        extent_end = search_start;
                }
 
-               /* we found nothing we can drop */
-               if ((!found_extent && !found_inline) ||
-                   search_start >= extent_end) {
-                       int nextret;
-                       u32 nritems;
-                       nritems = btrfs_header_nritems(leaf);
-                       if (slot >= nritems - 1) {
-                               nextret = btrfs_next_leaf(root, path);
-                               if (nextret)
-                                       goto out;
-                               recow = 1;
-                       } else {
-                               path->slots[0]++;
-                       }
+               if (extent_end <= search_start) {
+                       path->slots[0]++;
                        goto next_slot;
                }
 
-               if (found_inline) {
-                       u64 mask = root->sectorsize - 1;
-                       search_start = (extent_end + mask) & ~mask;
-               } else
-                       search_start = extent_end;
-               if (end <= extent_end && start >= key.offset && found_inline) {
-                       *hint_byte = EXTENT_MAP_INLINE;
+               search_start = max(key.offset, start);
+               if (recow) {
+                       btrfs_release_path(root, path);
                        continue;
                }
-               if (end < extent_end && end >= key.offset) {
-                       if (found_extent) {
-                               u64 disk_bytenr =
-                                   btrfs_file_extent_disk_bytenr(leaf, extent);
-                               u64 disk_num_bytes =
-                                   btrfs_file_extent_disk_num_bytes(leaf,
-                                                                     extent);
-                               read_extent_buffer(leaf, &old,
-                                                  (unsigned long)extent,
-                                                  sizeof(old));
-                               if (disk_bytenr != 0) {
-                                       ret = btrfs_inc_extent_ref(trans, root,
-                                                disk_bytenr, disk_num_bytes,
-                                                root->root_key.objectid,
-                                                trans->transid,
-                                                key.objectid, end);
-                                       BUG_ON(ret);
-                               }
+
+               /*
+                *     | - range to drop - |
+                *  | -------- extent -------- |
+                */
+               if (start > key.offset && end < extent_end) {
+                       BUG_ON(del_nr > 0);
+                       BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+
+                       memcpy(&new_key, &key, sizeof(new_key));
+                       new_key.offset = start;
+                       ret = btrfs_duplicate_item(trans, root, path,
+                                                  &new_key);
+                       if (ret == -EAGAIN) {
+                               btrfs_release_path(root, path);
+                               continue;
                        }
-                       bookend = 1;
-                       if (found_inline && start <= key.offset)
-                               keep = 1;
+                       if (ret < 0)
+                               break;
+
+                       leaf = path->nodes[0];
+                       fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+                                           struct btrfs_file_extent_item);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       start - key.offset);
+
+                       fi = btrfs_item_ptr(leaf, path->slots[0],
+                                           struct btrfs_file_extent_item);
+
+                       extent_offset += start - key.offset;
+                       btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       extent_end - start);
+                       btrfs_mark_buffer_dirty(leaf);
+
+                       if (disk_bytenr > 0) {
+                               ret = btrfs_inc_extent_ref(trans, root,
+                                               disk_bytenr, num_bytes, 0,
+                                               root->root_key.objectid,
+                                               new_key.objectid,
+                                               start - extent_offset);
+                               BUG_ON(ret);
+                               *hint_byte = disk_bytenr;
+                       }
+                       key.offset = start;
                }
-               /* truncate existing extent */
-               if (start > key.offset) {
-                       u64 new_num;
-                       u64 old_num;
-                       keep = 1;
-                       WARN_ON(start & (root->sectorsize - 1));
-                       if (found_extent) {
-                               new_num = start - key.offset;
-                               old_num = btrfs_file_extent_num_bytes(leaf,
-                                                                     extent);
-                               *hint_byte =
-                                       btrfs_file_extent_disk_bytenr(leaf,
-                                                                     extent);
-                               if (btrfs_file_extent_disk_bytenr(leaf,
-                                                                 extent)) {
-                                       dec_i_blocks(inode, old_num - new_num);
-                               }
-                               btrfs_set_file_extent_num_bytes(leaf, extent,
-                                                               new_num);
-                               btrfs_mark_buffer_dirty(leaf);
-                       } else if (key.offset < inline_limit &&
-                                  (end > extent_end) &&
-                                  (inline_limit < extent_end)) {
-                               u32 new_size;
-                               new_size = btrfs_file_extent_calc_inline_size(
-                                                  inline_limit - key.offset);
-                               dec_i_blocks(inode, (extent_end - key.offset) -
-                                       (inline_limit - key.offset));
-                               btrfs_truncate_item(trans, root, path,
-                                                   new_size, 1);
+               /*
+                *  | ---- range to drop ----- |
+                *      | -------- extent -------- |
+                */
+               if (start <= key.offset && end < extent_end) {
+                       BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+
+                       memcpy(&new_key, &key, sizeof(new_key));
+                       new_key.offset = end;
+                       btrfs_set_item_key_safe(trans, root, path, &new_key);
+
+                       extent_offset += end - key.offset;
+                       btrfs_set_file_extent_offset(leaf, fi, extent_offset);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       extent_end - end);
+                       btrfs_mark_buffer_dirty(leaf);
+                       if (disk_bytenr > 0) {
+                               inode_sub_bytes(inode, end - key.offset);
+                               *hint_byte = disk_bytenr;
                        }
+                       break;
                }
-               /* delete the entire extent */
-               if (!keep) {
-                       u64 disk_bytenr = 0;
-                       u64 disk_num_bytes = 0;
-                       u64 extent_num_bytes = 0;
-                       u64 root_gen;
-                       u64 root_owner;
-
-                       root_gen = btrfs_header_generation(leaf);
-                       root_owner = btrfs_header_owner(leaf);
-                       if (found_extent) {
-                               disk_bytenr =
-                                     btrfs_file_extent_disk_bytenr(leaf,
-                                                                    extent);
-                               disk_num_bytes =
-                                     btrfs_file_extent_disk_num_bytes(leaf,
-                                                                      extent);
-                               extent_num_bytes =
-                                     btrfs_file_extent_num_bytes(leaf, extent);
-                               *hint_byte =
-                                       btrfs_file_extent_disk_bytenr(leaf,
-                                                                     extent);
+
+               search_start = extent_end;
+               /*
+                *       | ---- range to drop ----- |
+                *  | -------- extent -------- |
+                */
+               if (start > key.offset && end >= extent_end) {
+                       BUG_ON(del_nr > 0);
+                       BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
+
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       start - key.offset);
+                       btrfs_mark_buffer_dirty(leaf);
+                       if (disk_bytenr > 0) {
+                               inode_sub_bytes(inode, extent_end - start);
+                               *hint_byte = disk_bytenr;
                        }
-                       ret = btrfs_del_item(trans, root, path);
-                       /* TODO update progress marker and return */
-                       BUG_ON(ret);
-                       btrfs_release_path(root, path);
-                       extent = NULL;
-                       if (found_extent && disk_bytenr != 0) {
-                               dec_i_blocks(inode, extent_num_bytes);
-                               ret = btrfs_free_extent(trans, root,
-                                               disk_bytenr,
-                                               disk_num_bytes,
-                                               root_owner,
-                                               root_gen, inode->i_ino,
-                                               key.offset, 0);
+                       if (end == extent_end)
+                               break;
+
+                       path->slots[0]++;
+                       goto next_slot;
+               }
+
+               /*
+                *  | ---- range to drop ----- |
+                *    | ------ extent ------ |
+                */
+               if (start <= key.offset && end >= extent_end) {
+                       if (del_nr == 0) {
+                               del_slot = path->slots[0];
+                               del_nr = 1;
+                       } else {
+                               BUG_ON(del_slot + del_nr != path->slots[0]);
+                               del_nr++;
                        }
 
-                       BUG_ON(ret);
-                       if (!bookend && search_start >= end) {
-                               ret = 0;
-                               goto out;
+                       if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
+                               inode_sub_bytes(inode,
+                                               extent_end - key.offset);
+                               extent_end = ALIGN(extent_end,
+                                                  root->sectorsize);
+                       } else if (disk_bytenr > 0) {
+                               ret = btrfs_free_extent(trans, root,
+                                               disk_bytenr, num_bytes, 0,
+                                               root->root_key.objectid,
+                                               key.objectid, key.offset -
+                                               extent_offset);
+                               BUG_ON(ret);
+                               inode_sub_bytes(inode,
+                                               extent_end - key.offset);
+                               *hint_byte = disk_bytenr;
                        }
-                       if (!bookend)
-                               continue;
-               }
-               if (bookend && found_inline && start <= key.offset) {
-                       u32 new_size;
-                       new_size = btrfs_file_extent_calc_inline_size(
-                                                  extent_end - end);
-                       dec_i_blocks(inode, (extent_end - key.offset) -
-                                       (extent_end - end));
-                       btrfs_truncate_item(trans, root, path, new_size, 0);
-               }
-               /* create bookend, splitting the extent in two */
-               if (bookend && found_extent) {
-                       struct btrfs_key ins;
-                       ins.objectid = inode->i_ino;
-                       ins.offset = end;
-                       btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
-                       btrfs_release_path(root, path);
-                       ret = btrfs_insert_empty_item(trans, root, path, &ins,
-                                                     sizeof(*extent));
 
-                       leaf = path->nodes[0];
-                       if (ret) {
-                               btrfs_print_leaf(root, leaf);
-                               printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n", ret , ins.objectid, ins.type, ins.offset, start, end, key.offset, extent_end, keep);
+                       if (end == extent_end)
+                               break;
+
+                       if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
+                               path->slots[0]++;
+                               goto next_slot;
                        }
+
+                       ret = btrfs_del_items(trans, root, path, del_slot,
+                                             del_nr);
                        BUG_ON(ret);
-                       extent = btrfs_item_ptr(leaf, path->slots[0],
-                                               struct btrfs_file_extent_item);
-                       write_extent_buffer(leaf, &old,
-                                           (unsigned long)extent, sizeof(old));
-
-                       btrfs_set_file_extent_offset(leaf, extent,
-                                   le64_to_cpu(old.offset) + end - key.offset);
-                       WARN_ON(le64_to_cpu(old.num_bytes) <
-                               (extent_end - end));
-                       btrfs_set_file_extent_num_bytes(leaf, extent,
+
+                       del_nr = 0;
+                       del_slot = 0;
+
+                       btrfs_release_path(root, path);
+                       continue;
+               }
+
+               BUG_ON(1);
+       }
+
+       if (del_nr > 0) {
+               ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+               BUG_ON(ret);
+       }
+
+       btrfs_free_path(path);
+       return ret;
+}
+
+static int extent_mergeable(struct extent_buffer *leaf, int slot,
+                           u64 objectid, u64 bytenr, u64 orig_offset,
+                           u64 *start, u64 *end)
+{
+       struct btrfs_file_extent_item *fi;
+       struct btrfs_key key;
+       u64 extent_end;
+
+       if (slot < 0 || slot >= btrfs_header_nritems(leaf))
+               return 0;
+
+       btrfs_item_key_to_cpu(leaf, &key, slot);
+       if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
+               return 0;
+
+       fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+       if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
+           btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
+           btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
+           btrfs_file_extent_compression(leaf, fi) ||
+           btrfs_file_extent_encryption(leaf, fi) ||
+           btrfs_file_extent_other_encoding(leaf, fi))
+               return 0;
+
+       extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
+       if ((*start && *start != key.offset) || (*end && *end != extent_end))
+               return 0;
+
+       *start = key.offset;
+       *end = extent_end;
+       return 1;
+}
+
+/*
+ * Mark the extent in the range start - end as written.
+ *
+ * This changes the extent type from 'pre-allocated' to 'regular'. If only
+ * part of the extent is marked as written, the extent will be split into
+ * two or three.
+ */
+int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
+                             struct inode *inode, u64 start, u64 end)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct extent_buffer *leaf;
+       struct btrfs_path *path;
+       struct btrfs_file_extent_item *fi;
+       struct btrfs_key key;
+       struct btrfs_key new_key;
+       u64 bytenr;
+       u64 num_bytes;
+       u64 extent_end;
+       u64 orig_offset;
+       u64 other_start;
+       u64 other_end;
+       u64 split;
+       int del_nr = 0;
+       int del_slot = 0;
+       int recow;
+       int ret;
+
+       btrfs_drop_extent_cache(inode, start, end - 1, 0);
+
+       path = btrfs_alloc_path();
+       BUG_ON(!path);
+again:
+       recow = 0;
+       split = start;
+       key.objectid = inode->i_ino;
+       key.type = BTRFS_EXTENT_DATA_KEY;
+       key.offset = split;
+
+       ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+       if (ret > 0 && path->slots[0] > 0)
+               path->slots[0]--;
+
+       leaf = path->nodes[0];
+       btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+       BUG_ON(key.objectid != inode->i_ino ||
+              key.type != BTRFS_EXTENT_DATA_KEY);
+       fi = btrfs_item_ptr(leaf, path->slots[0],
+                           struct btrfs_file_extent_item);
+       BUG_ON(btrfs_file_extent_type(leaf, fi) !=
+              BTRFS_FILE_EXTENT_PREALLOC);
+       extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
+       BUG_ON(key.offset > start || extent_end < end);
+
+       bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
+       num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
+       orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
+       memcpy(&new_key, &key, sizeof(new_key));
+
+       if (start == key.offset && end < extent_end) {
+               other_start = 0;
+               other_end = start;
+               if (extent_mergeable(leaf, path->slots[0] - 1,
+                                    inode->i_ino, bytenr, orig_offset,
+                                    &other_start, &other_end)) {
+                       new_key.offset = end;
+                       btrfs_set_item_key_safe(trans, root, path, &new_key);
+                       fi = btrfs_item_ptr(leaf, path->slots[0],
+                                           struct btrfs_file_extent_item);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
                                                        extent_end - end);
-                       btrfs_set_file_extent_type(leaf, extent,
-                                                  BTRFS_FILE_EXTENT_REG);
-
-                       btrfs_mark_buffer_dirty(path->nodes[0]);
-                       if (le64_to_cpu(old.disk_bytenr) != 0) {
-                               inode->i_blocks +=
-                                     btrfs_file_extent_num_bytes(leaf,
-                                                                 extent) >> 9;
-                       }
-                       ret = 0;
+                       btrfs_set_file_extent_offset(leaf, fi,
+                                                    end - orig_offset);
+                       fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+                                           struct btrfs_file_extent_item);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       end - other_start);
+                       btrfs_mark_buffer_dirty(leaf);
                        goto out;
                }
        }
+
+       if (start > key.offset && end == extent_end) {
+               other_start = end;
+               other_end = 0;
+               if (extent_mergeable(leaf, path->slots[0] + 1,
+                                    inode->i_ino, bytenr, orig_offset,
+                                    &other_start, &other_end)) {
+                       fi = btrfs_item_ptr(leaf, path->slots[0],
+                                           struct btrfs_file_extent_item);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       start - key.offset);
+                       path->slots[0]++;
+                       new_key.offset = start;
+                       btrfs_set_item_key_safe(trans, root, path, &new_key);
+
+                       fi = btrfs_item_ptr(leaf, path->slots[0],
+                                           struct btrfs_file_extent_item);
+                       btrfs_set_file_extent_num_bytes(leaf, fi,
+                                                       other_end - start);
+                       btrfs_set_file_extent_offset(leaf, fi,
+                                                    start - orig_offset);
+                       btrfs_mark_buffer_dirty(leaf);
+                       goto out;
+               }
+       }
+
+       while (start > key.offset || end < extent_end) {
+               if (key.offset == start)
+                       split = end;
+
+               new_key.offset = split;
+               ret = btrfs_duplicate_item(trans, root, path, &new_key);
+               if (ret == -EAGAIN) {
+                       btrfs_release_path(root, path);
+                       goto again;
+               }
+               BUG_ON(ret < 0);
+
+               leaf = path->nodes[0];
+               fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+                                   struct btrfs_file_extent_item);
+               btrfs_set_file_extent_num_bytes(leaf, fi,
+                                               split - key.offset);
+
+               fi = btrfs_item_ptr(leaf, path->slots[0],
+                                   struct btrfs_file_extent_item);
+
+               btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
+               btrfs_set_file_extent_num_bytes(leaf, fi,
+                                               extent_end - split);
+               btrfs_mark_buffer_dirty(leaf);
+
+               ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
+                                          root->root_key.objectid,
+                                          inode->i_ino, orig_offset);
+               BUG_ON(ret);
+
+               if (split == start) {
+                       key.offset = start;
+               } else {
+                       BUG_ON(start != key.offset);
+                       path->slots[0]--;
+                       extent_end = end;
+               }
+               recow = 1;
+       }
+
+       other_start = end;
+       other_end = 0;
+       if (extent_mergeable(leaf, path->slots[0] + 1,
+                            inode->i_ino, bytenr, orig_offset,
+                            &other_start, &other_end)) {
+               if (recow) {
+                       btrfs_release_path(root, path);
+                       goto again;
+               }
+               extent_end = other_end;
+               del_slot = path->slots[0] + 1;
+               del_nr++;
+               ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+                                       0, root->root_key.objectid,
+                                       inode->i_ino, orig_offset);
+               BUG_ON(ret);
+       }
+       other_start = 0;
+       other_end = start;
+       if (extent_mergeable(leaf, path->slots[0] - 1,
+                            inode->i_ino, bytenr, orig_offset,
+                            &other_start, &other_end)) {
+               if (recow) {
+                       btrfs_release_path(root, path);
+                       goto again;
+               }
+               key.offset = other_start;
+               del_slot = path->slots[0];
+               del_nr++;
+               ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
+                                       0, root->root_key.objectid,
+                                       inode->i_ino, orig_offset);
+               BUG_ON(ret);
+       }
+       if (del_nr == 0) {
+               fi = btrfs_item_ptr(leaf, path->slots[0],
+                          struct btrfs_file_extent_item);
+               btrfs_set_file_extent_type(leaf, fi,
+                                          BTRFS_FILE_EXTENT_REG);
+               btrfs_mark_buffer_dirty(leaf);
+       } else {
+               fi = btrfs_item_ptr(leaf, del_slot - 1,
+                          struct btrfs_file_extent_item);
+               btrfs_set_file_extent_type(leaf, fi,
+                                          BTRFS_FILE_EXTENT_REG);
+               btrfs_set_file_extent_num_bytes(leaf, fi,
+                                               extent_end - key.offset);
+               btrfs_mark_buffer_dirty(leaf);
+
+               ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+               BUG_ON(ret);
+       }
 out:
        btrfs_free_path(path);
-       btrfs_check_file(root, inode);
-       return ret;
+       return 0;
 }
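/*
 * Editor's note, not part of the diff: ignoring the extent_mergeable()
 * merging with neighbours that the function also performs, the item layout
 * btrfs_mark_extent_written() leaves behind for a preallocated extent
 * [key.offset, extent_end) with a written range [start, end) is:
 *
 *   start == key.offset && end == extent_end:  one item, flipped to REG
 *   start == key.offset && end <  extent_end:  two items, front part REG
 *   start >  key.offset && end == extent_end:  two items, tail part REG
 *   start >  key.offset && end <  extent_end:  three items, middle part REG
 */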
 
 /*
- * this gets pages into the page cache and locks them down
+ * this gets pages into the page cache and locks them down.  It also properly
+ * waits for data=ordered extents to finish before allowing the pages to be
+ * modified.
  */
-static int prepare_pages(struct btrfs_root *root, struct file *file,
+static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
                         struct page **pages, size_t num_pages,
                         loff_t pos, unsigned long first_index,
                         unsigned long last_index, size_t write_bytes)
 {
+       struct extent_state *cached_state = NULL;
        int i;
        unsigned long index = pos >> PAGE_CACHE_SHIFT;
        struct inode *inode = fdentry(file)->d_inode;
@@ -802,6 +775,12 @@ static int prepare_pages(struct btrfs_root *root, struct file *file,
        start_pos = pos & ~((u64)root->sectorsize - 1);
        last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
 
+       if (start_pos > inode->i_size) {
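+               /* expand the file so the range up to start_pos is a hole */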
+               err = btrfs_cont_expand(inode, start_pos);
+               if (err)
+                       return err;
+       }
+
        memset(pages, 0, num_pages * sizeof(struct page *));
 again:
        for (i = 0; i < num_pages; i++) {
@@ -814,15 +793,18 @@ again:
        }
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
-               lock_extent(&BTRFS_I(inode)->io_tree,
-                           start_pos, last_pos - 1, GFP_NOFS);
-               ordered = btrfs_lookup_first_ordered_extent(inode, last_pos -1);
+               lock_extent_bits(&BTRFS_I(inode)->io_tree,
+                                start_pos, last_pos - 1, 0, &cached_state,
+                                GFP_NOFS);
+               ordered = btrfs_lookup_first_ordered_extent(inode,
+                                                           last_pos - 1);
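+               /*
+                * an overlapping ordered extent means this range is still
+                * being written back; back out our locks so it can finish
+                * before we retry.
+                */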
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset < last_pos) {
                        btrfs_put_ordered_extent(ordered);
-                       unlock_extent(&BTRFS_I(inode)->io_tree,
-                                     start_pos, last_pos - 1, GFP_NOFS);
+                       unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                                            start_pos, last_pos - 1,
+                                            &cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                page_cache_release(pages[i]);
@@ -834,77 +816,122 @@ again:
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
 
-               clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
-                                 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
+               clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
+                                 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
+                                 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
                                  GFP_NOFS);
-               unlock_extent(&BTRFS_I(inode)->io_tree,
-                             start_pos, last_pos - 1, GFP_NOFS);
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                                    start_pos, last_pos - 1, &cached_state,
+                                    GFP_NOFS);
        }
        for (i = 0; i < num_pages; i++) {
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-               ClearPageDirty(pages[i]);
-#else
-               cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
-#endif
+               clear_page_dirty_for_io(pages[i]);
                set_page_extent_mapped(pages[i]);
                WARN_ON(!PageLocked(pages[i]));
        }
        return 0;
 }
 
-static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
-                               size_t count, loff_t *ppos)
+static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
+                                   const struct iovec *iov,
+                                   unsigned long nr_segs, loff_t pos)
 {
-       loff_t pos;
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = fdentry(file)->d_inode;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct page *pinned[2];
+       struct page **pages = NULL;
+       struct iov_iter i;
+       loff_t *ppos = &iocb->ki_pos;
        loff_t start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
+       size_t count;
+       size_t ocount;
        int ret = 0;
-       struct inode *inode = fdentry(file)->d_inode;
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct page **pages = NULL;
        int nrptrs;
-       struct page *pinned[2];
        unsigned long first_index;
        unsigned long last_index;
+       int will_write;
+       int buffered = 0;
+
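+       /*
+        * O_DSYNC, O_DIRECT and sync inodes all need the data on disk before
+        * this write returns.
+        */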
+       will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
+                     (file->f_flags & O_DIRECT));
 
-       nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
-                    PAGE_CACHE_SIZE / (sizeof(struct page *)));
        pinned[0] = NULL;
        pinned[1] = NULL;
 
-       pos = *ppos;
        start_pos = pos;
 
        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+       mutex_lock(&inode->i_mutex);
+
+       err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
+       if (err)
+               goto out;
+       count = ocount;
+
        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
-               goto out_nolock;
+               goto out;
+
        if (count == 0)
-               goto out_nolock;
-#ifdef REMOVE_SUID_PATH
-       err = remove_suid(&file->f_path);
-#else
-       err = remove_suid(fdentry(file));
-#endif
+               goto out;
+
+       err = file_remove_suid(file);
        if (err)
-               goto out_nolock;
+               goto out;
+
        file_update_time(file);
+       BTRFS_I(inode)->sequence++;
+
+       if (unlikely(file->f_flags & O_DIRECT)) {
+               num_written = generic_file_direct_write(iocb, iov, &nr_segs,
+                                                       pos, ppos, count,
+                                                       ocount);
+               /*
+                * the generic O_DIRECT code will update the in-memory i_size
+                * after the DIOs are done.  But our endio handlers that update
+                * the on-disk i_size never update past the in-memory i_size,
+                * so we need one more update here to catch any additions to
+                * the file.
+                */
+               if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
+                       btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
+                       mark_inode_dirty(inode);
+               }
+
+               if (num_written < 0) {
+                       ret = num_written;
+                       num_written = 0;
+                       goto out;
+               } else if (num_written == count) {
+                       /* pick up pos changes done by the generic code */
+                       pos = *ppos;
+                       goto out;
+               }
+               /*
+                * We are going to do buffered for the rest of the range, so we
+                * need to make sure to invalidate the buffered pages when we're
+                * done.
+                */
+               buffered = 1;
+               pos += num_written;
+       }
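+       /*
+        * the iov_iter below starts after anything the direct IO path above
+        * already wrote, so only the remaining bytes go through the page
+        * cache.
+        */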
 
+       iov_iter_init(&i, iov, nr_segs, count, num_written);
+       nrptrs = min((iov_iter_count(&i) + PAGE_CACHE_SIZE - 1) /
+                    PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
+                    (sizeof(struct page *)));
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
 
-       mutex_lock(&inode->i_mutex);
-       first_index = pos >> PAGE_CACHE_SHIFT;
-       last_index = (pos + count) >> PAGE_CACHE_SHIFT;
+       /* generic_write_checks can change our pos */
+       start_pos = pos;
 
-       /*
-        * if this is a nodatasum mount, force summing off for the inode
-        * all the time.  That way a later mount with summing on won't
-        * get confused
-        */
-       if (btrfs_test_opt(root, NODATASUM))
-               btrfs_set_flag(inode, NODATASUM);
+       first_index = pos >> PAGE_CACHE_SHIFT;
+       last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
 
        /*
         * there are lots of better ways to do this, but this code
@@ -921,7 +948,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                        unlock_page(pinned[0]);
                }
        }
-       if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
+       if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
                pinned[1] = grab_cache_page(inode->i_mapping, last_index);
                if (!PageUptodate(pinned[1])) {
                        ret = btrfs_readpage(NULL, pinned[1]);
@@ -932,54 +959,64 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
                }
        }
 
-       while(count > 0) {
+       while (iov_iter_count(&i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
-               size_t write_bytes = min(count, nrptrs *
-                                       (size_t)PAGE_CACHE_SIZE -
+               size_t write_bytes = min(iov_iter_count(&i),
+                                        nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
                size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
                                        PAGE_CACHE_SHIFT;
 
                WARN_ON(num_pages > nrptrs);
-               memset(pages, 0, sizeof(pages));
+               memset(pages, 0, sizeof(struct page *) * nrptrs);
 
-               ret = btrfs_check_free_space(root, write_bytes, 0);
+               ret = btrfs_delalloc_reserve_space(inode, write_bytes);
                if (ret)
                        goto out;
 
                ret = prepare_pages(root, file, pages, num_pages,
                                    pos, first_index, last_index,
                                    write_bytes);
-               if (ret)
+               if (ret) {
+                       btrfs_delalloc_release_space(inode, write_bytes);
                        goto out;
+               }
 
                ret = btrfs_copy_from_user(pos, num_pages,
-                                          write_bytes, pages, buf);
-               if (ret) {
-                       btrfs_drop_pages(pages, num_pages);
-                       goto out;
+                                          write_bytes, pages, &i);
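+               /* only dirty the pages if the copy from userspace succeeded */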
+               if (ret == 0) {
+                       dirty_and_release_pages(NULL, root, file, pages,
+                                               num_pages, pos, write_bytes);
                }
 
-               ret = dirty_and_release_pages(NULL, root, file, pages,
-                                             num_pages, pos, write_bytes);
                btrfs_drop_pages(pages, num_pages);
-               if (ret)
+               if (ret) {
+                       btrfs_delalloc_release_space(inode, write_bytes);
                        goto out;
+               }
+
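+               /*
+                * sync-style writes need the IO started now; otherwise just
+                * throttle against dirty page and dirty btree buildup.
+                */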
+               if (will_write) {
+                       filemap_fdatawrite_range(inode->i_mapping, pos,
+                                                pos + write_bytes - 1);
+               } else {
+                       balance_dirty_pages_ratelimited_nr(inode->i_mapping,
+                                                          num_pages);
+                       if (num_pages <
+                           (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
+                               btrfs_btree_balance_dirty(root, 1);
+                       btrfs_throttle(root);
+               }
 
-               buf += write_bytes;
-               count -= write_bytes;
                pos += write_bytes;
                num_written += write_bytes;
 
-               balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
-               if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
-                       btrfs_btree_balance_dirty(root, 1);
                cond_resched();
        }
 out:
        mutex_unlock(&inode->i_mutex);
+       if (ret)
+               err = ret;
 
-out_nolock:
        kfree(pages);
        if (pinned[0])
                page_cache_release(pinned[0]);
@@ -987,46 +1024,98 @@ out_nolock:
                page_cache_release(pinned[1]);
        *ppos = pos;
 
-       if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-               err = sync_page_range(inode, inode->i_mapping,
-                                     start_pos, num_written);
-               if (err < 0)
+       /*
+        * we want to make sure fsync finds this change
+        * but we haven't joined a transaction running right now.
+        *
+        * Later on, someone is sure to update the inode and get the
+        * real transid recorded.
+        *
+        * We set last_trans now to the fs_info generation + 1,
+        * this will either be one more than the running transaction
+        * or the generation used for the next transaction if there isn't
+        * one running right now.
+        */
+       BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
+
+       if (num_written > 0 && will_write) {
+               struct btrfs_trans_handle *trans;
+
+               err = btrfs_wait_ordered_range(inode, start_pos, num_written);
+               if (err)
                        num_written = err;
-       } else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-               do_sync_file_range(file, start_pos,
-                                     start_pos + num_written - 1,
-                                     SYNC_FILE_RANGE_WRITE |
-                                     SYNC_FILE_RANGE_WAIT_AFTER);
-#else
-               do_sync_mapping_range(inode->i_mapping, start_pos,
-                                     start_pos + num_written - 1,
-                                     SYNC_FILE_RANGE_WRITE |
-                                     SYNC_FILE_RANGE_WAIT_AFTER);
-#endif
-               invalidate_mapping_pages(inode->i_mapping,
-                     start_pos >> PAGE_CACHE_SHIFT,
-                    (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
+
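+               /*
+                * O_DSYNC and sync inodes go through the tree log; if the
+                * inode can't be logged we fall back to a full transaction
+                * commit.
+                */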
+               if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
+                       trans = btrfs_start_transaction(root, 0);
+                       ret = btrfs_log_dentry_safe(trans, root,
+                                                   file->f_dentry);
+                       if (ret == 0) {
+                               ret = btrfs_sync_log(trans, root);
+                               if (ret == 0)
+                                       btrfs_end_transaction(trans, root);
+                               else
+                                       btrfs_commit_transaction(trans, root);
+                       } else if (ret != BTRFS_NO_LOG_SYNC) {
+                               btrfs_commit_transaction(trans, root);
+                       } else {
+                               btrfs_end_transaction(trans, root);
+                       }
+               }
+               if (file->f_flags & O_DIRECT && buffered) {
+                       invalidate_mapping_pages(inode->i_mapping,
+                             start_pos >> PAGE_CACHE_SHIFT,
+                            (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
+               }
        }
        current->backing_dev_info = NULL;
        return num_written ? num_written : err;
 }
 
-int btrfs_release_file(struct inode * inode, struct file * filp)
+int btrfs_release_file(struct inode *inode, struct file *filp)
 {
+       /*
+        * ordered_data_close is set by setattr when we are about to truncate
+        * a file from a non-zero size to a zero size.  This tries to
+        * flush down new bytes that may have been written if the
+        * application were using truncate to replace a file in place.
+        */
+       if (BTRFS_I(inode)->ordered_data_close) {
+               BTRFS_I(inode)->ordered_data_close = 0;
+               btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
+               if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
+                       filemap_flush(inode->i_mapping);
+       }
        if (filp->private_data)
                btrfs_ioctl_trans_end(filp);
        return 0;
 }
 
-static int btrfs_sync_file(struct file *file,
-                          struct dentry *dentry, int datasync)
+/*
+ * fsync call for both files and directories.  Whenever possible this logs
+ * the inode into the tree log instead of forcing a full commit.
+ *
+ * It needs to call filemap_fdatawait so that all of the ordered extent
+ * updates in the metadata btree are up to date for copying to the log.
+ *
+ * It drops the inode mutex before doing the tree log commit.  This is an
+ * important optimization for directories because holding the mutex prevents
+ * new operations on the dir while we write to disk.
+ */
+int btrfs_sync_file(struct file *file, int datasync)
 {
+       struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        struct btrfs_trans_handle *trans;
 
+
+       /* we wait first, since the writeback may change the inode */
+       root->log_batch++;
+       /* the VFS called filemap_fdatawrite for us */
+       btrfs_wait_ordered_range(inode, 0, (u64)-1);
+       root->log_batch++;
+
        /*
         * check the transaction that last modified this inode
         * and see if its already been committed
@@ -1034,6 +1123,11 @@ static int btrfs_sync_file(struct file *file,
        if (!BTRFS_I(inode)->last_trans)
                goto out;
 
+       /*
+        * if the last transaction that changed this file was before
+        * the current transaction, we can bail out now without any
+        * syncing
+        */
        mutex_lock(&root->fs_info->trans_mutex);
        if (BTRFS_I(inode)->last_trans <=
            root->fs_info->last_trans_committed) {
@@ -1046,26 +1140,51 @@ static int btrfs_sync_file(struct file *file,
        /*
         * ok we haven't committed the transaction yet, lets do a commit
         */
-       if (file->private_data)
+       if (file && file->private_data)
                btrfs_ioctl_trans_end(file);
 
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans) {
-               ret = -ENOMEM;
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out;
+       }
+
+       ret = btrfs_log_dentry_safe(trans, root, dentry);
+       if (ret < 0)
                goto out;
+
+       /* we've logged all the items and now have a consistent
+        * version of the file in the log.  It is possible that
+        * someone will come in and modify the file, but that's
+        * fine because the log is consistent on disk, and we
+        * have references to all of the file's extents.
+        *
+        * It is possible that someone will come in and log the
+        * file again, but that will end up using the synchronization
+        * inside btrfs_sync_log to keep things safe.
+        */
+       mutex_unlock(&dentry->d_inode->i_mutex);
+
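+       /*
+        * a positive return from the logging code means we must fall back to
+        * a full transaction commit; BTRFS_NO_LOG_SYNC means there is nothing
+        * in the log that needs syncing.
+        */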
+       if (ret != BTRFS_NO_LOG_SYNC) {
+               if (ret > 0) {
+                       ret = btrfs_commit_transaction(trans, root);
+               } else {
+                       ret = btrfs_sync_log(trans, root);
+                       if (ret == 0)
+                               ret = btrfs_end_transaction(trans, root);
+                       else
+                               ret = btrfs_commit_transaction(trans, root);
+               }
+       } else {
+               ret = btrfs_end_transaction(trans, root);
        }
-       ret = btrfs_commit_transaction(trans, root);
+       mutex_lock(&dentry->d_inode->i_mutex);
 out:
-       return ret > 0 ? EIO : ret;
+       return ret > 0 ? -EIO : ret;
 }
 
-static struct vm_operations_struct btrfs_file_vm_ops = {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-       .nopage         = filemap_nopage,
-       .populate       = filemap_populate,
-#else
+static const struct vm_operations_struct btrfs_file_vm_ops = {
        .fault          = filemap_fault,
-#endif
        .page_mkwrite   = btrfs_page_mkwrite,
 };
 
@@ -1076,15 +1195,12 @@ static int btrfs_file_mmap(struct file  *filp, struct vm_area_struct *vma)
        return 0;
 }
 
-struct file_operations btrfs_file_operations = {
+const struct file_operations btrfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .aio_read       = generic_file_aio_read,
        .splice_read    = generic_file_splice_read,
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-       .sendfile       = generic_file_sendfile,
-#endif
-       .write          = btrfs_file_write,
+       .aio_write      = btrfs_file_aio_write,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
@@ -1094,4 +1210,3 @@ struct file_operations btrfs_file_operations = {
        .compat_ioctl   = btrfs_ioctl,
 #endif
 };
-