/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "ref-cache.h"
#include "compression.h"
struct btrfs_iget_args {
	struct btrfs_root *root;
};
static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;
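/*
 * map an inode's mode bits to the BTRFS_FT_* value stored in directory
 * items, e.g. btrfs_type_by_mode[S_IFDIR >> S_SHIFT] is BTRFS_FT_DIR
 */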
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
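/*
 * set up the security attributes for a newly created inode: inherit
 * ACLs from the parent directory and initialize the security xattr
 */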
static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
{
	int err = btrfs_init_acl(inode, dir);

	/* only set up the security xattr if the ACL init succeeded */
	if (!err)
		err = btrfs_xattr_security_init(inode, dir);
	return err;
}
/*
 * a very lame attempt at stopping writes when the FS is 85% full.  There
 * are countless ways this is incorrect, but it is better than nothing.
 */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
	spin_lock(&root->fs_info->delalloc_lock);
	total = btrfs_super_total_bytes(&root->fs_info->super_copy);
	used = btrfs_super_bytes_used(&root->fs_info->super_copy);
	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
	spin_unlock(&root->fs_info->delalloc_lock);
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	struct btrfs_file_extent_item *ei;
	size_t cur_size = size;
	unsigned long offset;
	int use_compress = 0;
	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();

	btrfs_set_trans_block_group(trans, inode);
	key.objectid = inode->i_ino;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);
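	/*
	 * copy the data into the inline area: from the compressed pages
	 * when compression was used, otherwise straight from the page cache
	 */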
	while (compressed_size > 0) {
		cpage = compressed_pages[i];
		cur_size = min_t(unsigned long, compressed_size,
		write_extent_buffer(leaf, kaddr, ptr, cur_size);
		compressed_size -= cur_size;
	}
	btrfs_set_file_extent_compression(leaf, ei,
					  BTRFS_COMPRESS_ZLIB);
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);

	btrfs_free_path(path);
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			  ~((u64)root->sectorsize - 1);
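	/*
	 * aligned_end rounds the inclusive end offset up to a sector
	 * boundary: with 4K sectors, end 8191 gives 8192 and end 8193
	 * gives 12288
	 */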
	u64 data_len = inline_len;

	if (compressed_size)
		data_len = compressed_size;
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    data_len > root->fs_info->max_inline) {
	ret = btrfs_drop_extents(trans, root, inode, start,
				 aligned_end, start, &hint_byte);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 blocksize = root->sectorsize;
	u64 isize = i_size_read(inode);
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	actual_end = min_t(u64, isize, end + 1);

	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	total_compressed = actual_end - start;
	/*
	 * we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
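	/*
	 * num_bytes below is the range length rounded up to a block
	 * boundary: with 4K blocks, start 0 and end 5000 (5001 bytes)
	 * round up to 8192
	 */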
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
			unsigned long offset = total_compressed &
					       (PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/*
			 * zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + offset, 0,
			       PAGE_CACHE_SIZE - offset);
			kunmap_atomic(kaddr, KM_USER0);
	trans = btrfs_join_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);
	/* let's try to make an inline extent */
	if (ret || total_in < (actual_end - start)) {
		/*
		 * we didn't compress the entire range, try
		 * to make an uncompressed inline extent.
		 */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
	} else {
		/* try making a compressed inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end,
					    total_compressed, pages);
	}
	btrfs_end_transaction(trans, root);
		/*
		 * inline extent creation worked, we don't need
		 * to create any more async work items.  Unlock
		 * and free up our temp pages.
		 */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, NULL, 1, 0,
	/*
	 * we aren't doing an inline extent, so round the compressed size
	 * up to a block size boundary so the allocator does sane
	 * things
	 */
	total_compressed = (total_compressed + blocksize - 1) &
			   ~(blocksize - 1);
	/*
	 * one last check to make sure the compression is really a
	 * win, compare the page count read with the blocks on disk
	 */
	total_in = (total_in + PAGE_CACHE_SIZE - 1) &
		   ~(PAGE_CACHE_SIZE - 1);
	if (total_compressed >= total_in) {
		will_compress = 0;
	} else {
		disk_num_bytes = total_compressed;
		num_bytes = total_in;
		will_compress = 1;
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		total_compressed = 0;

		/* flag the file so we don't compress in the future */
		btrfs_set_flag(inode, NOCOMPRESS);
	}
	/*
	 * the async work queues will take care of doing actual
	 * allocation on disk for these compressed pages,
	 * and will submit them to the elevator.
	 */
	add_async_extent(async_cow, start, num_bytes,
			 total_compressed, pages, nr_pages_ret);

	if (start + num_bytes < end && start + num_bytes < actual_end) {
	/*
	 * No compression, but we still need to write the pages in
	 * the file we've been given so far.  redirty the locked
	 * page if it corresponds to our extent and set things up
	 * for the async work queue to run cow_file_range to do
	 * the normal delalloc dance
	 */
	if (page_offset(locked_page) >= start &&
	    page_offset(locked_page) <= end) {
		__set_page_dirty_nobuffers(locked_page);
		/* unlocked later on in the async handlers */
	}
	add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	if (list_empty(&async_cow->extents))
		return 0;

	trans = btrfs_join_transaction(root, 1);

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1, GFP_NOFS);

			/* allocate blocks */
			cow_file_range(inode, async_cow->locked_page,
				       async_extent->start,
				       async_extent->start +
				       async_extent->ram_size - 1,
				       &page_started, &nr_written, 0);
			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started)
				extent_write_locked_range(io_tree,
						inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1,
		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);
		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
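		/*
		 * try to insert the mapping; -EEXIST means a racing lookup
		 * cached an older extent for this range, so the cache is
		 * dropped and the insert retried
		 */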
		spin_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		spin_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);
		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);

		btrfs_end_transaction(trans, root);
		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     NULL, 1, 1, 0, 1, 1, 0);
		ret = btrfs_submit_compressed_write(inode,
					async_extent->start,
					async_extent->ram_size,
					ins.objectid,
					ins.offset, async_extent->pages,
					async_extent->nr_pages);
		trans = btrfs_join_transaction(root, 1);
		alloc_hint = ins.objectid + ins.offset;
	}

	btrfs_end_transaction(trans, root);
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	unsigned long ram_size;
	u64 blocksize = root->sectorsize;
	u64 isize = i_size_read(inode);
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	trans = btrfs_join_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, isize, end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	/* let's try to make an inline extent */
	ret = cow_file_range_inline(trans, root, inode,
				    start, end, 0, NULL);
	if (ret == 0) {
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, NULL, 1, 1,
		*nr_written = *nr_written +
		     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
	while (disk_num_bytes > 0) {
		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
		em = alloc_extent_map(GFP_NOFS);
		em->start = start;
		em->orig_start = em->start;

		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		spin_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		spin_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
		btrfs_drop_extent_cache(inode, start,
					start + ram_size - 1, 0);
		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
		}

		if (disk_num_bytes < cur_alloc_size)
			break;
		/*
		 * we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, unlock, 1,
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}

	btrfs_end_transaction(trans, root);
/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;

	async_cow = container_of(work, struct async_cow, work);
	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
	async_cow->inode = NULL;
}
/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		   PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;
	if (!btrfs_test_opt(root, COMPRESS)) {
		return cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;
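		/*
		 * carve the delalloc range into 512K chunks so the
		 * compression work can be spread across CPUs; inodes
		 * flagged NOCOMPRESS take the whole range in one chunk
		 */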
		if (btrfs_test_flag(inode, NOCOMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;
		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			   PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);
		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}
		*nr_written += nr_pages;
		start = cur_end + 1;
	}
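/*
 * check whether any checksums are already recorded for a byte range;
 * the nocow path uses this to force a COW when csums exist, so the
 * csums for a given extent end up either complete or absent
 */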
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list);
	if (ret == 0 && list_empty(&list))
		return 0;
	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
/*
 * run when the nocow writeback path is called back.  This checks for
 * snapshots or COW copies of the extents that exist in the file, and
 * COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	path = btrfs_alloc_path();
	trans = btrfs_join_transaction(root, 1);

	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
	if (ret > 0 && path->slots[0] > 0 && check_prev) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key,
				      path->slots[0] - 1);
		if (found_key.objectid == inode->i_ino &&
		    found_key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}
	leaf = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(leaf)) {
		ret = btrfs_next_leaf(root, path);
		leaf = path->nodes[0];
	}

	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid > inode->i_ino ||
	    found_key.type > BTRFS_EXTENT_DATA_KEY ||
	    found_key.offset > end)
		break;

	if (found_key.offset > cur_offset) {
		extent_end = found_key.offset;
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, fi);
		if (extent_end <= start) {
		if (disk_bytenr == 0)
		if (btrfs_file_extent_compression(leaf, fi) ||
		    btrfs_file_extent_encryption(leaf, fi) ||
		    btrfs_file_extent_other_encoding(leaf, fi))
		if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
		if (btrfs_extent_readonly(root, disk_bytenr))
		if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
		disk_bytenr += btrfs_file_extent_offset(leaf, fi);
		disk_bytenr += cur_offset - found_key.offset;
		num_bytes = min(end + 1, extent_end) - cur_offset;
		/*
		 * force cow if csum exists in the range.
		 * this ensures that csums for a given extent are
		 * either valid or do not exist.
		 */
		if (csum_exist_in_range(root, disk_bytenr, num_bytes))
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, fi);
		extent_end = ALIGN(extent_end, root->sectorsize);
	}
	if (extent_end <= start) {
	if (cow_start == (u64)-1)
		cow_start = cur_offset;
	cur_offset = extent_end;
	if (cur_offset > end)
	btrfs_release_path(root, path);
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start,
				     found_key.offset - 1, page_started,
				     nr_written, 1);
		cow_start = (u64)-1;
	}
	if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		struct extent_map *em;
		struct extent_map_tree *em_tree;
		em_tree = &BTRFS_I(inode)->extent_tree;
		em = alloc_extent_map(GFP_NOFS);
		em->start = cur_offset;
		em->orig_start = em->start;
		em->len = num_bytes;
		em->block_len = num_bytes;
		em->block_start = disk_bytenr;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		spin_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		spin_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
		type = BTRFS_ORDERED_PREALLOC;
	} else {
		type = BTRFS_ORDERED_NOCOW;
	}
	ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
				       num_bytes, num_bytes, type);

	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     cur_offset, cur_offset + num_bytes - 1,
				     locked_page, 1, 1, 1, 0, 0, 0);
	cur_offset = extent_end;
	if (cur_offset > end)
		break;

	btrfs_release_path(root, path);
	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
	}
	ret = btrfs_end_transaction(trans, root);
	btrfs_free_path(path);
	return 0;
}
/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;

	if (btrfs_test_flag(inode, NODATACOW))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (btrfs_test_flag(inode, PREALLOC))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	return ret;
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
			      unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		spin_lock(&root->fs_info->delalloc_lock);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
				unsigned long old, unsigned long bits)
{
	/*
	 * set_bit and clear_bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock(&root->fs_info->delalloc_lock);
		if (end - start + 1 > root->fs_info->delalloc_bytes) {
			printk(KERN_INFO "btrfs warning: delalloc account "
			       "%llu %llu\n",
			       (unsigned long long)(end - start + 1),
			       (unsigned long long)
			       root->fs_info->delalloc_bytes);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			root->fs_info->delalloc_bytes -= end - start + 1;
			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock(&root->fs_info->delalloc_lock);
	}
	return 0;
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;
	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
/*
 * extent_io.c submission hook.  This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	skip_sum = btrfs_test_flag(inode, NODATASUM);
	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);

	if (!(rw & (1 << BIO_RW))) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					   inode, rw, bio, mirror_num,
					   bio_flags, __btrfs_submit_bio_start,
					   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);

	list_for_each_entry(sum, list, list) {
		btrfs_csum_file_blocks(trans,
			BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}
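/*
 * mark a range in the io tree as delalloc; end is expected to be the
 * last byte of a page, not a page aligned offset
 */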
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
		WARN_ON(1);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};
static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			   EXTENT_ORDERED, 0)) {
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
	}
	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);

	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	page_cache_release(page);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			     EXTENT_ORDERED, 0);
	if (ret)
		return 0;
	if (PageChecked(page))
		return -EAGAIN;
	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;
	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}
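/*
 * insert the file extent item for an ordered extent whose data is on
 * disk, and record the allocation in the extent tree
 */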
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();

	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, file_pos, &hint);
	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
	btrfs_mark_buffer_dirty(leaf);
	inode_add_bytes(inode, num_bytes);
	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
					  root->root_key.objectid,
					  trans->transid, inode->i_ino, &ins);
	btrfs_free_path(path);
	return 0;
}
/*
 * as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	trans = btrfs_join_transaction(root, 1);

	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
		goto nocow;

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		ret = btrfs_mark_extent_written(trans, root, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
nocow:
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);
	mutex_lock(&BTRFS_I(inode)->extent_mutex);
	btrfs_ordered_update_i_size(inode, ordered_extent);
	btrfs_update_inode(trans, root, inode);
	btrfs_remove_ordered_extent(inode, ordered_extent);
	mutex_unlock(&BTRFS_I(inode)->extent_mutex);
	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	btrfs_end_transaction(trans, root);
	return 0;
}
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
 * When IO fails, either with EIO or because csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};
static int btrfs_io_failed_hook(struct bio *failed_bio,
				struct page *page, u64 start, u64 end,
				struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;
	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;
		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		spin_unlock(&em_tree->lock);
		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     failrec->bio_flags);
	return 0;
}
/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}
/*
 * when reads are done, we need to check csums to verify the data is correct.
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
				      struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;
	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}

	if (btrfs_test_flag(inode, NODATASUM))
		return 0;
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}
	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
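	/*
	 * compare a fresh crc32c of the page contents against the csum
	 * stashed in the io tree at bio submit time; a mismatch poisons
	 * the range and fails the bio so a good mirror can be tried
	 */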
	kaddr = kmap_atomic(page, KM_USER0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_USER0);
good:
	/*
	 * if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;
zeroit:
	printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
	       "private %llu\n", page->mapping->host->i_ino,
	       (unsigned long long)start, csum,
	       (unsigned long long)private);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	return -EIO;
}
/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	/* already on the orphan list, we're good */
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

	spin_unlock(&root->list_lock);
	/*
	 * insert an orphan item to track this unlinked/truncated file
	 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

	return ret;
}
1820 * item for this particular inode.
1822 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1824 struct btrfs_root *root = BTRFS_I(inode)->root;
1827 spin_lock(&root->list_lock);
1829 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1830 spin_unlock(&root->list_lock);
1834 list_del_init(&BTRFS_I(inode)->i_orphan);
1836 spin_unlock(&root->list_lock);
1840 spin_unlock(&root->list_lock);
1842 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
void btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;
	path = btrfs_alloc_path();
	if (!path)
		return;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			printk(KERN_ERR "Error searching slot for orphan: %d"
			       "\n", ret);
			break;
		}
		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		/* pull out the item */
		leaf = path->nodes[0];
		item = btrfs_item_nr(leaf, path->slots[0]);
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);
		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		inode = btrfs_iget_locked(root->fs_info->sb,
					  found_key.offset, root);
		if (inode->i_state & I_NEW) {
			BTRFS_I(inode)->root = root;

			/* have to set the location manually */
			BTRFS_I(inode)->location.objectid = inode->i_ino;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;

			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}
		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->list_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->list_lock);
		/*
		 * if this is a bad inode, means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 1);
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}
		/* if we have links, this was a truncate, let's do that */
		if (inode->i_nlink) {
			nr_truncate++;
			btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

	btrfs_free_path(path);
}
/*
 * read an inode from the btree into the in-memory inode
 */
void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	u64 alloc_group_block;
	u32 rdev;
	int ret;
1985 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
1987 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
1991 leaf = path->nodes[0];
1992 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1993 struct btrfs_inode_item);
1995 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
1996 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
1997 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
1998 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
1999 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
						alloc_group_block, 0);
	btrfs_free_path(path);
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}
	return;
make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
}
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;

	path = btrfs_alloc_path();
	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);
	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  inode->i_ino,
				  dir->i_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       inode->i_ino, dir->i_ino);
		goto err;
	}
	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);
	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir->i_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	BTRFS_I(dir)->log_dirty_trans = trans->transid;

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
err:
	btrfs_free_path(path);
	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
	btrfs_drop_nlink(inode);
	ret = btrfs_update_inode(trans, root, inode);
	dir->i_sb->s_dirt = 1;
	return ret;
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	root = BTRFS_I(dir)->root;
2231 ret = btrfs_check_free_space(root, 1, 1);
2235 trans = btrfs_start_transaction(root, 1);
2237 btrfs_set_trans_block_group(trans, dir);
2238 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2239 dentry->d_name.name, dentry->d_name.len);
2241 if (inode->i_nlink == 0)
2242 ret = btrfs_orphan_add(trans, inode);
2244 nr = trans->blocks_used;
2246 btrfs_end_transaction_throttle(trans, root);
2248 btrfs_btree_balance_dirty(root, nr);
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	int ret;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	/*
	 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
	 * the root of a subvolume or snapshot
	 */
	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
	    inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
		return -ENOTEMPTY;
	}

	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto fail_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);
fail_trans:
	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
fail:
	btrfs_btree_balance_dirty(root, nr);
	if (ret && !err)
		err = ret;
	return err;
}
/*
 * when truncating bytes in a file, it is possible to avoid reading
 * the leaves that contain only checksum items.  This can be the
 * majority of the IO required to delete a large file, but it must
 * be done carefully.
 *
 * The keys in the level just above the leaves are checked to make sure
 * the lowest key in a given leaf is a csum key, and starts at an offset
 * after the new size.
 *
 * Then the key for the next leaf is checked to make sure it also has
 * a checksum item for the same file.  If it does, we know our target leaf
 * contains only checksum items, and it can be safely freed without reading
 * them.
 *
 * This is just an optimization targeted at large files.  It may do
 * nothing.  It will return 0 unless things went badly.
 */
static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct inode *inode, u64 new_size)
{
	struct btrfs_key key;
	int ret;
	int nritems;
	struct btrfs_key found_key;
	struct btrfs_key other_key;
	struct btrfs_leaf_ref *ref;
	u64 leaf_gen;
	u64 leaf_start;

	path->lowest_level = 1;
	key.objectid = inode->i_ino;
	key.type = BTRFS_CSUM_ITEM_KEY;
	key.offset = new_size;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (path->nodes[1] == NULL) {
		ret = 0;
		goto out;
	}
	ret = 0;
	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
	nritems = btrfs_header_nritems(path->nodes[1]);
	if (!nritems)
		goto out;

	if (path->slots[1] >= nritems)
		goto next_node;

	/* did we find a key greater than anything we want to delete? */
	if (found_key.objectid > inode->i_ino ||
	   (found_key.objectid == inode->i_ino && found_key.type > key.type))
		goto out;

	/* we check the next key in the node to make sure the leaf contains
	 * only checksum items.  This comparison doesn't work if our
	 * leaf is the last one in the node
	 */
	if (path->slots[1] + 1 >= nritems) {
next_node:
		/* search forward from the last key in the node, this
		 * will bring us into the next node in the tree
		 */
		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);

		/* unlikely, but we inc below, so check to be safe */
		if (found_key.offset == (u64)-1)
			goto out;

		/* search_forward needs a path with locks held, do the
		 * search again for the original key.  It is possible
		 * this will race with a balance and return a path that
		 * we could modify, but this drop is just an optimization
		 * and is allowed to miss some leaves.
		 */
		btrfs_release_path(root, path);
		found_key.offset++;

		/* setup a max key for search_forward */
		other_key.offset = (u64)-1;
		other_key.type = key.type;
		other_key.objectid = key.objectid;

		path->keep_locks = 1;
		ret = btrfs_search_forward(root, &found_key, &other_key,
					   path, 0, 0);
		path->keep_locks = 0;
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		key.offset = found_key.offset;
		btrfs_release_path(root, path);
		cond_resched();
		goto again;
	}

	/* we know there's one more slot after us in the tree,
	 * read that key so we can verify it is also a checksum item
	 */
	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

	if (found_key.objectid < inode->i_ino)
		goto next_key;

	if (found_key.type != key.type || found_key.offset < new_size)
		goto next_key;

	/*
	 * if the key for the next leaf isn't a csum key from this objectid,
	 * we can't be sure there aren't good items inside this leaf.
	 * bail out
	 */
	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
		goto out;

	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
	/*
	 * it is safe to delete this leaf, it contains only
	 * csum items from this inode at an offset >= new_size
	 */
	ret = btrfs_del_leaf(trans, root, path, leaf_start);
	BUG_ON(ret);

	if (root->ref_cows && leaf_gen < trans->transid) {
		ref = btrfs_alloc_leaf_ref(root, 0);
		if (ref) {
			ref->root_gen = root->root_key.offset;
			ref->bytenr = leaf_start;
			ref->owner = 0;
			ref->generation = leaf_gen;
			ref->nritems = 0;

			ret = btrfs_add_leaf_ref(root, ref, 0);
			WARN_ON(ret);
			btrfs_free_leaf_ref(root, ref);
		} else {
			WARN_ON(1);
		}
	}
next_key:
	btrfs_release_path(root, path);

	if (other_key.objectid == inode->i_ino &&
	    other_key.type == key.type && other_key.offset > key.offset) {
		key.offset = other_key.offset;
		cond_resched();
		goto again;
	}
out:
	/* fixup any changes we've made to the path */
	path->lowest_level = 0;
	path->keep_locks = 0;
	btrfs_release_path(root, path);
	return ret;
}
2469 * this can truncate away extent items, csum items and directory items.
2470 * It starts at a high offset and removes keys until it can't find
2471 * any higher than new_size
2473 * csum items that cross the new i_size are truncated to the new size
2476 * min_type is the minimum key type to truncate down to. If set to 0, this
2477 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2479 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2480 struct btrfs_root *root,
2481 struct inode *inode,
2482 u64 new_size, u32 min_type)
2485 struct btrfs_path *path;
2486 struct btrfs_key key;
2487 struct btrfs_key found_key;
2489 struct extent_buffer *leaf;
2490 struct btrfs_file_extent_item *fi;
2491 u64 extent_start = 0;
2492 u64 extent_num_bytes = 0;
2498 int pending_del_nr = 0;
2499 int pending_del_slot = 0;
2500 int extent_type = -1;
2502 u64 mask = root->sectorsize - 1;
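	/*
	 * Strategy note: we search from the highest possible key for this
	 * inode and walk backwards, batching runs of adjacent slots into
	 * pending_del_slot/pending_del_nr so a single btrfs_del_items()
	 * call can drop a whole contiguous chunk of items at once.
	 */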
2505 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2506 path = btrfs_alloc_path();
2510 /* FIXME, add redo link to tree so we don't leak on crash */
2511 key.objectid = inode->i_ino;
2512 key.offset = (u64)-1;
2515 btrfs_init_path(path);
2518 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2523 /* there are no items in the tree for us to truncate, we're
2526 if (path->slots[0] == 0) {
2535 leaf = path->nodes[0];
2536 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2537 found_type = btrfs_key_type(&found_key);
2540 if (found_key.objectid != inode->i_ino)
2543 if (found_type < min_type)
2546 item_end = found_key.offset;
2547 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2548 fi = btrfs_item_ptr(leaf, path->slots[0],
2549 struct btrfs_file_extent_item);
2550 extent_type = btrfs_file_extent_type(leaf, fi);
2551 encoding = btrfs_file_extent_compression(leaf, fi);
2552 encoding |= btrfs_file_extent_encryption(leaf, fi);
2553 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2555 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2557 btrfs_file_extent_num_bytes(leaf, fi);
2558 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2559 item_end += btrfs_file_extent_inline_len(leaf,
2564 if (item_end < new_size) {
2565 if (found_type == BTRFS_DIR_ITEM_KEY)
2566 found_type = BTRFS_INODE_ITEM_KEY;
2567 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2568 found_type = BTRFS_EXTENT_DATA_KEY;
2569 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2570 found_type = BTRFS_XATTR_ITEM_KEY;
2571 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2572 found_type = BTRFS_INODE_REF_KEY;
2573 else if (found_type)
2577 btrfs_set_key_type(&key, found_type);
2580 if (found_key.offset >= new_size)
2586 /* FIXME, shrink the extent if the ref count is only 1 */
2587 if (found_type != BTRFS_EXTENT_DATA_KEY)
2590 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2592 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2593 if (!del_item && !encoding) {
2594 u64 orig_num_bytes =
2595 btrfs_file_extent_num_bytes(leaf, fi);
2596 extent_num_bytes = new_size -
2597 found_key.offset + root->sectorsize - 1;
2598 extent_num_bytes = extent_num_bytes &
2599 ~((u64)root->sectorsize - 1);
2600 btrfs_set_file_extent_num_bytes(leaf, fi,
2602 num_dec = (orig_num_bytes -
2604 if (root->ref_cows && extent_start != 0)
2605 inode_sub_bytes(inode, num_dec);
2606 btrfs_mark_buffer_dirty(leaf);
2609 btrfs_file_extent_disk_num_bytes(leaf,
2611 /* FIXME blocksize != 4096 */
2612 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2613 if (extent_start != 0) {
2616 inode_sub_bytes(inode, num_dec);
2618 root_gen = btrfs_header_generation(leaf);
2619 root_owner = btrfs_header_owner(leaf);
2621 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2623 * we can't truncate inline items that have had
2627 btrfs_file_extent_compression(leaf, fi) == 0 &&
2628 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2629 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2630 u32 size = new_size - found_key.offset;
2632 if (root->ref_cows) {
2633 inode_sub_bytes(inode, item_end + 1 -
2637 btrfs_file_extent_calc_inline_size(size);
2638 ret = btrfs_truncate_item(trans, root, path,
2641 } else if (root->ref_cows) {
2642 inode_sub_bytes(inode, item_end + 1 -
2648 if (!pending_del_nr) {
2649 /* no pending yet, add ourselves */
2650 pending_del_slot = path->slots[0];
2652 } else if (pending_del_nr &&
2653 path->slots[0] + 1 == pending_del_slot) {
2654 /* hop on the pending chunk */
2656 pending_del_slot = path->slots[0];
2664 ret = btrfs_free_extent(trans, root, extent_start,
2666 leaf->start, root_owner,
2667 root_gen, inode->i_ino, 0);
2671 if (path->slots[0] == 0) {
2674 btrfs_release_path(root, path);
2679 if (pending_del_nr &&
2680 path->slots[0] + 1 != pending_del_slot) {
2681 struct btrfs_key debug;
2683 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2685 ret = btrfs_del_items(trans, root, path,
2690 btrfs_release_path(root, path);
2696 if (pending_del_nr) {
2697 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2700 btrfs_free_path(path);
2701 inode->i_sb->s_dirt = 1;
/*
 * taken from block_truncate_page, but does cow as it zeros out
 * any bytes left in the last page in the file.
 */
static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	char *kaddr;
	u32 blocksize = root->sectorsize;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	struct page *page;
	int ret = 0;
	u64 page_start;
	u64 page_end;

	if ((offset & (blocksize - 1)) == 0)
		goto out;

	ret = -ENOMEM;
again:
	page = grab_cache_page(mapping, index);
	if (!page)
		goto out;

	page_start = page_offset(page);
	page_end = page_start + PAGE_CACHE_SIZE - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			page_cache_release(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent(io_tree, page_start, page_end, GFP_NOFS);
	set_page_extent_mapped(page);

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ret = 0;
	if (offset != PAGE_CACHE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
out_unlock:
	unlock_page(page);
	page_cache_release(page);
out:
	return ret;
}
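/*
 * Expanding truncates leave a gap between the old EOF and the new one.
 * btrfs_cont_expand() below zeros the partial tail page and then plugs
 * the gap with explicit hole extents (disk_bytenr == 0), so later extent
 * lookups see real items covering the hole instead of nothing.
 */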
int btrfs_cont_expand(struct inode *inode, loff_t size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em;
	u64 mask = root->sectorsize - 1;
	u64 hole_start = (inode->i_size + mask) & ~mask;
	u64 block_end = (size + mask) & ~mask;
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	if (size <= hole_start)
		return 0;

	err = btrfs_check_free_space(root, 1, 0);
	if (err)
		return err;

	btrfs_truncate_page(inode->i_mapping, inode->i_size);

	while (1) {
		struct btrfs_ordered_extent *ordered;
		btrfs_wait_ordered_range(inode, hole_start,
					 block_end - hole_start);
		lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, hole_start);
		if (!ordered)
			break;
		unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
		btrfs_put_ordered_extent(ordered);
	}

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, inode);

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      block_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), block_end);
		last_byte = (last_byte + mask) & ~mask;
		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			u64 hint_byte = 0;
			hole_size = last_byte - cur_offset;
			err = btrfs_drop_extents(trans, root, inode,
						 cur_offset,
						 cur_offset + hole_size,
						 cur_offset, &hint_byte);
			if (err)
				break;
			err = btrfs_insert_file_extent(trans, root,
					inode->i_ino, cur_offset, 0,
					0, hole_size, 0, hole_size,
					0, 0, 0);
			btrfs_drop_extent_cache(inode, hole_start,
					last_byte - 1, 0);
		}
		free_extent_map(em);
		cur_offset = last_byte;
		if (err || cur_offset >= block_end)
			break;
	}

	btrfs_end_transaction(trans, root);
	unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
	return err;
}
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
		err = btrfs_cont_expand(inode, attr->ia_size);
		if (err)
			return err;
	}

	err = inode_setattr(inode, attr);
	if (!err && ((attr->ia_valid & ATTR_MODE)))
		err = btrfs_acl_chmod(inode);
	return err;
}
2878 void btrfs_delete_inode(struct inode *inode)
2880 struct btrfs_trans_handle *trans;
2881 struct btrfs_root *root = BTRFS_I(inode)->root;
2885 truncate_inode_pages(&inode->i_data, 0);
2886 if (is_bad_inode(inode)) {
2887 btrfs_orphan_del(NULL, inode);
2890 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2892 btrfs_i_size_write(inode, 0);
2893 trans = btrfs_join_transaction(root, 1);
2895 btrfs_set_trans_block_group(trans, inode);
2896 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2898 btrfs_orphan_del(NULL, inode);
2899 goto no_delete_lock;
2902 btrfs_orphan_del(trans, inode);
2904 nr = trans->blocks_used;
2907 btrfs_end_transaction(trans, root);
2908 btrfs_btree_balance_dirty(root, nr);
2912 nr = trans->blocks_used;
2913 btrfs_end_transaction(trans, root);
2914 btrfs_btree_balance_dirty(root, nr);
/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
				   namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);
	if (!di || IS_ERR(di))
		goto out_err;

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}
2954 * when we hit a tree root in a directory, the btrfs part of the inode
2955 * needs to be changed to reflect the root directory of the tree root. This
2956 * is kind of like crossing a mount point.
2958 static int fixup_tree_root_location(struct btrfs_root *root,
2959 struct btrfs_key *location,
2960 struct btrfs_root **sub_root,
2961 struct dentry *dentry)
2963 struct btrfs_root_item *ri;
2965 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2967 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2970 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2971 dentry->d_name.name,
2972 dentry->d_name.len);
2973 if (IS_ERR(*sub_root))
2974 return PTR_ERR(*sub_root);
2976 ri = &(*sub_root)->root_item;
2977 location->objectid = btrfs_root_dirid(ri);
2978 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2979 location->offset = 0;
2984 static noinline void init_btrfs_i(struct inode *inode)
2986 struct btrfs_inode *bi = BTRFS_I(inode);
2989 bi->i_default_acl = NULL;
2994 bi->logged_trans = 0;
2995 bi->delalloc_bytes = 0;
2996 bi->disk_i_size = 0;
2998 bi->index_cnt = (u64)-1;
2999 bi->log_dirty_trans = 0;
3000 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3001 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3002 inode->i_mapping, GFP_NOFS);
3003 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3004 inode->i_mapping, GFP_NOFS);
3005 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3006 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3007 mutex_init(&BTRFS_I(inode)->extent_mutex);
3008 mutex_init(&BTRFS_I(inode)->log_mutex);
3011 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3013 struct btrfs_iget_args *args = p;
3014 inode->i_ino = args->ino;
3015 init_btrfs_i(inode);
3016 BTRFS_I(inode)->root = args->root;
3020 static int btrfs_find_actor(struct inode *inode, void *opaque)
3022 struct btrfs_iget_args *args = opaque;
3023 return args->ino == inode->i_ino &&
3024 args->root == BTRFS_I(inode)->root;
3027 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3028 struct btrfs_root *root, int wait)
3030 struct inode *inode;
3031 struct btrfs_iget_args args;
3032 args.ino = objectid;
3036 inode = ilookup5(s, objectid, btrfs_find_actor,
3039 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3045 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3046 struct btrfs_root *root)
3048 struct inode *inode;
3049 struct btrfs_iget_args args;
3050 args.ino = objectid;
3053 inode = iget5_locked(s, objectid, btrfs_find_actor,
3054 btrfs_init_locked_inode,
/* Get an inode object given its location and corresponding root.
 * Returns in *is_new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *is_new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location->objectid, root);
	if (!inode)
		return ERR_PTR(-EACCES);

	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
		btrfs_read_locked_inode(inode);
		unlock_new_inode(inode);
		if (is_new)
			*is_new = 1;
	} else {
		if (is_new)
			*is_new = 0;
	}
	return inode;
}
3086 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3088 struct inode *inode;
3089 struct btrfs_inode *bi = BTRFS_I(dir);
3090 struct btrfs_root *root = bi->root;
3091 struct btrfs_root *sub_root = root;
3092 struct btrfs_key location;
3095 if (dentry->d_name.len > BTRFS_NAME_LEN)
3096 return ERR_PTR(-ENAMETOOLONG);
3098 ret = btrfs_inode_by_name(dir, dentry, &location);
3101 return ERR_PTR(ret);
3104 if (location.objectid) {
3105 ret = fixup_tree_root_location(root, &location, &sub_root,
3108 return ERR_PTR(ret);
3110 return ERR_PTR(-ENOENT);
3111 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3113 return ERR_CAST(inode);
3118 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3119 struct nameidata *nd)
3121 struct inode *inode;
3123 if (dentry->d_name.len > BTRFS_NAME_LEN)
3124 return ERR_PTR(-ENAMETOOLONG);
3126 inode = btrfs_lookup_dentry(dir, dentry);
3128 return ERR_CAST(inode);
3130 return d_splice_alias(inode, dentry);
3133 static unsigned char btrfs_filetype_table[] = {
3134 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3137 static int btrfs_real_readdir(struct file *filp, void *dirent,
3140 struct inode *inode = filp->f_dentry->d_inode;
3141 struct btrfs_root *root = BTRFS_I(inode)->root;
3142 struct btrfs_item *item;
3143 struct btrfs_dir_item *di;
3144 struct btrfs_key key;
3145 struct btrfs_key found_key;
3146 struct btrfs_path *path;
3149 struct extent_buffer *leaf;
3152 unsigned char d_type;
3157 int key_type = BTRFS_DIR_INDEX_KEY;
3162 /* FIXME, use a real flag for deciding about the key type */
3163 if (root->fs_info->tree_root == root)
3164 key_type = BTRFS_DIR_ITEM_KEY;
3166 /* special case for "." */
3167 if (filp->f_pos == 0) {
3168 over = filldir(dirent, ".", 1,
3175 /* special case for .., just use the back ref */
3176 if (filp->f_pos == 1) {
3177 u64 pino = parent_ino(filp->f_path.dentry);
3178 over = filldir(dirent, "..", 2,
3184 path = btrfs_alloc_path();
3187 btrfs_set_key_type(&key, key_type);
3188 key.offset = filp->f_pos;
3189 key.objectid = inode->i_ino;
3191 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3197 leaf = path->nodes[0];
3198 nritems = btrfs_header_nritems(leaf);
3199 slot = path->slots[0];
3200 if (advance || slot >= nritems) {
3201 if (slot >= nritems - 1) {
3202 ret = btrfs_next_leaf(root, path);
3205 leaf = path->nodes[0];
3206 nritems = btrfs_header_nritems(leaf);
3207 slot = path->slots[0];
3215 item = btrfs_item_nr(leaf, slot);
3216 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3218 if (found_key.objectid != key.objectid)
3220 if (btrfs_key_type(&found_key) != key_type)
3222 if (found_key.offset < filp->f_pos)
3225 filp->f_pos = found_key.offset;
3227 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3229 di_total = btrfs_item_size(leaf, item);
3231 while (di_cur < di_total) {
3232 struct btrfs_key location;
3234 name_len = btrfs_dir_name_len(leaf, di);
3235 if (name_len <= sizeof(tmp_name)) {
3236 name_ptr = tmp_name;
3238 name_ptr = kmalloc(name_len, GFP_NOFS);
3244 read_extent_buffer(leaf, name_ptr,
3245 (unsigned long)(di + 1), name_len);
			d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
			btrfs_dir_item_key_to_cpu(leaf, di, &location);

			/* is this a reference to our own snapshot? If so
			 * skip it
			 */
			if (location.type == BTRFS_ROOT_ITEM_KEY &&
			    location.objectid == root->root_key.objectid) {
				over = 0;
				goto skip;
			}
			over = filldir(dirent, name_ptr, name_len,
				       found_key.offset, location.objectid,
				       d_type);
skip:
			if (name_ptr != tmp_name)
				kfree(name_ptr);
			if (over)
				goto nopos;
			di_len = btrfs_dir_name_len(leaf, di) +
				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
			di_cur += di_len;
			di = (struct btrfs_dir_item *)((char *)di + di_len);
		}
	}

	/* Reached end of directory/root. Bump pos past the last item. */
	if (key_type == BTRFS_DIR_INDEX_KEY)
		filp->f_pos = INT_LIMIT(off_t);
	else
		filp->f_pos++;
nopos:
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}
3287 int btrfs_write_inode(struct inode *inode, int wait)
3289 struct btrfs_root *root = BTRFS_I(inode)->root;
3290 struct btrfs_trans_handle *trans;
3293 if (root->fs_info->btree_inode == inode)
3297 trans = btrfs_join_transaction(root, 1);
3298 btrfs_set_trans_block_group(trans, inode);
3299 ret = btrfs_commit_transaction(trans, root);
3305 * This is somewhat expensive, updating the tree every time the
3306 * inode changes. But, it is most likely to find the inode in cache.
3307 * FIXME, needs more benchmarking...there are no reasons other than performance
3308 * to keep or drop this code.
3310 void btrfs_dirty_inode(struct inode *inode)
3312 struct btrfs_root *root = BTRFS_I(inode)->root;
3313 struct btrfs_trans_handle *trans;
3315 trans = btrfs_join_transaction(root, 1);
3316 btrfs_set_trans_block_group(trans, inode);
3317 btrfs_update_inode(trans, root, inode);
3318 btrfs_end_transaction(trans, root);
3322 * find the highest existing sequence number in a directory
3323 * and then set the in-memory index_cnt variable to reflect
3324 * free sequence numbers
3326 static int btrfs_set_inode_index_count(struct inode *inode)
3328 struct btrfs_root *root = BTRFS_I(inode)->root;
3329 struct btrfs_key key, found_key;
3330 struct btrfs_path *path;
3331 struct extent_buffer *leaf;
3334 key.objectid = inode->i_ino;
3335 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3336 key.offset = (u64)-1;
3338 path = btrfs_alloc_path();
3342 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3345 /* FIXME: we should be able to handle this */
3351 * MAGIC NUMBER EXPLANATION:
3352 * since we search a directory based on f_pos we have to start at 2
3353 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
3354 * else has to start at 2
3356 if (path->slots[0] == 0) {
3357 BTRFS_I(inode)->index_cnt = 2;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != inode->i_ino ||
	    btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
		BTRFS_I(inode)->index_cnt = 2;
		goto out;
	}

	BTRFS_I(inode)->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct inode *dir, u64 *index)
{
	int ret = 0;

	if (BTRFS_I(dir)->index_cnt == (u64)-1) {
		ret = btrfs_set_inode_index_count(dir);
		if (ret)
			return ret;
	}
	*index = BTRFS_I(dir)->index_cnt;
	BTRFS_I(dir)->index_cnt++;
	return 0;
}
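/*
 * Sketch of how the index is consumed: btrfs_new_inode() below reserves
 * an index with btrfs_set_inode_index(dir, &index), stores it in the new
 * inode's INODE_REF back reference, and btrfs_add_link() then files the
 * matching DIR_INDEX item under the same value.
 */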
3398 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3399 struct btrfs_root *root,
3401 const char *name, int name_len,
3402 u64 ref_objectid, u64 objectid,
3403 u64 alloc_hint, int mode, u64 *index)
3405 struct inode *inode;
3406 struct btrfs_inode_item *inode_item;
3407 struct btrfs_key *location;
3408 struct btrfs_path *path;
3409 struct btrfs_inode_ref *ref;
3410 struct btrfs_key key[2];
3416 path = btrfs_alloc_path();
3419 inode = new_inode(root->fs_info->sb);
3421 return ERR_PTR(-ENOMEM);
3424 ret = btrfs_set_inode_index(dir, index);
3426 return ERR_PTR(ret);
3429 * index_cnt is ignored for everything but a dir,
3430 * btrfs_get_inode_index_count has an explanation for the magic
3433 init_btrfs_i(inode);
3434 BTRFS_I(inode)->index_cnt = 2;
3435 BTRFS_I(inode)->root = root;
3436 BTRFS_I(inode)->generation = trans->transid;
3442 BTRFS_I(inode)->block_group =
3443 btrfs_find_block_group(root, 0, alloc_hint, owner);
3444 if ((mode & S_IFREG)) {
3445 if (btrfs_test_opt(root, NODATASUM))
3446 btrfs_set_flag(inode, NODATASUM);
3447 if (btrfs_test_opt(root, NODATACOW))
3448 btrfs_set_flag(inode, NODATACOW);
3451 key[0].objectid = objectid;
3452 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3455 key[1].objectid = objectid;
3456 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3457 key[1].offset = ref_objectid;
3459 sizes[0] = sizeof(struct btrfs_inode_item);
3460 sizes[1] = name_len + sizeof(*ref);
3462 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3466 if (objectid > root->highest_inode)
3467 root->highest_inode = objectid;
3469 inode->i_uid = current_fsuid();
3470 inode->i_gid = current_fsgid();
3471 inode->i_mode = mode;
3472 inode->i_ino = objectid;
3473 inode_set_bytes(inode, 0);
3474 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3475 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3476 struct btrfs_inode_item);
3477 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3479 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3480 struct btrfs_inode_ref);
3481 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3482 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3483 ptr = (unsigned long)(ref + 1);
3484 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3486 btrfs_mark_buffer_dirty(path->nodes[0]);
3487 btrfs_free_path(path);
3489 location = &BTRFS_I(inode)->location;
3490 location->objectid = objectid;
3491 location->offset = 0;
3492 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3494 insert_inode_hash(inode);
3498 BTRFS_I(dir)->index_cnt--;
3499 btrfs_free_path(path);
3500 return ERR_PTR(ret);
3503 static inline u8 btrfs_inode_type(struct inode *inode)
3505 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3509 * utility function to add 'inode' into 'parent_inode' with
3510 * a give name and a given sequence number.
3511 * if 'add_backref' is true, also insert a backref from the
3512 * inode to the parent directory.
3514 int btrfs_add_link(struct btrfs_trans_handle *trans,
3515 struct inode *parent_inode, struct inode *inode,
3516 const char *name, int name_len, int add_backref, u64 index)
3519 struct btrfs_key key;
3520 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3522 key.objectid = inode->i_ino;
3523 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3526 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3527 parent_inode->i_ino,
3528 &key, btrfs_inode_type(inode),
3532 ret = btrfs_insert_inode_ref(trans, root,
3535 parent_inode->i_ino,
3538 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3540 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3541 ret = btrfs_update_inode(trans, root, parent_inode);
3546 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3547 struct dentry *dentry, struct inode *inode,
3548 int backref, u64 index)
3550 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3551 inode, dentry->d_name.name,
3552 dentry->d_name.len, backref, index);
3554 d_instantiate(dentry, inode);
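/*
 * The create-style entry points below (mknod/create/mkdir/symlink) all
 * follow the same shape: check free space, start a transaction, allocate
 * an objectid, build the inode, then link it into the directory.  On
 * failure they drop the new inode's link count so the final iput can
 * reap it.
 */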
3562 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3563 int mode, dev_t rdev)
3565 struct btrfs_trans_handle *trans;
3566 struct btrfs_root *root = BTRFS_I(dir)->root;
3567 struct inode *inode = NULL;
3571 unsigned long nr = 0;
3574 if (!new_valid_dev(rdev))
3577 err = btrfs_check_free_space(root, 1, 0);
3581 trans = btrfs_start_transaction(root, 1);
3582 btrfs_set_trans_block_group(trans, dir);
3584 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3590 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3592 dentry->d_parent->d_inode->i_ino, objectid,
3593 BTRFS_I(dir)->block_group, mode, &index);
3594 err = PTR_ERR(inode);
3598 err = btrfs_init_inode_security(inode, dir);
3604 btrfs_set_trans_block_group(trans, inode);
3605 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3609 inode->i_op = &btrfs_special_inode_operations;
3610 init_special_inode(inode, inode->i_mode, rdev);
3611 btrfs_update_inode(trans, root, inode);
3613 dir->i_sb->s_dirt = 1;
3614 btrfs_update_inode_block_group(trans, inode);
3615 btrfs_update_inode_block_group(trans, dir);
3617 nr = trans->blocks_used;
3618 btrfs_end_transaction_throttle(trans, root);
3621 inode_dec_link_count(inode);
3624 btrfs_btree_balance_dirty(root, nr);
3628 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3629 int mode, struct nameidata *nd)
3631 struct btrfs_trans_handle *trans;
3632 struct btrfs_root *root = BTRFS_I(dir)->root;
3633 struct inode *inode = NULL;
3636 unsigned long nr = 0;
3640 err = btrfs_check_free_space(root, 1, 0);
3643 trans = btrfs_start_transaction(root, 1);
3644 btrfs_set_trans_block_group(trans, dir);
3646 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3652 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3654 dentry->d_parent->d_inode->i_ino,
3655 objectid, BTRFS_I(dir)->block_group, mode,
3657 err = PTR_ERR(inode);
3661 err = btrfs_init_inode_security(inode, dir);
3667 btrfs_set_trans_block_group(trans, inode);
3668 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3672 inode->i_mapping->a_ops = &btrfs_aops;
3673 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3674 inode->i_fop = &btrfs_file_operations;
3675 inode->i_op = &btrfs_file_inode_operations;
3676 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3678 dir->i_sb->s_dirt = 1;
3679 btrfs_update_inode_block_group(trans, inode);
3680 btrfs_update_inode_block_group(trans, dir);
3682 nr = trans->blocks_used;
3683 btrfs_end_transaction_throttle(trans, root);
3686 inode_dec_link_count(inode);
3689 btrfs_btree_balance_dirty(root, nr);
3693 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3694 struct dentry *dentry)
3696 struct btrfs_trans_handle *trans;
3697 struct btrfs_root *root = BTRFS_I(dir)->root;
3698 struct inode *inode = old_dentry->d_inode;
3700 unsigned long nr = 0;
3704 if (inode->i_nlink == 0)
3707 btrfs_inc_nlink(inode);
3708 err = btrfs_check_free_space(root, 1, 0);
3711 err = btrfs_set_inode_index(dir, &index);
3715 trans = btrfs_start_transaction(root, 1);
3717 btrfs_set_trans_block_group(trans, dir);
3718 atomic_inc(&inode->i_count);
3720 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3725 dir->i_sb->s_dirt = 1;
3726 btrfs_update_inode_block_group(trans, dir);
3727 err = btrfs_update_inode(trans, root, inode);
3732 nr = trans->blocks_used;
3733 btrfs_end_transaction_throttle(trans, root);
3736 inode_dec_link_count(inode);
3739 btrfs_btree_balance_dirty(root, nr);
3743 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3745 struct inode *inode = NULL;
3746 struct btrfs_trans_handle *trans;
3747 struct btrfs_root *root = BTRFS_I(dir)->root;
3749 int drop_on_err = 0;
3752 unsigned long nr = 1;
3754 err = btrfs_check_free_space(root, 1, 0);
3758 trans = btrfs_start_transaction(root, 1);
3759 btrfs_set_trans_block_group(trans, dir);
3761 if (IS_ERR(trans)) {
3762 err = PTR_ERR(trans);
3766 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3772 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3774 dentry->d_parent->d_inode->i_ino, objectid,
3775 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3777 if (IS_ERR(inode)) {
3778 err = PTR_ERR(inode);
3784 err = btrfs_init_inode_security(inode, dir);
3788 inode->i_op = &btrfs_dir_inode_operations;
3789 inode->i_fop = &btrfs_dir_file_operations;
3790 btrfs_set_trans_block_group(trans, inode);
3792 btrfs_i_size_write(inode, 0);
3793 err = btrfs_update_inode(trans, root, inode);
3797 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3798 inode, dentry->d_name.name,
3799 dentry->d_name.len, 0, index);
3803 d_instantiate(dentry, inode);
3805 dir->i_sb->s_dirt = 1;
3806 btrfs_update_inode_block_group(trans, inode);
3807 btrfs_update_inode_block_group(trans, dir);
3810 nr = trans->blocks_used;
3811 btrfs_end_transaction_throttle(trans, root);
3816 btrfs_btree_balance_dirty(root, nr);
3820 /* helper for btfs_get_extent. Given an existing extent in the tree,
3821 * and an extent that you want to insert, deal with overlap and insert
3822 * the new extent into the tree.
3824 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3825 struct extent_map *existing,
3826 struct extent_map *em,
3827 u64 map_start, u64 map_len)
3831 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3832 start_diff = map_start - em->start;
3833 em->start = map_start;
3835 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3836 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3837 em->block_start += start_diff;
3838 em->block_len -= start_diff;
3840 return add_extent_mapping(em_tree, em);
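/*
 * Note: block_start is only shifted for uncompressed mappings above.
 * For a compressed extent the whole compressed blob is read as a unit,
 * so block_start must keep pointing at the start of the on-disk data
 * no matter where inside the extent this mapping begins.
 */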
3843 static noinline int uncompress_inline(struct btrfs_path *path,
3844 struct inode *inode, struct page *page,
3845 size_t pg_offset, u64 extent_offset,
3846 struct btrfs_file_extent_item *item)
3849 struct extent_buffer *leaf = path->nodes[0];
3852 unsigned long inline_size;
3855 WARN_ON(pg_offset != 0);
3856 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3857 inline_size = btrfs_file_extent_inline_item_len(leaf,
3858 btrfs_item_nr(leaf, path->slots[0]));
3859 tmp = kmalloc(inline_size, GFP_NOFS);
3860 ptr = btrfs_file_extent_inline_start(item);
3862 read_extent_buffer(leaf, tmp, ptr, inline_size);
3864 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3865 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3866 inline_size, max_size);
3868 char *kaddr = kmap_atomic(page, KM_USER0);
3869 unsigned long copy_size = min_t(u64,
3870 PAGE_CACHE_SIZE - pg_offset,
3871 max_size - extent_offset);
3872 memset(kaddr + pg_offset, 0, copy_size);
3873 kunmap_atomic(kaddr, KM_USER0);
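/*
 * When the decompressed data is shorter than the page range being
 * filled, the tail of the page is zeroed above so stale data never
 * leaks to userspace.
 */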
3880 * a bit scary, this does extent mapping from logical file offset to the disk.
3881 * the ugly parts come from merging extents from the disk with the in-ram
3882 * representation. This gets more complex because of the data=ordered code,
3883 * where the in-ram extents might be locked pending data=ordered completion.
3885 * This also copies inline extents directly into the page.
3888 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3889 size_t pg_offset, u64 start, u64 len,
3895 u64 extent_start = 0;
3897 u64 objectid = inode->i_ino;
3899 struct btrfs_path *path = NULL;
3900 struct btrfs_root *root = BTRFS_I(inode)->root;
3901 struct btrfs_file_extent_item *item;
3902 struct extent_buffer *leaf;
3903 struct btrfs_key found_key;
3904 struct extent_map *em = NULL;
3905 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3906 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3907 struct btrfs_trans_handle *trans = NULL;
3911 spin_lock(&em_tree->lock);
3912 em = lookup_extent_mapping(em_tree, start, len);
3914 em->bdev = root->fs_info->fs_devices->latest_bdev;
3915 spin_unlock(&em_tree->lock);
3918 if (em->start > start || em->start + em->len <= start)
3919 free_extent_map(em);
3920 else if (em->block_start == EXTENT_MAP_INLINE && page)
3921 free_extent_map(em);
3925 em = alloc_extent_map(GFP_NOFS);
3930 em->bdev = root->fs_info->fs_devices->latest_bdev;
3931 em->start = EXTENT_MAP_HOLE;
3932 em->orig_start = EXTENT_MAP_HOLE;
3934 em->block_len = (u64)-1;
3937 path = btrfs_alloc_path();
3941 ret = btrfs_lookup_file_extent(trans, root, path,
3942 objectid, start, trans != NULL);
3949 if (path->slots[0] == 0)
3954 leaf = path->nodes[0];
3955 item = btrfs_item_ptr(leaf, path->slots[0],
3956 struct btrfs_file_extent_item);
3957 /* are we inside the extent that was found? */
3958 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3959 found_type = btrfs_key_type(&found_key);
3960 if (found_key.objectid != objectid ||
3961 found_type != BTRFS_EXTENT_DATA_KEY) {
3965 found_type = btrfs_file_extent_type(leaf, item);
3966 extent_start = found_key.offset;
3967 compressed = btrfs_file_extent_compression(leaf, item);
3968 if (found_type == BTRFS_FILE_EXTENT_REG ||
3969 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3970 extent_end = extent_start +
3971 btrfs_file_extent_num_bytes(leaf, item);
3972 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3974 size = btrfs_file_extent_inline_len(leaf, item);
3975 extent_end = (extent_start + size + root->sectorsize - 1) &
3976 ~((u64)root->sectorsize - 1);
3979 if (start >= extent_end) {
3981 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3982 ret = btrfs_next_leaf(root, path);
3989 leaf = path->nodes[0];
3991 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3992 if (found_key.objectid != objectid ||
3993 found_key.type != BTRFS_EXTENT_DATA_KEY)
3995 if (start + len <= found_key.offset)
3998 em->len = found_key.offset - start;
4002 if (found_type == BTRFS_FILE_EXTENT_REG ||
4003 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4004 em->start = extent_start;
4005 em->len = extent_end - extent_start;
4006 em->orig_start = extent_start -
4007 btrfs_file_extent_offset(leaf, item);
4008 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4010 em->block_start = EXTENT_MAP_HOLE;
4014 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4015 em->block_start = bytenr;
4016 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4019 bytenr += btrfs_file_extent_offset(leaf, item);
4020 em->block_start = bytenr;
4021 em->block_len = em->len;
4022 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4023 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4026 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4030 size_t extent_offset;
4033 em->block_start = EXTENT_MAP_INLINE;
4034 if (!page || create) {
4035 em->start = extent_start;
4036 em->len = extent_end - extent_start;
4040 size = btrfs_file_extent_inline_len(leaf, item);
4041 extent_offset = page_offset(page) + pg_offset - extent_start;
4042 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4043 size - extent_offset);
4044 em->start = extent_start + extent_offset;
4045 em->len = (copy_size + root->sectorsize - 1) &
4046 ~((u64)root->sectorsize - 1);
4047 em->orig_start = EXTENT_MAP_INLINE;
4049 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4050 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4051 if (create == 0 && !PageUptodate(page)) {
4052 if (btrfs_file_extent_compression(leaf, item) ==
4053 BTRFS_COMPRESS_ZLIB) {
4054 ret = uncompress_inline(path, inode, page,
4056 extent_offset, item);
4060 read_extent_buffer(leaf, map + pg_offset, ptr,
4064 flush_dcache_page(page);
4065 } else if (create && PageUptodate(page)) {
4068 free_extent_map(em);
4070 btrfs_release_path(root, path);
4071 trans = btrfs_join_transaction(root, 1);
4075 write_extent_buffer(leaf, map + pg_offset, ptr,
4078 btrfs_mark_buffer_dirty(leaf);
4080 set_extent_uptodate(io_tree, em->start,
4081 extent_map_end(em) - 1, GFP_NOFS);
4084 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(root, path);
	if (em->start > start || extent_map_end(em) <= start) {
		printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
		       "[%llu %llu]\n", (unsigned long long)em->start,
		       (unsigned long long)em->len,
		       (unsigned long long)start,
		       (unsigned long long)len);
		err = -EIO;
		goto out;
	}

	err = 0;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;
		existing = lookup_extent_mapping(em_tree, start, len);
		if (existing && (existing->start > start ||
		    existing->start + existing->len <= start)) {
			free_extent_map(existing);
			existing = NULL;
		}
		if (!existing) {
			existing = lookup_extent_mapping(em_tree, em->start,
							 em->len);
			if (existing) {
				err = merge_extent_mapping(em_tree, existing,
							   em, start,
							   root->sectorsize);
				free_extent_map(existing);
				if (err) {
					free_extent_map(em);
					em = NULL;
				}
			} else {
				err = -EIO;
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	spin_unlock(&em_tree->lock);
out:
	if (path)
		btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans, root);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		WARN_ON(1);
		return ERR_PTR(err);
	}
	return em;
}
4163 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4164 const struct iovec *iov, loff_t offset,
4165 unsigned long nr_segs)
4170 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4171 __u64 start, __u64 len)
4173 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
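/*
 * Userspace reaches btrfs_fiemap() through the FIEMAP ioctl.  A minimal
 * caller sketch (illustrative only, error handling omitted):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 */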
4176 int btrfs_readpage(struct file *file, struct page *page)
4178 struct extent_io_tree *tree;
4179 tree = &BTRFS_I(page->mapping->host)->io_tree;
4180 return extent_read_full_page(tree, page, btrfs_get_extent);
4183 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4185 struct extent_io_tree *tree;
4188 if (current->flags & PF_MEMALLOC) {
4189 redirty_page_for_writepage(wbc, page);
4193 tree = &BTRFS_I(page->mapping->host)->io_tree;
4194 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4197 int btrfs_writepages(struct address_space *mapping,
4198 struct writeback_control *wbc)
4200 struct extent_io_tree *tree;
4202 tree = &BTRFS_I(mapping->host)->io_tree;
4203 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4207 btrfs_readpages(struct file *file, struct address_space *mapping,
4208 struct list_head *pages, unsigned nr_pages)
4210 struct extent_io_tree *tree;
4211 tree = &BTRFS_I(mapping->host)->io_tree;
4212 return extent_readpages(tree, mapping, pages, nr_pages,
4215 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4217 struct extent_io_tree *tree;
4218 struct extent_map_tree *map;
4221 tree = &BTRFS_I(page->mapping->host)->io_tree;
4222 map = &BTRFS_I(page->mapping->host)->extent_tree;
4223 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4225 ClearPagePrivate(page);
4226 set_page_private(page, 0);
4227 page_cache_release(page);
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags);
}
4239 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4241 struct extent_io_tree *tree;
4242 struct btrfs_ordered_extent *ordered;
4243 u64 page_start = page_offset(page);
4244 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4246 wait_on_page_writeback(page);
4247 tree = &BTRFS_I(page->mapping->host)->io_tree;
4249 btrfs_releasepage(page, GFP_NOFS);
4253 lock_extent(tree, page_start, page_end, GFP_NOFS);
4254 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4258 * IO on this page will never be started, so we need
4259 * to account for any ordered extents now
4261 clear_extent_bit(tree, page_start, page_end,
4262 EXTENT_DIRTY | EXTENT_DELALLOC |
4263 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4264 btrfs_finish_ordered_io(page->mapping->host,
4265 page_start, page_end);
4266 btrfs_put_ordered_extent(ordered);
4267 lock_extent(tree, page_start, page_end, GFP_NOFS);
4269 clear_extent_bit(tree, page_start, page_end,
4270 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4273 __btrfs_releasepage(page, GFP_NOFS);
4275 ClearPageChecked(page);
4276 if (PagePrivate(page)) {
4277 ClearPagePrivate(page);
4278 set_page_private(page, 0);
4279 page_cache_release(page);
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
4298 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4300 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4301 struct btrfs_root *root = BTRFS_I(inode)->root;
4302 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4303 struct btrfs_ordered_extent *ordered;
4305 unsigned long zero_start;
4311 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
4318 size = i_size_read(inode);
4319 page_start = page_offset(page);
4320 page_end = page_start + PAGE_CACHE_SIZE - 1;
4322 if ((page->mapping != inode->i_mapping) ||
4323 (page_start >= size)) {
4324 /* page got truncated out from underneath us */
4327 wait_on_page_writeback(page);
4329 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4330 set_page_extent_mapped(page);
4333 * we can't set the delalloc bits if there are pending ordered
4334 * extents. Drop our locks and wait for them to finish
4336 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4338 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4340 btrfs_start_ordered_extent(inode, ordered, 1);
4341 btrfs_put_ordered_extent(ordered);
	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_CACHE_SIZE > size)
		zero_start = size & ~PAGE_CACHE_MASK;
	else
		zero_start = PAGE_CACHE_SIZE;

	if (zero_start != PAGE_CACHE_SIZE) {
		char *kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
out_unlock:
	unlock_page(page);
out:
	return ret;
}
4370 static void btrfs_truncate(struct inode *inode)
4372 struct btrfs_root *root = BTRFS_I(inode)->root;
4374 struct btrfs_trans_handle *trans;
4376 u64 mask = root->sectorsize - 1;
4378 if (!S_ISREG(inode->i_mode))
4380 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4383 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4384 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4386 trans = btrfs_start_transaction(root, 1);
4387 btrfs_set_trans_block_group(trans, inode);
4388 btrfs_i_size_write(inode, inode->i_size);
4390 ret = btrfs_orphan_add(trans, inode);
4393 /* FIXME, add redo link to tree so we don't leak on crash */
4394 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4395 BTRFS_EXTENT_DATA_KEY);
4396 btrfs_update_inode(trans, root, inode);
4398 ret = btrfs_orphan_del(trans, inode);
4402 nr = trans->blocks_used;
4403 ret = btrfs_end_transaction_throttle(trans, root);
4405 btrfs_btree_balance_dirty(root, nr);
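/*
 * Note the orphan item dance above: the inode is added to the orphan
 * list before any items are dropped and removed only after the truncate
 * finishes, so a crash in between leaves a record that lets mount-time
 * recovery complete the job.
 */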
4409 * create a new subvolume directory/inode (helper for the ioctl).
4411 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4412 struct btrfs_root *new_root, struct dentry *dentry,
4413 u64 new_dirid, u64 alloc_hint)
4415 struct inode *inode;
4419 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4420 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4422 return PTR_ERR(inode);
4423 inode->i_op = &btrfs_dir_inode_operations;
4424 inode->i_fop = &btrfs_dir_file_operations;
4427 btrfs_i_size_write(inode, 0);
4429 error = btrfs_update_inode(trans, new_root, inode);
4433 d_instantiate(dentry, inode);
4437 /* helper function for file defrag and space balancing. This
4438 * forces readahead on a given range of bytes in an inode
4440 unsigned long btrfs_force_ra(struct address_space *mapping,
4441 struct file_ra_state *ra, struct file *file,
4442 pgoff_t offset, pgoff_t last_index)
4444 pgoff_t req_size = last_index - offset + 1;
4446 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4447 return offset + req_size;
4450 struct inode *btrfs_alloc_inode(struct super_block *sb)
4452 struct btrfs_inode *ei;
4454 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4458 ei->logged_trans = 0;
4459 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4460 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4461 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4462 INIT_LIST_HEAD(&ei->i_orphan);
4463 return &ei->vfs_inode;
4466 void btrfs_destroy_inode(struct inode *inode)
4468 struct btrfs_ordered_extent *ordered;
4469 WARN_ON(!list_empty(&inode->i_dentry));
4470 WARN_ON(inode->i_data.nrpages);
4472 if (BTRFS_I(inode)->i_acl &&
4473 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4474 posix_acl_release(BTRFS_I(inode)->i_acl);
4475 if (BTRFS_I(inode)->i_default_acl &&
4476 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4477 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4479 spin_lock(&BTRFS_I(inode)->root->list_lock);
4480 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4481 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4482 " list\n", inode->i_ino);
4485 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4488 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4492 printk(KERN_ERR "btrfs found ordered "
4493 "extent %llu %llu on inode cleanup\n",
4494 (unsigned long long)ordered->file_offset,
4495 (unsigned long long)ordered->len);
4496 btrfs_remove_ordered_extent(inode, ordered);
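		/* two puts: one for the reference taken by the lookup
		 * above, and one for the base reference the ordered
		 * extent has held since it was created -- the IO that
		 * would normally drop it is never going to run
		 */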
4497 btrfs_put_ordered_extent(ordered);
4498 btrfs_put_ordered_extent(ordered);
4501 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4502 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4505 static void init_once(void *foo)
4507 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4509 inode_init_once(&ei->vfs_inode);
4512 void btrfs_destroy_cachep(void)
4514 if (btrfs_inode_cachep)
4515 kmem_cache_destroy(btrfs_inode_cachep);
4516 if (btrfs_trans_handle_cachep)
4517 kmem_cache_destroy(btrfs_trans_handle_cachep);
4518 if (btrfs_transaction_cachep)
4519 kmem_cache_destroy(btrfs_transaction_cachep);
4520 if (btrfs_bit_radix_cachep)
4521 kmem_cache_destroy(btrfs_bit_radix_cachep);
4522 if (btrfs_path_cachep)
4523 kmem_cache_destroy(btrfs_path_cachep);
4526 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4527 unsigned long extra_flags,
4528 void (*ctor)(void *))
4530 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4531 SLAB_MEM_SPREAD | extra_flags), ctor);
4534 int btrfs_init_cachep(void)
4536 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4537 sizeof(struct btrfs_inode),
4539 if (!btrfs_inode_cachep)
4541 btrfs_trans_handle_cachep =
4542 btrfs_cache_create("btrfs_trans_handle_cache",
4543 sizeof(struct btrfs_trans_handle),
4545 if (!btrfs_trans_handle_cachep)
4547 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4548 sizeof(struct btrfs_transaction),
4550 if (!btrfs_transaction_cachep)
	btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
					       sizeof(struct btrfs_path),
					       0, NULL);
	if (!btrfs_path_cachep)
		goto fail;
	btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
						    SLAB_DESTROY_BY_RCU, NULL);
	if (!btrfs_bit_radix_cachep)
		goto fail;
	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
static int btrfs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
	stat->blksize = PAGE_CACHE_SIZE;
	stat->blocks = (inode_get_bytes(inode) +
			BTRFS_I(inode)->delalloc_bytes) >> 9;
	return 0;
}
4579 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4580 struct inode *new_dir, struct dentry *new_dentry)
4582 struct btrfs_trans_handle *trans;
4583 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4584 struct inode *new_inode = new_dentry->d_inode;
4585 struct inode *old_inode = old_dentry->d_inode;
4586 struct timespec ctime = CURRENT_TIME;
4590 /* we're not allowed to rename between subvolumes */
4591 if (BTRFS_I(old_inode)->root->root_key.objectid !=
4592 BTRFS_I(new_dir)->root->root_key.objectid)
4595 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4596 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4600 /* to rename a snapshot or subvolume, we need to juggle the
4601 * backrefs. This isn't coded yet
4603 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4606 ret = btrfs_check_free_space(root, 1, 0);
4610 trans = btrfs_start_transaction(root, 1);
4612 btrfs_set_trans_block_group(trans, new_dir);
4614 btrfs_inc_nlink(old_dentry->d_inode);
4615 old_dir->i_ctime = old_dir->i_mtime = ctime;
4616 new_dir->i_ctime = new_dir->i_mtime = ctime;
4617 old_inode->i_ctime = ctime;
4619 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4620 old_dentry->d_name.name,
4621 old_dentry->d_name.len);
4626 new_inode->i_ctime = CURRENT_TIME;
4627 ret = btrfs_unlink_inode(trans, root, new_dir,
4628 new_dentry->d_inode,
4629 new_dentry->d_name.name,
4630 new_dentry->d_name.len);
4633 if (new_inode->i_nlink == 0) {
4634 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4640 ret = btrfs_set_inode_index(new_dir, &index);
4644 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4645 old_inode, new_dentry->d_name.name,
4646 new_dentry->d_name.len, 1, index);
4651 btrfs_end_transaction_throttle(trans, root);
/*
 * some fairly slow code that needs optimization.  This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
		   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}
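
/*
 * symlink targets are limited to what fits in one inline extent, so the
 * name is written straight into the btree leaf instead of allocating
 * data blocks.  i_size ends up as name_len - 1 because the trailing NUL
 * is stored in the extent but isn't part of the link length.
 */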
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	err = btrfs_check_free_space(root, 1, 0);
	if (err)
		goto out_fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
	if (err) {
		err = -ENOSPC;
		goto out_unlock;
	}

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len,
				dentry->d_parent->d_inode->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	dir->i_sb->s_dirt = 1;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
out_fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
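
/*
 * helper for fallocate: reserve extents covering [start, end) in chunks
 * of at most max_extent bytes, record each one as a PREALLOC file
 * extent, and push i_size out unless FALLOC_FL_KEEP_SIZE was requested.
 */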
static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
			       u64 alloc_hint, int mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 alloc_size;
	u64 cur_offset = start;
	u64 num_bytes = end - start;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	while (num_bytes > 0) {
		alloc_size = min(num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto out;
		}
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		alloc_hint = ins.objectid + ins.offset;
	}
out:
	if (cur_offset > start) {
		inode->i_ctime = CURRENT_TIME;
		btrfs_set_flag(inode, PREALLOC);
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    cur_offset > i_size_read(inode))
			btrfs_i_size_write(inode, cur_offset);
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
	}

	btrfs_end_transaction(trans, root);
	return ret;
}
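
/*
 * fallocate(2) works on sector-aligned ranges: with a 4K sectorsize
 * (mask 0xfff), an offset of 5000 and len of 3000, for example, round
 * out to alloc_start 4096 and alloc_end 8192.  Any ordered extents in
 * the range are waited on first, then the extent maps are walked and
 * only the holes are preallocated.
 */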
static long btrfs_fallocate(struct inode *inode, int mode,
			    loff_t offset, loff_t len)
{
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	mutex_lock(&inode->i_mutex);
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
			    alloc_end - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      alloc_start, alloc_end - 1, GFP_NOFS);
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE) {
			ret = prealloc_file_range(inode, cur_offset,
						  last_byte, alloc_hint, mode);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
			alloc_hint = em->block_start;
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
		      GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
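
/*
 * check the per-inode READONLY flag before falling back to the normal
 * mode bit and ACL checks done by generic_permission.
 */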
static int btrfs_permission(struct inode *inode, int mask)
{
	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}
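
/*
 * the operation tables below plug the routines in this file into the
 * VFS; directories, regular files, symlinks and special files each get
 * their own inode_operations.
 */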
static struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};
static struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};
static struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
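
/*
 * hooks the generic extent_io code calls back into as pages move
 * through delalloc, bio submission, and end_io handling.
 */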
static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
};
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO, and they will also change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
};
static struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};
static struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fallocate	= btrfs_fallocate,
	.fiemap		= btrfs_fiemap,
};
static struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};
static struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};