/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
44 #include "transaction.h"
45 #include "btrfs_inode.h"
47 #include "print-tree.h"
49 #include "ordered-data.h"
52 #include "ref-cache.h"
53 #include "compression.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};
static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;

#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written, int unlock);
/*
 * a very lame attempt at stopping writes when the FS is 85% full.  There
 * are countless ways this is incorrect, but it is better than nothing.
 */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
			   int for_del)
{
	u64 total;
	u64 used;
	u64 thresh;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
	total = btrfs_super_total_bytes(&root->fs_info->super_copy);
	used = btrfs_super_bytes_used(&root->fs_info->super_copy);
	if (for_del)
		thresh = total * 90;
	else
		thresh = total * 85;
	do_div(thresh, 100);

	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
		ret = -ENOSPC;
	spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	return ret;
}
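/*
 * Illustrative call pattern (mirrors btrfs_unlink below): before starting
 * a transaction that will dirty roughly one block, a caller does
 *
 *	ret = btrfs_check_free_space(root, 1, 1);
 *	if (ret)
 *		goto fail;	(ret is -ENOSPC here)
 *
 * num_required is in bytes; a non-zero for_del selects the more
 * permissive threshold so deletes can still make progress on a
 * nearly-full FS.
 */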
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	inode_add_bytes(inode, size);
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		printk(KERN_ERR "btrfs: got bad ret %d\n", ret);
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_CACHE_SIZE);

			kaddr = kmap(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap(cpage);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);
	return 0;
fail:
	btrfs_free_path(path);
	return err;
}
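/*
 * Layout note on the item built above: for an inline extent the file
 * data lives in the leaf itself, right after the btrfs_file_extent_item
 * header, and ram_bytes records the uncompressed length.  So (numbers
 * illustrative only) a 4096 byte range zlib squeezed to 812 bytes is
 * stored as an item of btrfs_file_extent_calc_inline_size(812) bytes
 * whose ram_bytes field still says 4096.
 */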
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start,
				 aligned_end, start, &hint_byte);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
	return 0;
}
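/*
 * Worked example of the rejection tests above (illustrative numbers,
 * assuming 4K pages, a 4K sectorsize and the default max_inline): a
 * 3000 byte file at offset 0 passes every check and is inlined; a
 * 5000 byte file trips actual_end >= PAGE_CACHE_SIZE; and data that
 * does not start at file offset 0 is rejected outright.
 */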
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};

static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent);
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that pdflush sent them down.
 */
static noinline int compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 actual_end;
	u64 blocksize = root->sectorsize;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 128 * 1024;
	int i;
	int will_compress;

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 128k.  This is a crucial number
	 * because it also controls how easily we can spread reads across
	 * cpus for decompression.
	 *
	 * We also want to make sure the amount of IO required to do
	 * a random read is reasonably small, so we limit the size of
	 * a compressed extent to 128k.
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	total_in = 0;
	ret = 0;

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		trans = btrfs_join_transaction(root, 1);
		BUG_ON(!trans);
		btrfs_set_trans_block_group(trans, inode);

		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		btrfs_end_transaction(trans, root);
		if (ret == 0) {
			/*
			 * inline extent creation worked, we don't need
			 * to create any more async work items.  Unlock
			 * and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 0,
						     0, 1, 1, 1);
			ret = 0;
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		btrfs_set_flag(inode, NOCOMPRESS);
	}
	if (will_compress) {
		*num_added += 1;

		/* the async work queues will take care of doing actual
		 * allocation on disk for these compressed pages,
		 * and will submit them to the elevator.
		 */
		add_async_extent(async_cow, start, num_bytes,
				 total_compressed, pages, nr_pages_ret);

		if (start + num_bytes < end && start + num_bytes < actual_end) {
			start += num_bytes;
			pages = NULL;
			cond_resched();
			goto again;
		}
	} else {
		/*
		 * No compression, but we still need to write the pages in
		 * the file we've been given so far.  redirty the locked
		 * page if it corresponds to our extent and set things up
		 * for the async work queue to run cow_file_range to do
		 * the normal delalloc dance
		 */
		if (page_offset(locked_page) >= start &&
		    page_offset(locked_page) <= end) {
			__set_page_dirty_nobuffers(locked_page);
			/* unlocked later on in the async handlers */
		}
		add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
		*num_added += 1;
	}

out:
	return 0;

free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	if (pages)
		kfree(pages);

	goto out;
}
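/*
 * Chunking note: the 'again' loop above walks the delalloc range in
 * steps bounded by the 128k compressed extent limit, so one call can
 * queue several async_extents.  Roughly, a 1MB dirty range becomes
 * eight 128k extents, each compressed and queued separately
 * (illustrative arithmetic, 4K pages assumed).
 */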
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline int submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_trans_handle *trans;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_io_tree *io_tree;
	int ret;

	if (list_empty(&async_cow->extents))
		return 0;

	trans = btrfs_join_transaction(root, 1);

	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1,
				    GFP_NOFS);

			/* allocate blocks */
			cow_file_range(inode, async_cow->locked_page,
				       async_extent->start,
				       async_extent->start +
				       async_extent->ram_size - 1,
				       &page_started, &nr_written, 0);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1,
			    GFP_NOFS);
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		btrfs_drop_extent_cache(inode, async_extent->start,
					async_extent->start +
					async_extent->ram_size - 1, 0);

		ret = btrfs_reserve_extent(trans, root,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);
		em = alloc_extent_map(GFP_NOFS);
		em->start = async_extent->start;
		em->len = async_extent->ram_size;
		em->orig_start = em->start;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
		}

		ret = btrfs_add_ordered_extent(inode, async_extent->start,
					       ins.objectid,
					       async_extent->ram_size,
					       ins.offset,
					       BTRFS_ORDERED_COMPRESSED);
		BUG_ON(ret);

		btrfs_end_transaction(trans, root);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     NULL, 1, 1, 0, 1, 1, 0);

		ret = btrfs_submit_compressed_write(inode,
					    async_extent->start,
					    async_extent->ram_size,
					    ins.objectid,
					    ins.offset, async_extent->pages,
					    async_extent->nr_pages);
		BUG_ON(ret);

		trans = btrfs_join_transaction(root, 1);
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}

	btrfs_end_transaction(trans, root);
	return 0;
}
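/*
 * Ordering note: this function runs as the work queue's ordered_func,
 * so although many cpus compress extents in parallel in phase one, the
 * allocations and submissions done here always happen in the order the
 * ranges were originally queued by writepages.
 */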
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, int *page_started,
				   unsigned long *nr_written,
				   int unlock)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	actual_end = min_t(u64, isize, end + 1);

	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	ret = 0;

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL, 1, 1,
						     1, 1, 1, 1);
			*nr_written = *nr_written +
			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
			*page_started = 1;
			ret = 0;
			goto out;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		BUG_ON(ret);

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;
		em->orig_start = em->start;

		ram_size = ins.offset;
		em->len = ins.offset;

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		BUG_ON(ret);

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			BUG_ON(ret);
		}

		if (disk_num_bytes < cur_alloc_size) {
			printk(KERN_ERR "btrfs: num_bytes %llu cur_alloc %llu\n",
			       (unsigned long long)disk_num_bytes,
			       (unsigned long long)cur_alloc_size);
			break;
		}
		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, unlock, 1,
					     1, 0, 0, 0);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	ret = 0;
	btrfs_end_transaction(trans, root);

	return ret;
}
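/*
 * Sketch of the allocation loop above, with illustrative numbers: for
 * a 1MB delalloc range and an fs_info->max_extent of 256k, the loop
 * runs four times; each pass reserves up to 256k, inserts one
 * extent_map plus one ordered extent, and advances start by however
 * much was actually reserved (ins.offset may be less than asked for).
 */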
/*
 * work queue callback to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0)
		async_cow->inode = NULL;
}

/*
 * work queue callback to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
		PAGE_CACHE_SHIFT;

	atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);

	if (atomic_read(&root->fs_info->async_delalloc_pages) <
	    5 * 1024 * 1024 &&
	    waitqueue_active(&root->fs_info->async_submit_wait))
		wake_up(&root->fs_info->async_submit_wait);

	if (async_cow->inode) {
		submit_compressed_extents(async_cow->inode, async_cow);
	}
}
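/*
 * nr_pages arithmetic, for example: a 512k async_cow chunk on 4K pages
 * works out to (512k - 1 + 4096) >> 12 = 128 pages of accounting; the
 * +PAGE_CACHE_SIZE rounds the inclusive start/end range up.
 * Illustrative only.
 */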
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	kfree(async_cow);
}
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;
	int limit = 10 * 1024 * 1024;

	if (!btrfs_test_opt(root, COMPRESS)) {
		return cow_file_range(inode, locked_page, start, end,
				      page_started, nr_written, 1);
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		async_cow->inode = inode;
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (btrfs_test_flag(inode, NOCOMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + 512 * 1024 - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		async_cow->work.func = async_cow_start;
		async_cow->work.ordered_func = async_cow_submit;
		async_cow->work.ordered_free = async_cow_free;
		async_cow->work.flags = 0;

		nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
			PAGE_CACHE_SHIFT;
		atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);

		btrfs_queue_worker(&root->fs_info->delalloc_workers,
				   &async_cow->work);

		if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
			wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->async_delalloc_pages) <
			    limit));
		}

		while (atomic_read(&root->fs_info->async_submit_draining) &&
		       atomic_read(&root->fs_info->async_delalloc_pages)) {
			wait_event(root->fs_info->async_submit_wait,
			  (atomic_read(&root->fs_info->async_delalloc_pages) ==
			   0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
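/*
 * Throttling sketch: each chunk queued above adds its page count to
 * async_delalloc_pages, and async_cow_submit subtracts it again and
 * wakes async_submit_wait.  The wait_event above only stalls writers
 * while more than 'limit' pages are in flight, bounding the memory
 * pinned by queued-but-unwritten delalloc data.
 */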
static noinline int csum_exist_in_range(struct btrfs_root *root,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(root, bytenr, bytenr + num_bytes - 1,
				       &list);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
/*
 * called back for nocow writeback.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int ret;
	int type;
	int nocow;
	int check_prev = 1;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				BUG_ON(1);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(root, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
						  disk_bytenr))
				goto out_check;
			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(root, disk_bytenr, num_bytes))
				goto out_check;
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1, page_started,
					     nr_written, 1);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->orig_start = em->start;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				spin_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				spin_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					cur_offset, cur_offset + num_bytes - 1,
					locked_page, 1, 1, 1, 0, 0, 0);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started, nr_written, 1);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}
/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;

	if (btrfs_test_flag(inode, NODATACOW))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	else if (btrfs_test_flag(inode, PREALLOC))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	else
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);

	return ret;
}
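/*
 * Dispatch summary: NODATACOW inodes always try to write in place
 * (force=1), PREALLOC inodes only reuse extents that really were
 * preallocated (force=0), and everything else takes the async
 * COW/compression path above.
 */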
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
			      unsigned long old, unsigned long bits)
{
	unsigned long flags;

	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
				unsigned long old, unsigned long bits)
{
	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		unsigned long flags;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		if (end - start + 1 > root->fs_info->delalloc_bytes) {
			printk(KERN_INFO "btrfs warning: delalloc account "
			       "%llu %llu\n",
			       (unsigned long long)(end - start + 1),
			       (unsigned long long)
			       root->fs_info->delalloc_bytes);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			root->fs_info->delalloc_bytes -= end - start + 1;
			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size) {
		return 1;
	}
	return 0;
}
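/*
 * Example (numbers illustrative): if btrfs_map_block reports that only
 * 60k of stripe remains at 'logical' (map_length = 60k) and the bio
 * plus the new page run would need 64k, we return 1 and the caller
 * starts a fresh bio rather than spanning the stripe boundary.
 */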
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, int rw,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
	BUG_ON(ret);
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
				   int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
/*
 * extent_io.c submission hook.  This does the right thing for csum
 * calculation on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	skip_sum = btrfs_test_flag(inode, NODATASUM);

	if (!(rw & (1 << BIO_RW))) {
		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		} else if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio, NULL);
		goto mapit;
	} else if (!skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, __btrfs_submit_bio_start,
				   __btrfs_submit_bio_done);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);
	list_for_each(cur, list) {
		sum = list_entry(cur, struct btrfs_ordered_sum, list);
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	if ((end & (PAGE_CACHE_SIZE - 1)) == 0) {
		WARN_ON(1);
	}
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			   EXTENT_ORDERED, 0)) {
		goto out;
	}

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			     EXTENT_ORDERED, 0);
	if (ret)
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, file_pos, &hint);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);
	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
					  root->root_key.objectid,
					  trans->transid, inode->i_ino, &ins);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	trans = btrfs_join_transaction(root, 1);

	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
		goto nocow;

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, root, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
nocow:
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	mutex_lock(&BTRFS_I(inode)->extent_mutex);
	btrfs_ordered_update_i_size(inode, ordered_extent);
	btrfs_update_inode(trans, root, inode);
	btrfs_remove_ordered_extent(inode, ordered_extent);
	mutex_unlock(&BTRFS_I(inode)->extent_mutex);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	btrfs_end_transaction(trans, root);
	return 0;
}
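/*
 * Completion recap: the csums attached at submit time are inserted
 * here via add_pending_csums, the file extent item goes into the
 * btree, the in-memory disk_i_size is rolled forward, and only then is
 * the ordered extent removed.  The intent is that the metadata for a
 * range only ever describes fully written data.
 */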
static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				       struct extent_state *state, int uptodate)
{
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

static int btrfs_io_failed_hook(struct bio *failed_bio,
				struct page *page, u64 start, u64 end,
				struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;
		failrec->bio_flags = 0;

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		spin_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			logical = em->block_start;
			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
		}
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     failrec->bio_flags);
	return 0;
}
/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
static int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}
/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
				      struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;
	unsigned long flags;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		goto good;
	}
	if (btrfs_test_flag(inode, NODATASUM))
		goto good;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
				  GFP_NOFS);
		return 0;
	}

	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	local_irq_save(flags);
	kaddr = kmap_atomic(page, KM_IRQ0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private) {
		goto zeroit;
	}
	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);
good:
	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
	       "private %llu\n", page->mapping->host->i_ino,
	       (unsigned long long)start, csum,
	       (unsigned long long)private);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);
	if (private == 0)
		return 0;
	return -EIO;
}
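/*
 * Verification sketch: 'private' holds the checksum recorded at write
 * time for this range; btrfs_csum_data/btrfs_csum_final recompute it
 * over the bytes just read.  On a mismatch the buffer is poisoned with
 * memset(.., 1, ..) and -EIO sends the read through the failed-mirror
 * retry logic in btrfs_io_failed_hook.
 */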
/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	/* already on the orphan list, we're good */
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

	spin_unlock(&root->list_lock);

	/*
	 * insert an orphan item to track this unlinked/truncated file
	 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

	return ret;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_del_init(&BTRFS_I(inode)->i_orphan);
	if (!trans) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	spin_unlock(&root->list_lock);

	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);

	return ret;
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
void btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	path = btrfs_alloc_path();
	if (!path)
		return;
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			printk(KERN_ERR "Error searching slot for orphan: %d\n",
			       ret);
			break;
		}

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		item = btrfs_item_nr(leaf, path->slots[0]);
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		inode = btrfs_iget_locked(root->fs_info->sb,
					  found_key.offset, root);
		if (!inode)
			break;

		if (inode->i_state & I_NEW) {
			BTRFS_I(inode)->root = root;

			/* have to set the location manually */
			BTRFS_I(inode)->location.objectid = inode->i_ino;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;

			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->list_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->list_lock);

		/*
		 * if this is a bad inode, means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 1);
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, lets do that */
		if (inode->i_nlink) {
			nr_truncate++;
			btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

	btrfs_free_path(path);
}
/*
 * read an inode from the btree into the in-memory inode
 */
void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
	BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
						alloc_group_block, 0);
	btrfs_free_path(path);
	inode_item = NULL;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
}
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto err;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  inode->i_ino,
				  dir->i_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       inode->i_ino, dir->i_ino);
		goto err;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir->i_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	BTRFS_I(dir)->log_dirty_trans = trans->transid;

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	BUG_ON(ret != 0 && ret != -ENOENT);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
	btrfs_drop_nlink(inode);
	ret = btrfs_update_inode(trans, root, inode);
	dir->i_sb->s_dirt = 1;
out:
	return ret;
}
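/*
 * Why 'name_len * 2' above: btrfs counts each name against directory
 * i_size twice, once for the DIR_ITEM and once for the DIR_INDEX
 * entry, so unlinking an 8 character name shrinks the directory's
 * i_size by 16 bytes.
 */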
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	root = BTRFS_I(dir)->root;

	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);

	btrfs_set_trans_block_group(trans, dir);
	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);

	if (inode->i_nlink == 0)
		ret = btrfs_orphan_add(trans, inode);

	nr = trans->blocks_used;

	btrfs_end_transaction_throttle(trans, root);
fail:
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}
2244 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2246 struct inode *inode = dentry->d_inode;
2249 struct btrfs_root *root = BTRFS_I(dir)->root;
2250 struct btrfs_trans_handle *trans;
2251 unsigned long nr = 0;
2254 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2255 * the root of a subvolume or snapshot
2257 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2258 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2262 ret = btrfs_check_free_space(root, 1, 1);
2266 trans = btrfs_start_transaction(root, 1);
2267 btrfs_set_trans_block_group(trans, dir);
2269 err = btrfs_orphan_add(trans, inode);
2273 /* now the directory is empty */
2274 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2275 dentry->d_name.name, dentry->d_name.len);
2277 btrfs_i_size_write(inode, 0);
2281 nr = trans->blocks_used;
2282 ret = btrfs_end_transaction_throttle(trans, root);
2284 btrfs_btree_balance_dirty(root, nr);
2293 * when truncating bytes in a file, it is possible to avoid reading
2294 * the leaves that contain only checksum items. This can be the
2295 * majority of the IO required to delete a large file, but it must
2296 * be done carefully.
2298 * The keys in the level just above the leaves are checked to make sure
2299 * the lowest key in a given leaf is a csum key, and starts at an offset
2300 * after the new size.
2302 * Then the key for the next leaf is checked to make sure it also has
2303 * a checksum item for the same file. If it does, we know our target leaf
2304 * contains only checksum items, and it can be safely freed without reading
2305 * them.
2307 * This is just an optimization targeted at large files. It may do
2308 * nothing. It will return 0 unless things go badly.
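 *
 * a sketch of the layout being tested, assuming 64K of csums per leaf:
 *
 *   level 1 node keys:  (ino CSUM 0) (ino CSUM 64K) (ino CSUM 128K)
 *
 * with new_size <= 64K, the middle leaf starts at a csum for this
 * inode at an offset past new_size, and the following key shows the
 * leaf ends inside the same csum range, so the whole leaf can be
 * freed without reading it.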
2310 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2311 struct btrfs_root *root,
2312 struct btrfs_path *path,
2313 struct inode *inode, u64 new_size)
2315 struct btrfs_key key;
2318 struct btrfs_key found_key;
2319 struct btrfs_key other_key;
2320 struct btrfs_leaf_ref *ref;
2324 path->lowest_level = 1;
2325 key.objectid = inode->i_ino;
2326 key.type = BTRFS_CSUM_ITEM_KEY;
2327 key.offset = new_size;
2329 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2333 if (path->nodes[1] == NULL) {
2338 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2339 nritems = btrfs_header_nritems(path->nodes[1]);
2344 if (path->slots[1] >= nritems)
2347 /* did we find a key greater than anything we want to delete? */
2348 if (found_key.objectid > inode->i_ino ||
2349 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2352 /* we check the next key in the node to make sure the leaf contains
2353 * only checksum items. This comparison doesn't work if our
2354 * leaf is the last one in the node
2356 if (path->slots[1] + 1 >= nritems) {
2358 /* search forward from the last key in the node; this
2359 * will bring us into the next node in the tree
2361 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2363 /* unlikely, but we inc below, so check to be safe */
2364 if (found_key.offset == (u64)-1)
2367 /* search_forward needs a path with locks held, so do the
2368 * search again for the original key. It is possible
2369 * this will race with a balance and return a path that
2370 * we could modify, but this drop is just an optimization
2371 * and is allowed to miss some leaves.
2373 btrfs_release_path(root, path);
2376 /* setup a max key for search_forward */
2377 other_key.offset = (u64)-1;
2378 other_key.type = key.type;
2379 other_key.objectid = key.objectid;
2381 path->keep_locks = 1;
2382 ret = btrfs_search_forward(root, &found_key, &other_key,
2384 path->keep_locks = 0;
2385 if (ret || found_key.objectid != key.objectid ||
2386 found_key.type != key.type) {
2391 key.offset = found_key.offset;
2392 btrfs_release_path(root, path);
2397 /* we know there's one more slot after us in the tree,
2398 * read that key so we can verify it is also a checksum item
2400 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2402 if (found_key.objectid < inode->i_ino)
2405 if (found_key.type != key.type || found_key.offset < new_size)
2409 * if the key for the next leaf isn't a csum key from this objectid,
2410 * we can't be sure there aren't good items inside this leaf.
2413 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2416 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2417 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2419 * it is safe to delete this leaf, it contains only
2420 * csum items from this inode at an offset >= new_size
2422 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2425 if (root->ref_cows && leaf_gen < trans->transid) {
2426 ref = btrfs_alloc_leaf_ref(root, 0);
2428 ref->root_gen = root->root_key.offset;
2429 ref->bytenr = leaf_start;
2431 ref->generation = leaf_gen;
2434 ret = btrfs_add_leaf_ref(root, ref, 0);
2436 btrfs_free_leaf_ref(root, ref);
2442 btrfs_release_path(root, path);
2444 if (other_key.objectid == inode->i_ino &&
2445 other_key.type == key.type && other_key.offset > key.offset) {
2446 key.offset = other_key.offset;
2452 /* fixup any changes we've made to the path */
2453 path->lowest_level = 0;
2454 path->keep_locks = 0;
2455 btrfs_release_path(root, path);
2462 * this can truncate away extent items, csum items and directory items.
2463 * It starts at a high offset and removes keys until it can't find
2464 * any higher than new_size
2466 * csum items that cross the new i_size are truncated to the new size
2469 * min_type is the minimum key type to truncate down to. If set to 0, this
2470 * will kill all the items on this inode, including the INODE_ITEM_KEY.
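 *
 * the loop below walks backwards from key offset (u64)-1 and batches
 * runs of adjacent slots into pending_del_slot/pending_del_nr, so a
 * contiguous range of doomed items can go in one btrfs_del_items call.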
2472 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2473 struct btrfs_root *root,
2474 struct inode *inode,
2475 u64 new_size, u32 min_type)
2478 struct btrfs_path *path;
2479 struct btrfs_key key;
2480 struct btrfs_key found_key;
2482 struct extent_buffer *leaf;
2483 struct btrfs_file_extent_item *fi;
2484 u64 extent_start = 0;
2485 u64 extent_num_bytes = 0;
2491 int pending_del_nr = 0;
2492 int pending_del_slot = 0;
2493 int extent_type = -1;
2495 u64 mask = root->sectorsize - 1;
2498 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2499 path = btrfs_alloc_path();
2503 /* FIXME, add redo link to tree so we don't leak on crash */
2504 key.objectid = inode->i_ino;
2505 key.offset = (u64)-1;
2508 btrfs_init_path(path);
2511 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2516 /* there are no items in the tree for us to truncate, we're
2517 * done */
2519 if (path->slots[0] == 0) {
2528 leaf = path->nodes[0];
2529 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2530 found_type = btrfs_key_type(&found_key);
2533 if (found_key.objectid != inode->i_ino)
2536 if (found_type < min_type)
2539 item_end = found_key.offset;
2540 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2541 fi = btrfs_item_ptr(leaf, path->slots[0],
2542 struct btrfs_file_extent_item);
2543 extent_type = btrfs_file_extent_type(leaf, fi);
2544 encoding = btrfs_file_extent_compression(leaf, fi);
2545 encoding |= btrfs_file_extent_encryption(leaf, fi);
2546 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2548 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2550 btrfs_file_extent_num_bytes(leaf, fi);
2551 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2552 item_end += btrfs_file_extent_inline_len(leaf,
2557 if (item_end < new_size) {
2558 if (found_type == BTRFS_DIR_ITEM_KEY) {
2559 found_type = BTRFS_INODE_ITEM_KEY;
2560 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
2561 found_type = BTRFS_EXTENT_DATA_KEY;
2562 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
2563 found_type = BTRFS_XATTR_ITEM_KEY;
2564 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
2565 found_type = BTRFS_INODE_REF_KEY;
2566 } else if (found_type) {
2571 btrfs_set_key_type(&key, found_type);
2574 if (found_key.offset >= new_size)
2580 /* FIXME, shrink the extent if the ref count is only 1 */
2581 if (found_type != BTRFS_EXTENT_DATA_KEY)
2584 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2586 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2587 if (!del_item && !encoding) {
2588 u64 orig_num_bytes =
2589 btrfs_file_extent_num_bytes(leaf, fi);
2590 extent_num_bytes = new_size -
2591 found_key.offset + root->sectorsize - 1;
2592 extent_num_bytes = extent_num_bytes &
2593 ~((u64)root->sectorsize - 1);
2594 btrfs_set_file_extent_num_bytes(leaf, fi,
2596 num_dec = (orig_num_bytes -
2598 if (root->ref_cows && extent_start != 0)
2599 inode_sub_bytes(inode, num_dec);
2600 btrfs_mark_buffer_dirty(leaf);
2603 btrfs_file_extent_disk_num_bytes(leaf,
2605 /* FIXME blocksize != 4096 */
2606 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2607 if (extent_start != 0) {
2610 inode_sub_bytes(inode, num_dec);
2612 root_gen = btrfs_header_generation(leaf);
2613 root_owner = btrfs_header_owner(leaf);
2615 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2617 * we can't truncate inline items that have had
2618 * special encodings
2619 */
2621 btrfs_file_extent_compression(leaf, fi) == 0 &&
2622 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2623 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2624 u32 size = new_size - found_key.offset;
2626 if (root->ref_cows) {
2627 inode_sub_bytes(inode, item_end + 1 -
2631 btrfs_file_extent_calc_inline_size(size);
2632 ret = btrfs_truncate_item(trans, root, path,
2635 } else if (root->ref_cows) {
2636 inode_sub_bytes(inode, item_end + 1 -
2642 if (!pending_del_nr) {
2643 /* no pending yet, add ourselves */
2644 pending_del_slot = path->slots[0];
2646 } else if (pending_del_nr &&
2647 path->slots[0] + 1 == pending_del_slot) {
2648 /* hop on the pending chunk */
2650 pending_del_slot = path->slots[0];
2652 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
2658 ret = btrfs_free_extent(trans, root, extent_start,
2660 leaf->start, root_owner,
2661 root_gen, inode->i_ino, 0);
2665 if (path->slots[0] == 0) {
2668 btrfs_release_path(root, path);
2673 if (pending_del_nr &&
2674 path->slots[0] + 1 != pending_del_slot) {
2675 struct btrfs_key debug;
2677 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2679 ret = btrfs_del_items(trans, root, path,
2684 btrfs_release_path(root, path);
2690 if (pending_del_nr) {
2691 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2694 btrfs_free_path(path);
2695 inode->i_sb->s_dirt = 1;
2700 * taken from block_truncate_page, but does COW as it zeroes out
2701 * any bytes left in the last page in the file.
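 *
 * the partial block is not zeroed in place on disk; the page is
 * marked delalloc and dirtied, so the zeroed tail is written back
 * through the normal COW path instead of overwriting the old extent.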
2703 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2705 struct inode *inode = mapping->host;
2706 struct btrfs_root *root = BTRFS_I(inode)->root;
2707 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2708 struct btrfs_ordered_extent *ordered;
2710 u32 blocksize = root->sectorsize;
2711 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2712 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2718 if ((offset & (blocksize - 1)) == 0)
2723 page = grab_cache_page(mapping, index);
2727 page_start = page_offset(page);
2728 page_end = page_start + PAGE_CACHE_SIZE - 1;
2730 if (!PageUptodate(page)) {
2731 ret = btrfs_readpage(NULL, page);
2733 if (page->mapping != mapping) {
2735 page_cache_release(page);
2738 if (!PageUptodate(page)) {
2743 wait_on_page_writeback(page);
2745 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2746 set_page_extent_mapped(page);
2748 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2750 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2752 page_cache_release(page);
2753 btrfs_start_ordered_extent(inode, ordered, 1);
2754 btrfs_put_ordered_extent(ordered);
2758 btrfs_set_extent_delalloc(inode, page_start, page_end);
2760 if (offset != PAGE_CACHE_SIZE) {
2762 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2763 flush_dcache_page(page);
2766 ClearPageChecked(page);
2767 set_page_dirty(page);
2768 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2772 page_cache_release(page);
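/*
 * expanding truncate: fill the area between the old i_size and the new
 * size with explicit hole extents (disk_bytenr 0). For example, with a
 * 4K sectorsize, growing a 5000 byte file gives
 * hole_start = (5000 + 4095) & ~4095 = 8192, once the zeroed tail of
 * the last page has been handled by btrfs_truncate_page().
 */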
2777 int btrfs_cont_expand(struct inode *inode, loff_t size)
2779 struct btrfs_trans_handle *trans;
2780 struct btrfs_root *root = BTRFS_I(inode)->root;
2781 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2782 struct extent_map *em;
2783 u64 mask = root->sectorsize - 1;
2784 u64 hole_start = (inode->i_size + mask) & ~mask;
2785 u64 block_end = (size + mask) & ~mask;
2791 if (size <= hole_start)
2794 err = btrfs_check_free_space(root, 1, 0);
2798 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2801 struct btrfs_ordered_extent *ordered;
2802 btrfs_wait_ordered_range(inode, hole_start,
2803 block_end - hole_start);
2804 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2805 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2808 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2809 btrfs_put_ordered_extent(ordered);
2812 trans = btrfs_start_transaction(root, 1);
2813 btrfs_set_trans_block_group(trans, inode);
2815 cur_offset = hole_start;
2817 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2818 block_end - cur_offset, 0);
2819 BUG_ON(IS_ERR(em) || !em);
2820 last_byte = min(extent_map_end(em), block_end);
2821 last_byte = (last_byte + mask) & ~mask;
2822 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2824 hole_size = last_byte - cur_offset;
2825 err = btrfs_drop_extents(trans, root, inode,
2827 cur_offset + hole_size,
2828 cur_offset, &hint_byte);
2831 err = btrfs_insert_file_extent(trans, root,
2832 inode->i_ino, cur_offset, 0,
2833 0, hole_size, 0, hole_size,
2835 btrfs_drop_extent_cache(inode, hole_start,
2838 free_extent_map(em);
2839 cur_offset = last_byte;
2840 if (err || cur_offset >= block_end)
2844 btrfs_end_transaction(trans, root);
2845 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2849 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2851 struct inode *inode = dentry->d_inode;
2854 err = inode_change_ok(inode, attr);
2858 if (S_ISREG(inode->i_mode) &&
2859 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2860 err = btrfs_cont_expand(inode, attr->ia_size);
2865 err = inode_setattr(inode, attr);
2867 if (!err && ((attr->ia_valid & ATTR_MODE)))
2868 err = btrfs_acl_chmod(inode);
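/*
 * called on the final iput of an unlinked inode: truncate away every
 * item the inode owns (min_type 0 kills even the INODE_ITEM) and then
 * clear the orphan record that was protecting it.
 */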
2872 void btrfs_delete_inode(struct inode *inode)
2874 struct btrfs_trans_handle *trans;
2875 struct btrfs_root *root = BTRFS_I(inode)->root;
2879 truncate_inode_pages(&inode->i_data, 0);
2880 if (is_bad_inode(inode)) {
2881 btrfs_orphan_del(NULL, inode);
2884 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2886 btrfs_i_size_write(inode, 0);
2887 trans = btrfs_start_transaction(root, 1);
2889 btrfs_set_trans_block_group(trans, inode);
2890 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2892 btrfs_orphan_del(NULL, inode);
2893 goto no_delete_lock;
2896 btrfs_orphan_del(trans, inode);
2898 nr = trans->blocks_used;
2901 btrfs_end_transaction(trans, root);
2902 btrfs_btree_balance_dirty(root, nr);
2906 nr = trans->blocks_used;
2907 btrfs_end_transaction(trans, root);
2908 btrfs_btree_balance_dirty(root, nr);
2914 * this returns the key found in the dir entry in the location pointer.
2915 * If no dir entries were found, location->objectid is 0.
2917 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2918 struct btrfs_key *location)
2920 const char *name = dentry->d_name.name;
2921 int namelen = dentry->d_name.len;
2922 struct btrfs_dir_item *di;
2923 struct btrfs_path *path;
2924 struct btrfs_root *root = BTRFS_I(dir)->root;
2927 path = btrfs_alloc_path();
2930 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2934 if (!di || IS_ERR(di)) {
2937 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2939 btrfs_free_path(path);
2942 location->objectid = 0;
2947 * when we hit a tree root in a directory, the btrfs part of the inode
2948 * needs to be changed to reflect the root directory of the tree root. This
2949 * is kind of like crossing a mount point.
2951 static int fixup_tree_root_location(struct btrfs_root *root,
2952 struct btrfs_key *location,
2953 struct btrfs_root **sub_root,
2954 struct dentry *dentry)
2956 struct btrfs_root_item *ri;
2958 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2960 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2963 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2964 dentry->d_name.name,
2965 dentry->d_name.len);
2966 if (IS_ERR(*sub_root))
2967 return PTR_ERR(*sub_root);
2969 ri = &(*sub_root)->root_item;
2970 location->objectid = btrfs_root_dirid(ri);
2971 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2972 location->offset = 0;
2977 static noinline void init_btrfs_i(struct inode *inode)
2979 struct btrfs_inode *bi = BTRFS_I(inode);
2982 bi->i_default_acl = NULL;
2987 bi->logged_trans = 0;
2988 bi->delalloc_bytes = 0;
2989 bi->disk_i_size = 0;
2991 bi->index_cnt = (u64)-1;
2992 bi->log_dirty_trans = 0;
2993 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2994 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2995 inode->i_mapping, GFP_NOFS);
2996 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2997 inode->i_mapping, GFP_NOFS);
2998 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
2999 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3000 mutex_init(&BTRFS_I(inode)->extent_mutex);
3001 mutex_init(&BTRFS_I(inode)->log_mutex);
3004 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3006 struct btrfs_iget_args *args = p;
3007 inode->i_ino = args->ino;
3008 init_btrfs_i(inode);
3009 BTRFS_I(inode)->root = args->root;
3013 static int btrfs_find_actor(struct inode *inode, void *opaque)
3015 struct btrfs_iget_args *args = opaque;
3016 return (args->ino == inode->i_ino &&
3017 args->root == BTRFS_I(inode)->root);
3020 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3021 struct btrfs_root *root, int wait)
3023 struct inode *inode;
3024 struct btrfs_iget_args args;
3025 args.ino = objectid;
3029 inode = ilookup5(s, objectid, btrfs_find_actor,
3032 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3038 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3039 struct btrfs_root *root)
3041 struct inode *inode;
3042 struct btrfs_iget_args args;
3043 args.ino = objectid;
3046 inode = iget5_locked(s, objectid, btrfs_find_actor,
3047 btrfs_init_locked_inode,
3052 /* Get an inode object given its location and corresponding root.
3053 * Sets *is_new if the inode had to be read from disk
3055 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3056 struct btrfs_root *root, int *is_new)
3058 struct inode *inode;
3060 inode = btrfs_iget_locked(s, location->objectid, root);
3062 return ERR_PTR(-EACCES);
3064 if (inode->i_state & I_NEW) {
3065 BTRFS_I(inode)->root = root;
3066 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3067 btrfs_read_locked_inode(inode);
3068 unlock_new_inode(inode);
3079 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3081 struct inode *inode;
3082 struct btrfs_inode *bi = BTRFS_I(dir);
3083 struct btrfs_root *root = bi->root;
3084 struct btrfs_root *sub_root = root;
3085 struct btrfs_key location;
3088 if (dentry->d_name.len > BTRFS_NAME_LEN)
3089 return ERR_PTR(-ENAMETOOLONG);
3091 ret = btrfs_inode_by_name(dir, dentry, &location);
3094 return ERR_PTR(ret);
3097 if (location.objectid) {
3098 ret = fixup_tree_root_location(root, &location, &sub_root,
3101 return ERR_PTR(ret);
3103 return ERR_PTR(-ENOENT);
3104 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3106 return ERR_CAST(inode);
3111 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3112 struct nameidata *nd)
3114 struct inode *inode;
3116 if (dentry->d_name.len > BTRFS_NAME_LEN)
3117 return ERR_PTR(-ENAMETOOLONG);
3119 inode = btrfs_lookup_dentry(dir, dentry);
3121 return ERR_CAST(inode);
3123 return d_splice_alias(inode, dentry);
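/*
 * maps the BTRFS_FT_* value stored in a dir item to the DT_* codes the
 * VFS expects from readdir; indexed by btrfs_dir_type() below.
 */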
3126 static unsigned char btrfs_filetype_table[] = {
3127 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3130 static int btrfs_real_readdir(struct file *filp, void *dirent,
3133 struct inode *inode = filp->f_dentry->d_inode;
3134 struct btrfs_root *root = BTRFS_I(inode)->root;
3135 struct btrfs_item *item;
3136 struct btrfs_dir_item *di;
3137 struct btrfs_key key;
3138 struct btrfs_key found_key;
3139 struct btrfs_path *path;
3142 struct extent_buffer *leaf;
3145 unsigned char d_type;
3150 int key_type = BTRFS_DIR_INDEX_KEY;
3155 /* FIXME, use a real flag for deciding about the key type */
3156 if (root->fs_info->tree_root == root)
3157 key_type = BTRFS_DIR_ITEM_KEY;
3159 /* special case for "." */
3160 if (filp->f_pos == 0) {
3161 over = filldir(dirent, ".", 1,
3168 /* special case for .., just use the back ref */
3169 if (filp->f_pos == 1) {
3170 u64 pino = parent_ino(filp->f_path.dentry);
3171 over = filldir(dirent, "..", 2,
3177 path = btrfs_alloc_path();
3180 btrfs_set_key_type(&key, key_type);
3181 key.offset = filp->f_pos;
3182 key.objectid = inode->i_ino;
3184 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3190 leaf = path->nodes[0];
3191 nritems = btrfs_header_nritems(leaf);
3192 slot = path->slots[0];
3193 if (advance || slot >= nritems) {
3194 if (slot >= nritems - 1) {
3195 ret = btrfs_next_leaf(root, path);
3198 leaf = path->nodes[0];
3199 nritems = btrfs_header_nritems(leaf);
3200 slot = path->slots[0];
3208 item = btrfs_item_nr(leaf, slot);
3209 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3211 if (found_key.objectid != key.objectid)
3213 if (btrfs_key_type(&found_key) != key_type)
3215 if (found_key.offset < filp->f_pos)
3218 filp->f_pos = found_key.offset;
3220 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3222 di_total = btrfs_item_size(leaf, item);
3224 while (di_cur < di_total) {
3225 struct btrfs_key location;
3227 name_len = btrfs_dir_name_len(leaf, di);
3228 if (name_len <= sizeof(tmp_name)) {
3229 name_ptr = tmp_name;
3231 name_ptr = kmalloc(name_len, GFP_NOFS);
3237 read_extent_buffer(leaf, name_ptr,
3238 (unsigned long)(di + 1), name_len);
3240 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3241 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3243 /* is this a reference to our own snapshot? If so
3244 * skip it
3245 */
3246 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3247 location.objectid == root->root_key.objectid) {
3251 over = filldir(dirent, name_ptr, name_len,
3252 found_key.offset, location.objectid,
3256 if (name_ptr != tmp_name)
3261 di_len = btrfs_dir_name_len(leaf, di) +
3262 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3264 di = (struct btrfs_dir_item *)((char *)di + di_len);
3268 /* Reached end of directory/root. Bump pos past the last item. */
3269 if (key_type == BTRFS_DIR_INDEX_KEY)
3270 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
3276 btrfs_free_path(path);
3280 int btrfs_write_inode(struct inode *inode, int wait)
3282 struct btrfs_root *root = BTRFS_I(inode)->root;
3283 struct btrfs_trans_handle *trans;
3286 if (root->fs_info->btree_inode == inode)
3290 trans = btrfs_join_transaction(root, 1);
3291 btrfs_set_trans_block_group(trans, inode);
3292 ret = btrfs_commit_transaction(trans, root);
3298 * This is somewhat expensive, updating the tree every time the
3299 * inode changes. But it is most likely to find the inode in cache.
3300 * FIXME, needs more benchmarking... there are no reasons other than performance
3301 * to keep or drop this code.
3303 void btrfs_dirty_inode(struct inode *inode)
3305 struct btrfs_root *root = BTRFS_I(inode)->root;
3306 struct btrfs_trans_handle *trans;
3308 trans = btrfs_join_transaction(root, 1);
3309 btrfs_set_trans_block_group(trans, inode);
3310 btrfs_update_inode(trans, root, inode);
3311 btrfs_end_transaction(trans, root);
3315 * find the highest existing sequence number in a directory
3316 * and then set the in-memory index_cnt variable to reflect
3317 * free sequence numbers
3319 static int btrfs_set_inode_index_count(struct inode *inode)
3321 struct btrfs_root *root = BTRFS_I(inode)->root;
3322 struct btrfs_key key, found_key;
3323 struct btrfs_path *path;
3324 struct extent_buffer *leaf;
3327 key.objectid = inode->i_ino;
3328 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3329 key.offset = (u64)-1;
3331 path = btrfs_alloc_path();
3335 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3338 /* FIXME: we should be able to handle this */
3344 * MAGIC NUMBER EXPLANATION:
3345 * since we search a directory based on f_pos we have to start at 2:
3346 * '.' and '..' have f_pos of 0 and 1 respectively, so every other
3347 * entry has to start at 2
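 *
 * so a new, empty directory gets index_cnt = 2, and its first real
 * entry is created with a DIR_INDEX offset of 2.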
3349 if (path->slots[0] == 0) {
3350 BTRFS_I(inode)->index_cnt = 2;
3356 leaf = path->nodes[0];
3357 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3359 if (found_key.objectid != inode->i_ino ||
3360 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3361 BTRFS_I(inode)->index_cnt = 2;
3365 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3367 btrfs_free_path(path);
3372 * helper to find a free sequence number in a given directory. The current
3373 * code is very simple; later versions will do smarter things in the btree
3375 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3379 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3380 ret = btrfs_set_inode_index_count(dir);
3386 *index = BTRFS_I(dir)->index_cnt;
3387 BTRFS_I(dir)->index_cnt++;
3392 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3393 struct btrfs_root *root,
3395 const char *name, int name_len,
3396 u64 ref_objectid, u64 objectid,
3397 u64 alloc_hint, int mode, u64 *index)
3399 struct inode *inode;
3400 struct btrfs_inode_item *inode_item;
3401 struct btrfs_key *location;
3402 struct btrfs_path *path;
3403 struct btrfs_inode_ref *ref;
3404 struct btrfs_key key[2];
3410 path = btrfs_alloc_path();
3413 inode = new_inode(root->fs_info->sb);
3415 return ERR_PTR(-ENOMEM);
3418 ret = btrfs_set_inode_index(dir, index);
3420 return ERR_PTR(ret);
3423 * index_cnt is ignored for everything but a dir,
3424 * btrfs_set_inode_index_count has an explanation for the magic
3427 init_btrfs_i(inode);
3428 BTRFS_I(inode)->index_cnt = 2;
3429 BTRFS_I(inode)->root = root;
3430 BTRFS_I(inode)->generation = trans->transid;
3436 BTRFS_I(inode)->block_group =
3437 btrfs_find_block_group(root, 0, alloc_hint, owner);
3438 if ((mode & S_IFREG)) {
3439 if (btrfs_test_opt(root, NODATASUM))
3440 btrfs_set_flag(inode, NODATASUM);
3441 if (btrfs_test_opt(root, NODATACOW))
3442 btrfs_set_flag(inode, NODATACOW);
3445 key[0].objectid = objectid;
3446 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3449 key[1].objectid = objectid;
3450 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3451 key[1].offset = ref_objectid;
3453 sizes[0] = sizeof(struct btrfs_inode_item);
3454 sizes[1] = name_len + sizeof(*ref);
3456 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3460 if (objectid > root->highest_inode)
3461 root->highest_inode = objectid;
3463 inode->i_uid = current_fsuid();
3464 inode->i_gid = current_fsgid();
3465 inode->i_mode = mode;
3466 inode->i_ino = objectid;
3467 inode_set_bytes(inode, 0);
3468 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3469 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3470 struct btrfs_inode_item);
3471 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3473 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3474 struct btrfs_inode_ref);
3475 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3476 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3477 ptr = (unsigned long)(ref + 1);
3478 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3480 btrfs_mark_buffer_dirty(path->nodes[0]);
3481 btrfs_free_path(path);
3483 location = &BTRFS_I(inode)->location;
3484 location->objectid = objectid;
3485 location->offset = 0;
3486 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3488 insert_inode_hash(inode);
3492 BTRFS_I(dir)->index_cnt--;
3493 btrfs_free_path(path);
3494 return ERR_PTR(ret);
3497 static inline u8 btrfs_inode_type(struct inode *inode)
3499 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3503 * utility function to add 'inode' into 'parent_inode' with
3504 * a given name and a given sequence number.
3505 * if 'add_backref' is true, also insert a backref from the
3506 * inode to the parent directory.
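 *
 * two insertions happen below: btrfs_insert_dir_item creates the
 * directory entry (and its DIR_INDEX twin) under the parent's
 * objectid, and btrfs_insert_inode_ref records the back reference on
 * the child, so the link can be found from either end.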
3508 int btrfs_add_link(struct btrfs_trans_handle *trans,
3509 struct inode *parent_inode, struct inode *inode,
3510 const char *name, int name_len, int add_backref, u64 index)
3513 struct btrfs_key key;
3514 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3516 key.objectid = inode->i_ino;
3517 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3520 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3521 parent_inode->i_ino,
3522 &key, btrfs_inode_type(inode),
3526 ret = btrfs_insert_inode_ref(trans, root,
3529 parent_inode->i_ino,
3532 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3534 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3535 ret = btrfs_update_inode(trans, root, parent_inode);
3540 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3541 struct dentry *dentry, struct inode *inode,
3542 int backref, u64 index)
3544 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3545 inode, dentry->d_name.name,
3546 dentry->d_name.len, backref, index);
3548 d_instantiate(dentry, inode);
3556 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3557 int mode, dev_t rdev)
3559 struct btrfs_trans_handle *trans;
3560 struct btrfs_root *root = BTRFS_I(dir)->root;
3561 struct inode *inode = NULL;
3565 unsigned long nr = 0;
3568 if (!new_valid_dev(rdev))
3571 err = btrfs_check_free_space(root, 1, 0);
3575 trans = btrfs_start_transaction(root, 1);
3576 btrfs_set_trans_block_group(trans, dir);
3578 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3584 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3586 dentry->d_parent->d_inode->i_ino, objectid,
3587 BTRFS_I(dir)->block_group, mode, &index);
3588 err = PTR_ERR(inode);
3592 err = btrfs_init_acl(inode, dir);
3598 btrfs_set_trans_block_group(trans, inode);
3599 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3603 inode->i_op = &btrfs_special_inode_operations;
3604 init_special_inode(inode, inode->i_mode, rdev);
3605 btrfs_update_inode(trans, root, inode);
3607 dir->i_sb->s_dirt = 1;
3608 btrfs_update_inode_block_group(trans, inode);
3609 btrfs_update_inode_block_group(trans, dir);
3611 nr = trans->blocks_used;
3612 btrfs_end_transaction_throttle(trans, root);
3615 inode_dec_link_count(inode);
3618 btrfs_btree_balance_dirty(root, nr);
3622 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3623 int mode, struct nameidata *nd)
3625 struct btrfs_trans_handle *trans;
3626 struct btrfs_root *root = BTRFS_I(dir)->root;
3627 struct inode *inode = NULL;
3630 unsigned long nr = 0;
3634 err = btrfs_check_free_space(root, 1, 0);
3637 trans = btrfs_start_transaction(root, 1);
3638 btrfs_set_trans_block_group(trans, dir);
3640 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3646 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3648 dentry->d_parent->d_inode->i_ino,
3649 objectid, BTRFS_I(dir)->block_group, mode,
3651 err = PTR_ERR(inode);
3655 err = btrfs_init_acl(inode, dir);
3661 btrfs_set_trans_block_group(trans, inode);
3662 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3666 inode->i_mapping->a_ops = &btrfs_aops;
3667 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3668 inode->i_fop = &btrfs_file_operations;
3669 inode->i_op = &btrfs_file_inode_operations;
3670 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3672 dir->i_sb->s_dirt = 1;
3673 btrfs_update_inode_block_group(trans, inode);
3674 btrfs_update_inode_block_group(trans, dir);
3676 nr = trans->blocks_used;
3677 btrfs_end_transaction_throttle(trans, root);
3680 inode_dec_link_count(inode);
3683 btrfs_btree_balance_dirty(root, nr);
3687 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3688 struct dentry *dentry)
3690 struct btrfs_trans_handle *trans;
3691 struct btrfs_root *root = BTRFS_I(dir)->root;
3692 struct inode *inode = old_dentry->d_inode;
3694 unsigned long nr = 0;
3698 if (inode->i_nlink == 0)
3701 btrfs_inc_nlink(inode);
3702 err = btrfs_check_free_space(root, 1, 0);
3705 err = btrfs_set_inode_index(dir, &index);
3709 trans = btrfs_start_transaction(root, 1);
3711 btrfs_set_trans_block_group(trans, dir);
3712 atomic_inc(&inode->i_count);
3714 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3719 dir->i_sb->s_dirt = 1;
3720 btrfs_update_inode_block_group(trans, dir);
3721 err = btrfs_update_inode(trans, root, inode);
3726 nr = trans->blocks_used;
3727 btrfs_end_transaction_throttle(trans, root);
3730 inode_dec_link_count(inode);
3733 btrfs_btree_balance_dirty(root, nr);
3737 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3739 struct inode *inode = NULL;
3740 struct btrfs_trans_handle *trans;
3741 struct btrfs_root *root = BTRFS_I(dir)->root;
3743 int drop_on_err = 0;
3746 unsigned long nr = 1;
3748 err = btrfs_check_free_space(root, 1, 0);
3752 trans = btrfs_start_transaction(root, 1);
3755 if (IS_ERR(trans)) {
3756 err = PTR_ERR(trans);
3757 goto out_unlock;
3758 }
3759 btrfs_set_trans_block_group(trans, dir);
3760 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3766 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3768 dentry->d_parent->d_inode->i_ino, objectid,
3769 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3771 if (IS_ERR(inode)) {
3772 err = PTR_ERR(inode);
3778 err = btrfs_init_acl(inode, dir);
3782 inode->i_op = &btrfs_dir_inode_operations;
3783 inode->i_fop = &btrfs_dir_file_operations;
3784 btrfs_set_trans_block_group(trans, inode);
3786 btrfs_i_size_write(inode, 0);
3787 err = btrfs_update_inode(trans, root, inode);
3791 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3792 inode, dentry->d_name.name,
3793 dentry->d_name.len, 0, index);
3797 d_instantiate(dentry, inode);
3799 dir->i_sb->s_dirt = 1;
3800 btrfs_update_inode_block_group(trans, inode);
3801 btrfs_update_inode_block_group(trans, dir);
3804 nr = trans->blocks_used;
3805 btrfs_end_transaction_throttle(trans, root);
3810 btrfs_btree_balance_dirty(root, nr);
3814 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3815 * and an extent that you want to insert, deal with overlap and insert
3816 * the new extent into the tree.
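 *
 * the overlapping em is clipped to begin at map_start, and block_start
 * advances by the same delta so file and disk offsets stay in sync
 * (compressed extents are left alone since their block_start covers
 * the whole compressed extent).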
3818 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3819 struct extent_map *existing,
3820 struct extent_map *em,
3821 u64 map_start, u64 map_len)
3825 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3826 start_diff = map_start - em->start;
3827 em->start = map_start;
3829 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3830 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3831 em->block_start += start_diff;
3832 em->block_len -= start_diff;
3834 return add_extent_mapping(em_tree, em);
3837 static noinline int uncompress_inline(struct btrfs_path *path,
3838 struct inode *inode, struct page *page,
3839 size_t pg_offset, u64 extent_offset,
3840 struct btrfs_file_extent_item *item)
3843 struct extent_buffer *leaf = path->nodes[0];
3846 unsigned long inline_size;
3849 WARN_ON(pg_offset != 0);
3850 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3851 inline_size = btrfs_file_extent_inline_item_len(leaf,
3852 btrfs_item_nr(leaf, path->slots[0]));
3853 tmp = kmalloc(inline_size, GFP_NOFS);
3854 if (!tmp)
3855 return -ENOMEM;
3856 ptr = btrfs_file_extent_inline_start(item);
3857 read_extent_buffer(leaf, tmp, ptr, inline_size);
3858 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3859 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3860 inline_size, max_size);
3862 char *kaddr = kmap_atomic(page, KM_USER0);
3863 unsigned long copy_size = min_t(u64,
3864 PAGE_CACHE_SIZE - pg_offset,
3865 max_size - extent_offset);
3866 memset(kaddr + pg_offset, 0, copy_size);
3867 kunmap_atomic(kaddr, KM_USER0);
3874 * a bit scary, this does extent mapping from logical file offset to the disk.
3875 * the ugly parts come from merging extents from the disk with the
3876 * in-ram representation. This gets more complex because of the data=ordered code,
3877 * where the in-ram extents might be locked pending data=ordered completion.
3879 * This also copies inline extents directly into the page.
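 *
 * the rough flow: check the in-ram extent_map tree first, fall back to
 * a btree search, build an em (EXTENT_MAP_HOLE if nothing was found)
 * and try to insert it, using merge_extent_mapping() when a racing
 * insert already put an overlapping em in the tree.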
3881 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3882 size_t pg_offset, u64 start, u64 len,
3888 u64 extent_start = 0;
3890 u64 objectid = inode->i_ino;
3892 struct btrfs_path *path = NULL;
3893 struct btrfs_root *root = BTRFS_I(inode)->root;
3894 struct btrfs_file_extent_item *item;
3895 struct extent_buffer *leaf;
3896 struct btrfs_key found_key;
3897 struct extent_map *em = NULL;
3898 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3899 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3900 struct btrfs_trans_handle *trans = NULL;
3904 spin_lock(&em_tree->lock);
3905 em = lookup_extent_mapping(em_tree, start, len);
3907 em->bdev = root->fs_info->fs_devices->latest_bdev;
3908 spin_unlock(&em_tree->lock);
3911 if (em->start > start || em->start + em->len <= start)
3912 free_extent_map(em);
3913 else if (em->block_start == EXTENT_MAP_INLINE && page)
3914 free_extent_map(em);
3918 em = alloc_extent_map(GFP_NOFS);
3923 em->bdev = root->fs_info->fs_devices->latest_bdev;
3924 em->start = EXTENT_MAP_HOLE;
3925 em->orig_start = EXTENT_MAP_HOLE;
3927 em->block_len = (u64)-1;
3930 path = btrfs_alloc_path();
3934 ret = btrfs_lookup_file_extent(trans, root, path,
3935 objectid, start, trans != NULL);
3942 if (path->slots[0] == 0)
3947 leaf = path->nodes[0];
3948 item = btrfs_item_ptr(leaf, path->slots[0],
3949 struct btrfs_file_extent_item);
3950 /* are we inside the extent that was found? */
3951 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3952 found_type = btrfs_key_type(&found_key);
3953 if (found_key.objectid != objectid ||
3954 found_type != BTRFS_EXTENT_DATA_KEY) {
3958 found_type = btrfs_file_extent_type(leaf, item);
3959 extent_start = found_key.offset;
3960 compressed = btrfs_file_extent_compression(leaf, item);
3961 if (found_type == BTRFS_FILE_EXTENT_REG ||
3962 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3963 extent_end = extent_start +
3964 btrfs_file_extent_num_bytes(leaf, item);
3965 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3967 size = btrfs_file_extent_inline_len(leaf, item);
3968 extent_end = (extent_start + size + root->sectorsize - 1) &
3969 ~((u64)root->sectorsize - 1);
3972 if (start >= extent_end) {
3974 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3975 ret = btrfs_next_leaf(root, path);
3982 leaf = path->nodes[0];
3984 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3985 if (found_key.objectid != objectid ||
3986 found_key.type != BTRFS_EXTENT_DATA_KEY)
3988 if (start + len <= found_key.offset)
3991 em->len = found_key.offset - start;
3995 if (found_type == BTRFS_FILE_EXTENT_REG ||
3996 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3997 em->start = extent_start;
3998 em->len = extent_end - extent_start;
3999 em->orig_start = extent_start -
4000 btrfs_file_extent_offset(leaf, item);
4001 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4003 em->block_start = EXTENT_MAP_HOLE;
4007 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4008 em->block_start = bytenr;
4009 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4012 bytenr += btrfs_file_extent_offset(leaf, item);
4013 em->block_start = bytenr;
4014 em->block_len = em->len;
4015 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4016 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4019 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4023 size_t extent_offset;
4026 em->block_start = EXTENT_MAP_INLINE;
4027 if (!page || create) {
4028 em->start = extent_start;
4029 em->len = extent_end - extent_start;
4033 size = btrfs_file_extent_inline_len(leaf, item);
4034 extent_offset = page_offset(page) + pg_offset - extent_start;
4035 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4036 size - extent_offset);
4037 em->start = extent_start + extent_offset;
4038 em->len = (copy_size + root->sectorsize - 1) &
4039 ~((u64)root->sectorsize - 1);
4040 em->orig_start = EXTENT_MAP_INLINE;
4042 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4043 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4044 if (create == 0 && !PageUptodate(page)) {
4045 if (btrfs_file_extent_compression(leaf, item) ==
4046 BTRFS_COMPRESS_ZLIB) {
4047 ret = uncompress_inline(path, inode, page,
4049 extent_offset, item);
4053 read_extent_buffer(leaf, map + pg_offset, ptr,
4057 flush_dcache_page(page);
4058 } else if (create && PageUptodate(page)) {
4061 free_extent_map(em);
4063 btrfs_release_path(root, path);
4064 trans = btrfs_join_transaction(root, 1);
4068 write_extent_buffer(leaf, map + pg_offset, ptr,
4071 btrfs_mark_buffer_dirty(leaf);
4073 set_extent_uptodate(io_tree, em->start,
4074 extent_map_end(em) - 1, GFP_NOFS);
4077 printk("unkknown found_type %d\n", found_type);
4084 em->block_start = EXTENT_MAP_HOLE;
4085 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4087 btrfs_release_path(root, path);
4088 if (em->start > start || extent_map_end(em) <= start) {
4089 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
4095 spin_lock(&em_tree->lock);
4096 ret = add_extent_mapping(em_tree, em);
4097 /* it is possible that someone inserted the extent into the tree
4098 * while we had the lock dropped. It is also possible that
4099 * an overlapping map exists in the tree
4101 if (ret == -EEXIST) {
4102 struct extent_map *existing;
4106 existing = lookup_extent_mapping(em_tree, start, len);
4107 if (existing && (existing->start > start ||
4108 existing->start + existing->len <= start)) {
4109 free_extent_map(existing);
4113 existing = lookup_extent_mapping(em_tree, em->start,
4116 err = merge_extent_mapping(em_tree, existing,
4119 free_extent_map(existing);
4121 free_extent_map(em);
4126 printk("failing to insert %Lu %Lu\n",
4128 free_extent_map(em);
4132 free_extent_map(em);
4137 spin_unlock(&em_tree->lock);
4140 btrfs_free_path(path);
4142 ret = btrfs_end_transaction(trans, root);
4148 free_extent_map(em);
4150 return ERR_PTR(err);
4155 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4156 const struct iovec *iov, loff_t offset,
4157 unsigned long nr_segs)
4162 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
4164 return extent_bmap(mapping, iblock, btrfs_get_extent);
4167 int btrfs_readpage(struct file *file, struct page *page)
4169 struct extent_io_tree *tree;
4170 tree = &BTRFS_I(page->mapping->host)->io_tree;
4171 return extent_read_full_page(tree, page, btrfs_get_extent);
4174 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4176 struct extent_io_tree *tree;
4179 if (current->flags & PF_MEMALLOC) {
4180 redirty_page_for_writepage(wbc, page);
4184 tree = &BTRFS_I(page->mapping->host)->io_tree;
4185 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4188 int btrfs_writepages(struct address_space *mapping,
4189 struct writeback_control *wbc)
4191 struct extent_io_tree *tree;
4193 tree = &BTRFS_I(mapping->host)->io_tree;
4194 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4198 btrfs_readpages(struct file *file, struct address_space *mapping,
4199 struct list_head *pages, unsigned nr_pages)
4201 struct extent_io_tree *tree;
4202 tree = &BTRFS_I(mapping->host)->io_tree;
4203 return extent_readpages(tree, mapping, pages, nr_pages,
4206 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4208 struct extent_io_tree *tree;
4209 struct extent_map_tree *map;
4212 tree = &BTRFS_I(page->mapping->host)->io_tree;
4213 map = &BTRFS_I(page->mapping->host)->extent_tree;
4214 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4216 ClearPagePrivate(page);
4217 set_page_private(page, 0);
4218 page_cache_release(page);
4223 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4225 if (PageWriteback(page) || PageDirty(page))
4227 return __btrfs_releasepage(page, gfp_flags);
4230 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4232 struct extent_io_tree *tree;
4233 struct btrfs_ordered_extent *ordered;
4234 u64 page_start = page_offset(page);
4235 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4237 wait_on_page_writeback(page);
4238 tree = &BTRFS_I(page->mapping->host)->io_tree;
4240 btrfs_releasepage(page, GFP_NOFS);
4244 lock_extent(tree, page_start, page_end, GFP_NOFS);
4245 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4249 * IO on this page will never be started, so we need
4250 * to account for any ordered extents now
4252 clear_extent_bit(tree, page_start, page_end,
4253 EXTENT_DIRTY | EXTENT_DELALLOC |
4254 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4255 btrfs_finish_ordered_io(page->mapping->host,
4256 page_start, page_end);
4257 btrfs_put_ordered_extent(ordered);
4258 lock_extent(tree, page_start, page_end, GFP_NOFS);
4260 clear_extent_bit(tree, page_start, page_end,
4261 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4264 __btrfs_releasepage(page, GFP_NOFS);
4266 ClearPageChecked(page);
4267 if (PagePrivate(page)) {
4268 ClearPagePrivate(page);
4269 set_page_private(page, 0);
4270 page_cache_release(page);
4275 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4276 * called from a page fault handler when a page is first dirtied. Hence we must
4277 * be careful to check for EOF conditions here. We set the page up correctly
4278 * for a written page which means we get ENOSPC checking when writing into
4279 * holes and correct delalloc and unwritten extent mapping on filesystems that
4280 * support these features.
4282 * We are not allowed to take the i_mutex here so we have to play games to
4283 * protect against truncate races as the page could now be beyond EOF. Because
4284 * vmtruncate() writes the inode size before removing pages, once we have the
4285 * page lock we can determine safely if the page is beyond EOF. If it is not
4286 * beyond EOF, then the page is guaranteed safe against truncation until we
4287 * unlock the page.
4289 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4291 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4292 struct btrfs_root *root = BTRFS_I(inode)->root;
4293 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4294 struct btrfs_ordered_extent *ordered;
4296 unsigned long zero_start;
4302 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
4309 size = i_size_read(inode);
4310 page_start = page_offset(page);
4311 page_end = page_start + PAGE_CACHE_SIZE - 1;
4313 if ((page->mapping != inode->i_mapping) ||
4314 (page_start >= size)) {
4315 /* page got truncated out from underneath us */
4318 wait_on_page_writeback(page);
4320 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4321 set_page_extent_mapped(page);
4324 * we can't set the delalloc bits if there are pending ordered
4325 * extents. Drop our locks and wait for them to finish
4327 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4329 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4331 btrfs_start_ordered_extent(inode, ordered, 1);
4332 btrfs_put_ordered_extent(ordered);
4336 btrfs_set_extent_delalloc(inode, page_start, page_end);
4339 /* page is wholly or partially inside EOF */
4340 if (page_start + PAGE_CACHE_SIZE > size)
4341 zero_start = size & ~PAGE_CACHE_MASK;
4343 zero_start = PAGE_CACHE_SIZE;
4345 if (zero_start != PAGE_CACHE_SIZE) {
4347 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4348 flush_dcache_page(page);
4351 ClearPageChecked(page);
4352 set_page_dirty(page);
4353 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4361 static void btrfs_truncate(struct inode *inode)
4363 struct btrfs_root *root = BTRFS_I(inode)->root;
4365 struct btrfs_trans_handle *trans;
4367 u64 mask = root->sectorsize - 1;
4369 if (!S_ISREG(inode->i_mode))
4371 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4374 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4375 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4377 trans = btrfs_start_transaction(root, 1);
4378 btrfs_set_trans_block_group(trans, inode);
4379 btrfs_i_size_write(inode, inode->i_size);
4381 ret = btrfs_orphan_add(trans, inode);
4384 /* FIXME, add redo link to tree so we don't leak on crash */
4385 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4386 BTRFS_EXTENT_DATA_KEY);
4387 btrfs_update_inode(trans, root, inode);
4389 ret = btrfs_orphan_del(trans, inode);
4393 nr = trans->blocks_used;
4394 ret = btrfs_end_transaction_throttle(trans, root);
4396 btrfs_btree_balance_dirty(root, nr);
4400 * create a new subvolume directory/inode (helper for the ioctl).
4402 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4403 struct btrfs_root *new_root, struct dentry *dentry,
4404 u64 new_dirid, u64 alloc_hint)
4406 struct inode *inode;
4410 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4411 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4413 return PTR_ERR(inode);
4414 inode->i_op = &btrfs_dir_inode_operations;
4415 inode->i_fop = &btrfs_dir_file_operations;
4418 btrfs_i_size_write(inode, 0);
4420 error = btrfs_update_inode(trans, new_root, inode);
4424 d_instantiate(dentry, inode);
4428 /* helper function for file defrag and space balancing. This
4429 * forces readahead on a given range of bytes in an inode
4431 unsigned long btrfs_force_ra(struct address_space *mapping,
4432 struct file_ra_state *ra, struct file *file,
4433 pgoff_t offset, pgoff_t last_index)
4435 pgoff_t req_size = last_index - offset + 1;
4437 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4438 return offset + req_size;
4441 struct inode *btrfs_alloc_inode(struct super_block *sb)
4443 struct btrfs_inode *ei;
4445 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4449 ei->logged_trans = 0;
4450 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4451 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4452 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4453 INIT_LIST_HEAD(&ei->i_orphan);
4454 return &ei->vfs_inode;
4457 void btrfs_destroy_inode(struct inode *inode)
4459 struct btrfs_ordered_extent *ordered;
4460 WARN_ON(!list_empty(&inode->i_dentry));
4461 WARN_ON(inode->i_data.nrpages);
4463 if (BTRFS_I(inode)->i_acl &&
4464 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4465 posix_acl_release(BTRFS_I(inode)->i_acl);
4466 if (BTRFS_I(inode)->i_default_acl &&
4467 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4468 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4470 spin_lock(&BTRFS_I(inode)->root->list_lock);
4471 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4472 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4473 " list\n", inode->i_ino);
4476 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4479 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4483 printk("found ordered extent %Lu %Lu\n",
4484 ordered->file_offset, ordered->len);
4485 btrfs_remove_ordered_extent(inode, ordered);
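/*
 * two puts: presumably one for the reference the lookup above took,
 * and one for the reference the tree itself held.
 */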
4486 btrfs_put_ordered_extent(ordered);
4487 btrfs_put_ordered_extent(ordered);
4490 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4491 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4494 static void init_once(void *foo)
4496 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4498 inode_init_once(&ei->vfs_inode);
4501 void btrfs_destroy_cachep(void)
4503 if (btrfs_inode_cachep)
4504 kmem_cache_destroy(btrfs_inode_cachep);
4505 if (btrfs_trans_handle_cachep)
4506 kmem_cache_destroy(btrfs_trans_handle_cachep);
4507 if (btrfs_transaction_cachep)
4508 kmem_cache_destroy(btrfs_transaction_cachep);
4509 if (btrfs_bit_radix_cachep)
4510 kmem_cache_destroy(btrfs_bit_radix_cachep);
4511 if (btrfs_path_cachep)
4512 kmem_cache_destroy(btrfs_path_cachep);
4515 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4516 unsigned long extra_flags,
4517 void (*ctor)(void *))
4519 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4520 SLAB_MEM_SPREAD | extra_flags), ctor);
4523 int btrfs_init_cachep(void)
4525 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4526 sizeof(struct btrfs_inode),
4528 if (!btrfs_inode_cachep)
4530 btrfs_trans_handle_cachep =
4531 btrfs_cache_create("btrfs_trans_handle_cache",
4532 sizeof(struct btrfs_trans_handle),
4534 if (!btrfs_trans_handle_cachep)
4536 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4537 sizeof(struct btrfs_transaction),
4539 if (!btrfs_transaction_cachep)
4541 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4542 sizeof(struct btrfs_path),
4544 if (!btrfs_path_cachep)
4546 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4547 SLAB_DESTROY_BY_RCU, NULL);
4548 if (!btrfs_bit_radix_cachep)
4552 btrfs_destroy_cachep();
4556 static int btrfs_getattr(struct vfsmount *mnt,
4557 struct dentry *dentry, struct kstat *stat)
4559 struct inode *inode = dentry->d_inode;
4560 generic_fillattr(inode, stat);
4561 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
4562 stat->blksize = PAGE_CACHE_SIZE;
4563 stat->blocks = (inode_get_bytes(inode) +
4564 BTRFS_I(inode)->delalloc_bytes) >> 9;
4568 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4569 struct inode *new_dir, struct dentry *new_dentry)
4571 struct btrfs_trans_handle *trans;
4572 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4573 struct inode *new_inode = new_dentry->d_inode;
4574 struct inode *old_inode = old_dentry->d_inode;
4575 struct timespec ctime = CURRENT_TIME;
4579 /* we're not allowed to rename between subvolumes */
4580 if (BTRFS_I(old_inode)->root->root_key.objectid !=
4581 BTRFS_I(new_dir)->root->root_key.objectid)
4584 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4585 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4589 /* to rename a snapshot or subvolume, we need to juggle the
4590 * backrefs. This isn't coded yet
4592 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4595 ret = btrfs_check_free_space(root, 1, 0);
4599 trans = btrfs_start_transaction(root, 1);
4601 btrfs_set_trans_block_group(trans, new_dir);
4603 btrfs_inc_nlink(old_dentry->d_inode);
4604 old_dir->i_ctime = old_dir->i_mtime = ctime;
4605 new_dir->i_ctime = new_dir->i_mtime = ctime;
4606 old_inode->i_ctime = ctime;
4608 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4609 old_dentry->d_name.name,
4610 old_dentry->d_name.len);
4615 new_inode->i_ctime = CURRENT_TIME;
4616 ret = btrfs_unlink_inode(trans, root, new_dir,
4617 new_dentry->d_inode,
4618 new_dentry->d_name.name,
4619 new_dentry->d_name.len);
4622 if (new_inode->i_nlink == 0) {
4623 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4629 ret = btrfs_set_inode_index(new_dir, &index);
4633 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4634 old_inode, new_dentry->d_name.name,
4635 new_dentry->d_name.len, 1, index);
4640 btrfs_end_transaction_throttle(trans, root);
4646 * some fairly slow code that needs optimization. This walks the list
4647 * of all the inodes with pending delalloc and forces them to disk.
4649 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4651 struct list_head *head = &root->fs_info->delalloc_inodes;
4652 struct btrfs_inode *binode;
4653 struct inode *inode;
4654 unsigned long flags;
4656 if (root->fs_info->sb->s_flags & MS_RDONLY)
4659 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4660 while (!list_empty(head)) {
4661 binode = list_entry(head->next, struct btrfs_inode,
4663 inode = igrab(&binode->vfs_inode);
4665 list_del_init(&binode->delalloc_inodes);
4666 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4668 filemap_flush(inode->i_mapping);
4672 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4674 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4676 /* the filemap_flush will queue IO into the worker threads, but
4677 * we have to make sure the IO is actually started and that
4678 * ordered extents get created before we return
4680 atomic_inc(&root->fs_info->async_submit_draining);
4681 while (atomic_read(&root->fs_info->nr_async_submits) ||
4682 atomic_read(&root->fs_info->async_delalloc_pages)) {
4683 wait_event(root->fs_info->async_submit_wait,
4684 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
4685 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
4687 atomic_dec(&root->fs_info->async_submit_draining);
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	err = btrfs_check_free_space(root, 1, 0);
	if (err)
		goto out_fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
	if (err) {
		err = -ENOSPC;
		goto out_unlock;
	}

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len,
				dentry->d_parent->d_inode->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_acl(inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	dir->i_sb->s_dirt = 1;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
out_fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}

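/*
 * allocate extents for [start, end) without writing any data.  They
 * are inserted as BTRFS_FILE_EXTENT_PREALLOC items, so reads of the
 * range return zeros until the space is actually written.
 */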
static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
			       u64 alloc_hint, int mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 alloc_size;
	u64 cur_offset = start;
	u64 num_bytes = end - start;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	while (num_bytes > 0) {
		alloc_size = min(num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto out;
		}
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		alloc_hint = ins.objectid + ins.offset;
	}
out:
	if (cur_offset > start) {
		inode->i_ctime = CURRENT_TIME;
		btrfs_set_flag(inode, PREALLOC);
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    cur_offset > i_size_read(inode))
			btrfs_i_size_write(inode, cur_offset);
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
	}

	btrfs_end_transaction(trans, root);
	return ret;
}

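/*
 * fallocate: sector-align the requested range, wait for any ordered
 * extents that overlap it to finish, then walk the extent maps and
 * preallocate every hole found in the range.
 */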
static long btrfs_fallocate(struct inode *inode, int mode,
			    loff_t offset, loff_t len)
{
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	mutex_lock(&inode->i_mutex);
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
			    alloc_end - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      alloc_start, alloc_end - 1, GFP_NOFS);
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE) {
			ret = prealloc_file_range(inode, cur_offset,
						  last_byte, alloc_hint, mode);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
			alloc_hint = em->block_start;
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
		      GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}

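/*
 * btrfs never attaches buffer heads to data pages, so marking the
 * page itself dirty is all that redirtying requires.
 */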
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}

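/*
 * honor the per-inode READONLY flag before falling back to the
 * generic mode bit and ACL checks.
 */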
static int btrfs_permission(struct inode *inode, int mask)
{
	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}

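/*
 * the operation tables below wire the routines in this file into the
 * VFS and into the btrfs extent_io callback machinery.
 */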
static struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};

static struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc		= run_delalloc_range,
	.submit_bio_hook	= btrfs_submit_bio_hook,
	.merge_bio_hook		= btrfs_merge_bio_hook,
	.readpage_end_io_hook	= btrfs_readpage_end_io_hook,
	.writepage_end_io_hook	= btrfs_writepage_end_io_hook,
	.writepage_start_hook	= btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook		= btrfs_set_bit_hook,
	.clear_bit_hook		= btrfs_clear_bit_hook,
};

static struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.bmap		= btrfs_bmap,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
};

static struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fallocate	= btrfs_fallocate,
};

static struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

static struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= btrfs_permission,
};