/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "ref-cache.h"
#include "compression.h"
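
/*
 * args passed to the inode lookup routines so an inode can be found by
 * inode number within a given root
 */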
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};
static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;
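
/* map inode mode bits to the matching btrfs directory entry type */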
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
/*
 * a very lame attempt at stopping writes when the FS is 85% full.  There
 * are countless ways this is incorrect, but it is better than nothing.
 */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
			   int for_del)
{
	spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
	total = btrfs_super_total_bytes(&root->fs_info->super_copy);
	used = btrfs_super_bytes_used(&root->fs_info->super_copy);

	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
		ret = -ENOSPC;
	spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	return ret;
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	struct btrfs_file_extent_item *ei;
	size_t cur_size = size;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	inode_add_bytes(inode, size);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		printk(KERN_ERR "btrfs: got bad ret %d\n", ret);
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);
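
	/*
	 * copy the data into the leaf: either the raw compressed pages or
	 * the plain page contents pulled straight from the page cache
	 */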
	if (use_compress) {
		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min(compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap_atomic(cpage, KM_USER0);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr, KM_USER0);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);
	return 0;
fail:
	btrfs_free_path(path);
	return err;
}
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			  ~((u64)root->sectorsize - 1);
	u64 data_len = inline_len;

	if (compressed_size)
		data_len = compressed_size;
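
	/*
	 * only ranges that start at file offset zero and fit within the
	 * inline limits below can be stored inside the btree leaf
	 */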
	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}
	ret = btrfs_drop_extents(trans, root, inode, start,
				 aligned_end, start, &hint_byte);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
	return 0;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static int cow_file_range(struct inode *inode, struct page *locked_page,
			  u64 start, u64 end, int *page_started)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	unsigned long ram_size;
	u64 blocksize = root->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 256 * 1024;
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	/*
	 * compression made this loop a bit ugly, but the basic idea is to
	 * compress some pages but keep the total size of the compressed
	 * extent relatively small.  If compression is off, this goto target
	 * is never used.
	 */
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	actual_end = min_t(u64, i_size_read(inode), end + 1);
	total_compressed = actual_end - start;

	/* we want to make sure the amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 256k
	 */
	total_compressed = min(total_compressed, max_uncompressed);
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	/* we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress
	 */
	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		/* we want to make sure the amount of IO required to satisfy
		 * a random read is reasonably small, so we limit the size
		 * of a compressed extent to 128k
		 */
		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in, &total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + offset, 0,
			       PAGE_CACHE_SIZE - offset);
			kunmap_atomic(kaddr, KM_USER0);
		}
	}
	/* let's try to make an inline extent */
	if (ret || total_in < (end - start + 1)) {
		/* we didn't compress the entire range, try
		 * to make an uncompressed inline extent.  This
		 * is almost sure to fail, but maybe inline sizes
		 * will get bigger later
		 */
		ret = cow_file_range_inline(trans, root, inode,
					    start, end, 0, NULL);
	} else {
		ret = cow_file_range_inline(trans, root, inode, start, end,
					    total_compressed, pages);
	}
	if (ret == 0) {
		extent_clear_unlock_delalloc(inode,
					     &BTRFS_I(inode)->io_tree,
					     start, end, NULL, 1, 1, 1);
		*page_started = 1;
		goto free_pages_out;
	}
	/*
	 * we aren't doing an inline extent, round the compressed size
	 * up to a block size boundary so the allocator does sane
	 * things
	 */
	total_compressed = (total_compressed + blocksize - 1) &
		~(blocksize - 1);

	/*
	 * one last check to make sure the compression is really a
	 * win, compare the page count read with the blocks on disk
	 */
	total_in = (total_in + PAGE_CACHE_SIZE - 1) &
		~(PAGE_CACHE_SIZE - 1);
	if (total_compressed >= total_in) {
		will_compress = 0;
	} else {
		disk_num_bytes = total_compressed;
		num_bytes = total_in;
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;

		/* flag the file so we don't compress in the future */
		btrfs_set_flag(inode, NOCOMPRESS);
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));
	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		unsigned long min_bytes;

		/*
		 * the max size of a compressed extent is pretty small,
		 * make the code a little less complex by forcing
		 * the allocator to find a whole compressed extent at once
		 */
		if (will_compress)
			min_bytes = disk_num_bytes;
		else
			min_bytes = root->sectorsize;

		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   min_bytes, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret)
			goto free_pages_out_fail;
		em = alloc_extent_map(GFP_NOFS);
		em->start = start;

		if (will_compress) {
			ram_size = num_bytes;
			em->len = num_bytes;
		} else {
			/* ram size == disk size */
			ram_size = ins.offset;
			em->len = ins.offset;
		}

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		if (will_compress)
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
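
		/*
		 * insert the new mapping; if something stale overlaps it in
		 * the extent map tree, drop that from the cache and retry
		 */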
		while (1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}
		cur_alloc_size = ins.offset;
		ordered_type = will_compress ? BTRFS_ORDERED_COMPRESSED : 0;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size,
					       ordered_type);

		if (disk_num_bytes < cur_alloc_size) {
			printk(KERN_WARNING "num_bytes %Lu cur_alloc %Lu\n",
			       disk_num_bytes, cur_alloc_size);
			break;
		}
		if (will_compress) {
			/*
			 * we're doing compression, and we need to
			 * submit the compressed extents down to the device.
			 *
			 * We lock down all the file pages, clearing their
			 * dirty bits and setting them writeback.  Everyone
			 * that wants to modify the page will wait on the
			 * ordered extent above.
			 *
			 * The writeback bits on the file pages are
			 * cleared when the compressed pages are on disk
			 */
			btrfs_end_transaction(trans, root);
			if (start <= page_offset(locked_page) &&
			    page_offset(locked_page) < start + ram_size) {
				*page_started = 1;
			}

			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start,
						     start + ram_size - 1,
						     NULL, 1, 1, 0);

			ret = btrfs_submit_compressed_write(inode, start,
						ram_size, ins.objectid,
						cur_alloc_size, pages,
						nr_pages_ret);

			trans = btrfs_join_transaction(root, 1);
			if (start + ram_size < end) {
				start += ram_size;
				alloc_hint = ins.objectid + ins.offset;
				/* pages will be freed at end_bio time */
				pages = NULL;
				goto again;
			}
			/* we've written everything, time to go */
			break;
		}

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, 0, 0, 0);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	btrfs_end_transaction(trans, root);
	return ret;

free_pages_out_fail:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, locked_page, 0, 0, 0);
free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	kfree(pages);
	goto out;
}
/*
 * when nocow writeback calls back.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force)
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);

	while (1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			goto out_check;
		}
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
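
		/*
		 * a regular or prealloc extent can only be reused in place
		 * when it isn't compressed, encrypted or specially encoded,
		 * isn't a hole, isn't shared with another root, and lives
		 * in a writable block group; anything else falls back to
		 * the COW path below
		 */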
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_block_group_cache *block_group;
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (disk_bytenr == 0)
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, disk_bytenr))
				goto out_check;
			block_group = btrfs_lookup_block_group(root->fs_info,
							       disk_bytenr);
			if (!block_group || block_group->ro)
				goto out_check;
			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}
		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1,
					     page_started);
			cow_start = (u64)-1;
		}

		disk_bytenr += cur_offset - found_key.offset;
		num_bytes = min(end + 1, extent_end) - cur_offset;
		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while (1) {
				spin_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				spin_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}
		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					cur_offset, cur_offset + num_bytes - 1,
					locked_page, 0, 0, 0);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started);
	}

	ret = btrfs_end_transaction(trans, root);
	btrfs_free_path(path);
	return 0;
}
/*
 * extent_io.c callback to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (btrfs_test_opt(root, NODATACOW) ||
	    btrfs_test_flag(inode, NODATACOW))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1);
	else if (btrfs_test_flag(inode, PREALLOC))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0);
	else
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started);
	return ret;
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
		       unsigned long old, unsigned long bits)
{
	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		unsigned long flags;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
			 unsigned long old, unsigned long bits)
{
	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		unsigned long flags;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		if (end - start + 1 > root->fs_info->delalloc_bytes) {
			printk(KERN_WARNING "warning: delalloc account "
			       "%Lu %Lu\n", end - start + 1,
			       root->fs_info->delalloc_bytes);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			root->fs_info->delalloc_bytes -= end - start + 1;
			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);

	if (map_length < length + size)
		return 1;
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
int __btrfs_submit_bio_start(struct inode *inode, int rw, struct bio *bio,
			     int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio);
	BUG_ON(ret);
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
			    int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}
/*
 * extent_io.c submission hook.  This does the right thing for csum
 * calculation on write, or reading the csums from the tree before a read
 */
int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	skip_sum = btrfs_test_opt(root, NODATASUM) ||
		   btrfs_test_flag(inode, NODATASUM);
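
	/*
	 * reads either pull the csums from the tree now or hand the bio
	 * to the compressed read path; writes are checksummed
	 * asynchronously at submit time
	 */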
	if (!(rw & (1 << BIO_RW))) {
		if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio);

		if (bio_flags & EXTENT_BIO_COMPRESSED)
			return btrfs_submit_compressed_read(inode, bio,
						mirror_num, bio_flags);
	} else if (!skip_sum) {
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
					   inode, rw, bio, mirror_num,
					   bio_flags, __btrfs_submit_bio_start,
					   __btrfs_submit_bio_done);
	}

	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
				      struct inode *inode, u64 file_offset,
				      struct list_head *list)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);
	list_for_each(cur, list) {
		sum = list_entry(cur, struct btrfs_ordered_sum, list);
		btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root,
				       inode, sum);
	}
	return 0;
}
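
/*
 * mark a range in the inode's io_tree as delalloc so it will be
 * written out through the COW path later
 */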
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};
void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			   EXTENT_ORDERED, 0)) {
		goto out;
	}

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly set up for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			     EXTENT_ORDERED, 0);
	if (ret)
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}
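
/*
 * insert a file extent item and the matching allocation record for an
 * extent that was reserved earlier; called once the ordered IO covering
 * the extent has finished
 */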
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, file_pos, &hint);
	BUG_ON(ret);
	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);
	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
					  root->root_key.objectid,
					  trans->transid, inode->i_ino, &ins);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	trans = btrfs_join_transaction(root, 1);

	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
		goto nocow;

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, root, inode,
					ordered_extent->file_offset,
					ordered_extent->file_offset +
					ordered_extent->len);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
					ordered_extent->file_offset,
					ordered_extent->start,
					ordered_extent->disk_len,
					ordered_extent->len,
					ordered_extent->len,
					compressed, 0, 0,
					BTRFS_FILE_EXTENT_REG);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
nocow:
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	mutex_lock(&BTRFS_I(inode)->extent_mutex);
	btrfs_ordered_update_i_size(inode, ordered_extent);
	btrfs_update_inode(trans, root, inode);
	btrfs_remove_ordered_extent(inode, ordered_extent);
	mutex_unlock(&BTRFS_I(inode)->extent_mutex);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	btrfs_end_transaction(trans, root);
	return 0;
}
int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}
/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};
int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;
	unsigned long bio_flags = 0;
	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		spin_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			bio_flags = EXTENT_BIO_COMPRESSED;
		failrec->logical = logical;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;

	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     bio_flags);
	return 0;
}
/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private = 0;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}
/*
 * when reads are done, we need to check csums to verify the data is
 * correct.  If there's a match, we allow the bio to finish.  If not, we
 * go through the io_failure_record routines to find good copies
 */
int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;
	unsigned long flags;

	if (btrfs_test_opt(root, NODATASUM) ||
	    btrfs_test_flag(inode, NODATASUM))
		return 0;
	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	local_irq_save(flags);
	kaddr = kmap_atomic(page, KM_IRQ0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private)
		goto zeroit;

	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);

	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
	       page->mapping->host->i_ino, (unsigned long long)start, csum,
	       private);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);
	return -EIO;
}
/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	/* already on the orphan list, we're good */
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
	spin_unlock(&root->list_lock);

	/*
	 * insert an orphan item to track this unlinked/truncated file
	 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
	return ret;
}
/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);
	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_del_init(&BTRFS_I(inode)->i_orphan);
	if (!trans) {
		spin_unlock(&root->list_lock);
		return 0;
	}
	spin_unlock(&root->list_lock);

	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
	return ret;
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
void btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	/* don't do orphan cleanup if the fs is readonly. */
	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return;
	path = btrfs_alloc_path();
	if (!path)
		return;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			printk(KERN_ERR "Error searching slot for orphan: %d\n",
			       ret);
			break;
		}

		/*
		 * if ret == 0 it means we found what we were searching for,
		 * which is weird, but possible, so only screw with path if
		 * we didn't find the key and see if we have stuff that matches
		 */
		if (path->slots[0] == 0)
			break;
		path->slots[0]--;
		/* pull out the item */
		leaf = path->nodes[0];
		item = btrfs_item_nr(leaf, path->slots[0]);
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		inode = btrfs_iget_locked(root->fs_info->sb,
					  found_key.offset, root);
		if (!inode)
			break;
		if (inode->i_state & I_NEW) {
			BTRFS_I(inode)->root = root;

			/* have to set the location manually */
			BTRFS_I(inode)->location.objectid = inode->i_ino;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;

			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->list_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->list_lock);
		/*
		 * if this is a bad inode, it means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 1);
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, let's do that */
		if (inode->i_nlink) {
			nr_truncate++;
			btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

	btrfs_free_path(path);
}
/*
 * read an inode from the btree into the in-memory inode
 */
void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;

	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
	BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
							alloc_group_block);
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
	if (!BTRFS_I(inode)->block_group) {
		BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
						NULL, 0,
						BTRFS_BLOCK_GROUP_METADATA, 0);
	}
	btrfs_free_path(path);
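
	/* wire up the inode and address space operations by file type */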
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item,
				    BTRFS_I(inode)->block_group->key.objectid);
}
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto err;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  inode->i_ino,
				  dir->i_ino, &index);
	if (ret) {
		printk(KERN_INFO "btrfs: failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       inode->i_ino, dir->i_ino);
		goto err;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir->i_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	BTRFS_I(dir)->log_dirty_trans = trans->transid;

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
	btrfs_drop_nlink(inode);
	ret = btrfs_update_inode(trans, root, inode);
	dir->i_sb->s_dirt = 1;
out:
	return ret;
}
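
/*
 * VFS unlink hook: removes the directory entry, and leaves the inode on
 * the orphan list if this drops the link count to zero
 */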
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	root = BTRFS_I(dir)->root;

	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);

	btrfs_set_trans_block_group(trans, dir);
	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);

	if (inode->i_nlink == 0)
		ret = btrfs_orphan_add(trans, inode);

	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
fail:
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	int ret;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto fail_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);

fail_trans:
	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
fail:
	btrfs_btree_balance_dirty(root, nr);
	if (err)
		return err;
	return ret;
}
/*
 * when truncating bytes in a file, it is possible to avoid reading
 * the leaves that contain only checksum items.  This can be the
 * majority of the IO required to delete a large file, but it must
 * be done carefully.
 *
 * The keys in the level just above the leaves are checked to make sure
 * the lowest key in a given leaf is a csum key, and starts at an offset
 * after the new size.
 *
 * Then the key for the next leaf is checked to make sure it also has
 * a checksum item for the same file.  If it does, we know our target leaf
 * contains only checksum items, and it can be safely freed without reading
 * them.
 *
 * This is just an optimization targeted at large files.  It may do
 * nothing.  It will return 0 unless things went badly.
 */
static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct inode *inode, u64 new_size)
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key other_key;
	struct btrfs_leaf_ref *ref;
	u64 leaf_gen;
	u64 leaf_start;
	int nritems;
	int ret;

again:
	path->lowest_level = 1;
	key.objectid = inode->i_ino;
	key.type = BTRFS_CSUM_ITEM_KEY;
	key.offset = new_size;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (path->nodes[1] == NULL) {
		ret = 0;
		goto out;
	}
	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
	nritems = btrfs_header_nritems(path->nodes[1]);

	if (path->slots[1] >= nritems)
		goto next_node;
	/* did we find a key greater than anything we want to delete? */
	if (found_key.objectid > inode->i_ino ||
	    (found_key.objectid == inode->i_ino && found_key.type > key.type))
		goto out;
	/* we check the next key in the node to make sure the leaf contains
	 * only checksum items.  This comparison doesn't work if our
	 * leaf is the last one in the node
	 */
	if (path->slots[1] + 1 >= nritems) {
next_node:
		/* search forward from the last key in the node, this
		 * will bring us into the next node in the tree
		 */
		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);

		/* unlikely, but we inc below, so check to be safe */
		if (found_key.offset == (u64)-1)
			goto out;

		/* search_forward needs a path with locks held, do the
		 * search again for the original key.  It is possible
		 * this will race with a balance and return a path that
		 * we could modify, but this drop is just an optimization
		 * and is allowed to miss some leaves.
		 */
		btrfs_release_path(root, path);
		/* setup a max key for search_forward */
		other_key.offset = (u64)-1;
		other_key.type = key.type;
		other_key.objectid = key.objectid;

		path->keep_locks = 1;
		ret = btrfs_search_forward(root, &found_key, &other_key,
					   path, 0, 0);
		path->keep_locks = 0;
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		key.offset = found_key.offset;
		btrfs_release_path(root, path);
		cond_resched();
		goto again;
	}
	/* we know there's one more slot after us in the tree,
	 * read that key so we can verify it is also a checksum item
	 */
	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

	if (found_key.objectid < inode->i_ino)
		goto next_key;

	if (found_key.type != key.type || found_key.offset < new_size)
		goto out;

	/*
	 * if the key for the next leaf isn't a csum key from this objectid,
	 * we can't be sure there aren't good items inside this leaf.
	 * bail out
	 */
	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
		goto out;
	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
	/*
	 * it is safe to delete this leaf, it contains only
	 * csum items from this inode at an offset >= new_size
	 */
	ret = btrfs_del_leaf(trans, root, path, leaf_start);
	BUG_ON(ret);

	if (root->ref_cows && leaf_gen < trans->transid) {
		ref = btrfs_alloc_leaf_ref(root, 0);
		if (ref) {
			ref->root_gen = root->root_key.offset;
			ref->bytenr = leaf_start;
			ref->owner = 0;
			ref->generation = leaf_gen;
			ref->nritems = 0;

			ret = btrfs_add_leaf_ref(root, ref, 0);
			WARN_ON(ret);
			btrfs_free_leaf_ref(root, ref);
		}
	}
next_key:
	btrfs_release_path(root, path);

	if (other_key.objectid == inode->i_ino &&
	    other_key.type == key.type && other_key.offset > key.offset) {
		key.offset = other_key.offset;
		cond_resched();
		goto again;
	}
	ret = 1;
out:
	/* fixup any changes we've made to the path */
	path->lowest_level = 0;
	path->keep_locks = 0;
	btrfs_release_path(root, path);
	return ret;
}
/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					u64 new_size, u32 min_type)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 found_type;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 item_end = 0;
	u64 root_gen = 0;
	u64 root_owner = 0;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	u64 mask = root->sectorsize - 1;

	if (root->ref_cows)
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
	path = btrfs_alloc_path();
	BUG_ON(!path);

	/* FIXME, add redo link to tree so we don't leak on crash */
	key.objectid = inode->i_ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

	btrfs_init_path(path);

	ret = drop_csum_leaves(trans, root, path, inode, new_size);
	BUG_ON(ret);
search_again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		/* there are no items in the tree for us to truncate,
		 * we're done
		 */
		if (path->slots[0] == 0)
			goto error;
		path->slots[0]--;
	}
	while (1) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);

		if (found_key.objectid != inode->i_ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
		}
		if (found_type == BTRFS_CSUM_ITEM_KEY) {
			ret = btrfs_csum_truncate(trans, root, path,
						  new_size);
		}
		if (item_end < new_size) {
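			/*
			 * the whole item ends before the new size; walk
			 * found_type down to the next lower key type so the
			 * search keeps deleting the remaining item groups
			 */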
			if (found_type == BTRFS_DIR_ITEM_KEY) {
				found_type = BTRFS_INODE_ITEM_KEY;
			} else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
				found_type = BTRFS_CSUM_ITEM_KEY;
			} else if (found_type == BTRFS_EXTENT_DATA_KEY) {
				found_type = BTRFS_XATTR_ITEM_KEY;
			} else if (found_type == BTRFS_XATTR_ITEM_KEY) {
				found_type = BTRFS_INODE_REF_KEY;
			} else if (found_type) {
				found_type--;
			} else {
				break;
			}
			btrfs_set_key_type(&key, found_type);
			goto next;
		}
		if (found_key.offset >= new_size)
			del_item = 1;
		else
			del_item = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = new_size -
					found_key.offset + root->sectorsize - 1;
				extent_num_bytes = extent_num_bytes &
					~((u64)root->sectorsize - 1);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (root->ref_cows && extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					found_extent = 1;
					if (root->ref_cows)
						inode_sub_bytes(inode, num_dec);
				}
				root_gen = btrfs_header_generation(leaf);
				root_owner = btrfs_header_owner(leaf);
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_compression(leaf, fi) == 0 &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
				u32 size = new_size - found_key.offset;

				if (root->ref_cows) {
					inode_sub_bytes(inode, item_end + 1 -
							new_size);
				}
				size =
				    btrfs_file_extent_calc_inline_size(size);
				ret = btrfs_truncate_item(trans, root, path,
							  size, 1);
			} else if (root->ref_cows) {
				inode_sub_bytes(inode, item_end + 1 -
						found_key.offset);
			}
		}
delete:
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				printk(KERN_ERR "bad pending slot %d "
				       "pending_del_nr %d pending_del_slot "
				       "%d\n", path->slots[0], pending_del_nr,
				       pending_del_slot);
			}
		}
2274 ret = btrfs_free_extent(trans, root, extent_start,
2276 leaf->start, root_owner,
2277 root_gen, inode->i_ino, 0);
2281 if (path->slots[0] == 0) {
2284 btrfs_release_path(root, path);
2289 if (pending_del_nr &&
2290 path->slots[0] + 1 != pending_del_slot) {
2291 struct btrfs_key debug;
2293 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2295 ret = btrfs_del_items(trans, root, path,
2300 btrfs_release_path(root, path);
2306 if (pending_del_nr) {
2307 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2310 btrfs_free_path(path);
2311 inode->i_sb->s_dirt = 1;
2316 * taken from block_truncate_page, but does cow as it zeros out
* any bytes left in the last page in the file.
*/
2319 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2321 struct inode *inode = mapping->host;
2322 struct btrfs_root *root = BTRFS_I(inode)->root;
2323 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2324 struct btrfs_ordered_extent *ordered;
2326 u32 blocksize = root->sectorsize;
2327 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2328 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2334 if ((offset & (blocksize - 1)) == 0)
2339 page = grab_cache_page(mapping, index);
2343 page_start = page_offset(page);
2344 page_end = page_start + PAGE_CACHE_SIZE - 1;
2346 if (!PageUptodate(page)) {
2347 ret = btrfs_readpage(NULL, page);
2349 if (page->mapping != mapping) {
2351 page_cache_release(page);
2354 if (!PageUptodate(page)) {
2359 wait_on_page_writeback(page);
2361 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2362 set_page_extent_mapped(page);
2364 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2366 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2368 page_cache_release(page);
2369 btrfs_start_ordered_extent(inode, ordered, 1);
2370 btrfs_put_ordered_extent(ordered);
2374 btrfs_set_extent_delalloc(inode, page_start, page_end);
2376 if (offset != PAGE_CACHE_SIZE) {
2378 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2379 flush_dcache_page(page);
2382 ClearPageChecked(page);
2383 set_page_dirty(page);
2384 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2388 page_cache_release(page);
2393 int btrfs_cont_expand(struct inode *inode, loff_t size)
2395 struct btrfs_trans_handle *trans;
2396 struct btrfs_root *root = BTRFS_I(inode)->root;
2397 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2398 struct extent_map *em;
2399 u64 mask = root->sectorsize - 1;
2400 u64 hole_start = (inode->i_size + mask) & ~mask;
2401 u64 block_end = (size + mask) & ~mask;
2407 if (size <= hole_start)
2410 err = btrfs_check_free_space(root, 1, 0);
2414 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2417 struct btrfs_ordered_extent *ordered;
2418 btrfs_wait_ordered_range(inode, hole_start,
2419 block_end - hole_start);
2420 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2421 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2424 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2425 btrfs_put_ordered_extent(ordered);
2428 trans = btrfs_start_transaction(root, 1);
2429 btrfs_set_trans_block_group(trans, inode);
2431 cur_offset = hole_start;
2433 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2434 block_end - cur_offset, 0);
2435 BUG_ON(IS_ERR(em) || !em);
2436 last_byte = min(extent_map_end(em), block_end);
2437 last_byte = (last_byte + mask) & ~mask;
2438 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2439 hole_size = last_byte - cur_offset;
2440 err = btrfs_insert_file_extent(trans, root,
2441 inode->i_ino, cur_offset, 0,
2442 0, hole_size, 0, hole_size,
2444 btrfs_drop_extent_cache(inode, hole_start,
2447 free_extent_map(em);
2448 cur_offset = last_byte;
2449 if (err || cur_offset >= block_end)
2453 btrfs_end_transaction(trans, root);
2454 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
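/*
* setattr: growing the file is handled here via btrfs_cont_expand(),
* which fills in hole extents up to the new size; shrinking falls
* through to inode_setattr() and ends up in btrfs_truncate().
*/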
2458 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2460 struct inode *inode = dentry->d_inode;
2463 err = inode_change_ok(inode, attr);
2467 if (S_ISREG(inode->i_mode) &&
2468 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2469 err = btrfs_cont_expand(inode, attr->ia_size);
2474 err = inode_setattr(inode, attr);
2476 if (!err && ((attr->ia_valid & ATTR_MODE)))
2477 err = btrfs_acl_chmod(inode);
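/*
* called when the inode is being evicted with a link count of zero.
* Waits for any ordered IO, truncates every item the inode owns out
* of the tree, and then clears the orphan record that protected it.
*/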
2481 void btrfs_delete_inode(struct inode *inode)
2483 struct btrfs_trans_handle *trans;
2484 struct btrfs_root *root = BTRFS_I(inode)->root;
2488 truncate_inode_pages(&inode->i_data, 0);
2489 if (is_bad_inode(inode)) {
2490 btrfs_orphan_del(NULL, inode);
2493 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2495 btrfs_i_size_write(inode, 0);
2496 trans = btrfs_start_transaction(root, 1);
2498 btrfs_set_trans_block_group(trans, inode);
2499 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2501 btrfs_orphan_del(NULL, inode);
2502 goto no_delete_lock;
2505 btrfs_orphan_del(trans, inode);
2507 nr = trans->blocks_used;
2510 btrfs_end_transaction(trans, root);
2511 btrfs_btree_balance_dirty(root, nr);
2515 nr = trans->blocks_used;
2516 btrfs_end_transaction(trans, root);
2517 btrfs_btree_balance_dirty(root, nr);
2523 * this returns the key found in the dir entry in the location pointer.
* If no dir entries were found, location->objectid is 0.
*/
2526 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2527 struct btrfs_key *location)
2529 const char *name = dentry->d_name.name;
2530 int namelen = dentry->d_name.len;
2531 struct btrfs_dir_item *di;
2532 struct btrfs_path *path;
2533 struct btrfs_root *root = BTRFS_I(dir)->root;
2536 path = btrfs_alloc_path();
2539 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2543 if (!di || IS_ERR(di)) {
2546 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2548 btrfs_free_path(path);
2551 location->objectid = 0;
2556 * when we hit a tree root in a directory, the btrfs part of the inode
2557 * needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
2560 static int fixup_tree_root_location(struct btrfs_root *root,
2561 struct btrfs_key *location,
2562 struct btrfs_root **sub_root,
2563 struct dentry *dentry)
2565 struct btrfs_root_item *ri;
2567 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2569 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2572 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2573 dentry->d_name.name,
2574 dentry->d_name.len);
2575 if (IS_ERR(*sub_root))
2576 return PTR_ERR(*sub_root);
2578 ri = &(*sub_root)->root_item;
2579 location->objectid = btrfs_root_dirid(ri);
2580 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2581 location->offset = 0;
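/* set up the btrfs private part of a freshly allocated in-memory inode */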
2586 static noinline void init_btrfs_i(struct inode *inode)
2588 struct btrfs_inode *bi = BTRFS_I(inode);
2591 bi->i_default_acl = NULL;
2595 bi->logged_trans = 0;
2596 bi->delalloc_bytes = 0;
2597 bi->disk_i_size = 0;
2599 bi->index_cnt = (u64)-1;
2600 bi->log_dirty_trans = 0;
2601 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2602 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2603 inode->i_mapping, GFP_NOFS);
2604 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2605 inode->i_mapping, GFP_NOFS);
2606 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
2607 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
2608 mutex_init(&BTRFS_I(inode)->csum_mutex);
2609 mutex_init(&BTRFS_I(inode)->extent_mutex);
2610 mutex_init(&BTRFS_I(inode)->log_mutex);
2613 static int btrfs_init_locked_inode(struct inode *inode, void *p)
2615 struct btrfs_iget_args *args = p;
2616 inode->i_ino = args->ino;
2617 init_btrfs_i(inode);
2618 BTRFS_I(inode)->root = args->root;
2622 static int btrfs_find_actor(struct inode *inode, void *opaque)
2624 struct btrfs_iget_args *args = opaque;
2625 return (args->ino == inode->i_ino &&
2626 args->root == BTRFS_I(inode)->root);
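/*
* look for a cached in-memory inode for this objectid/root pair. When
* wait is zero the nowait variant is used, so the lookup won't block
* on inodes that are still being created or torn down.
*/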
2629 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
2630 struct btrfs_root *root, int wait)
2632 struct inode *inode;
2633 struct btrfs_iget_args args;
2634 args.ino = objectid;
2638 inode = ilookup5(s, objectid, btrfs_find_actor,
2641 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
2647 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
2648 struct btrfs_root *root)
2650 struct inode *inode;
2651 struct btrfs_iget_args args;
2652 args.ino = objectid;
2655 inode = iget5_locked(s, objectid, btrfs_find_actor,
2656 btrfs_init_locked_inode,
2661 /* Get an inode object given its location and corresponding root.
* Returns in *is_new if the inode was read from disk
*/
2664 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
2665 struct btrfs_root *root, int *is_new)
2667 struct inode *inode;
2669 inode = btrfs_iget_locked(s, location->objectid, root);
2671 return ERR_PTR(-EACCES);
2673 if (inode->i_state & I_NEW) {
2674 BTRFS_I(inode)->root = root;
2675 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
2676 btrfs_read_locked_inode(inode);
2677 unlock_new_inode(inode);
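/*
* directory lookup: map a name to an inode, crossing into the root
* directory of a subvolume when the dir item points at a tree root.
*/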
2688 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
2689 struct nameidata *nd)
struct inode *inode;
2692 struct btrfs_inode *bi = BTRFS_I(dir);
2693 struct btrfs_root *root = bi->root;
2694 struct btrfs_root *sub_root = root;
2695 struct btrfs_key location;
2696 int ret, new, do_orphan = 0;
2698 if (dentry->d_name.len > BTRFS_NAME_LEN)
2699 return ERR_PTR(-ENAMETOOLONG);
2701 ret = btrfs_inode_by_name(dir, dentry, &location);
2704 return ERR_PTR(ret);
2707 if (location.objectid) {
2708 ret = fixup_tree_root_location(root, &location, &sub_root,
2711 return ERR_PTR(ret);
2713 return ERR_PTR(-ENOENT);
2714 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
2716 return ERR_CAST(inode);
2718 /* the inode and parent dir are two different roots */
2719 if (new && root != sub_root) {
2721 sub_root->inode = inode;
2726 if (unlikely(do_orphan))
2727 btrfs_orphan_cleanup(sub_root);
2729 return d_splice_alias(inode, dentry);
2732 static unsigned char btrfs_filetype_table[] = {
2733 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
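/*
* readdir: walk the dir index items (DIR_ITEM for the tree root) for
* this directory, using the key offset as f_pos. '.' and '..' have no
* items in the tree, so they are faked up front from the inode and the
* parent dentry.
*/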
2736 static int btrfs_real_readdir(struct file *filp, void *dirent,
2739 struct inode *inode = filp->f_dentry->d_inode;
2740 struct btrfs_root *root = BTRFS_I(inode)->root;
2741 struct btrfs_item *item;
2742 struct btrfs_dir_item *di;
2743 struct btrfs_key key;
2744 struct btrfs_key found_key;
2745 struct btrfs_path *path;
2748 struct extent_buffer *leaf;
2751 unsigned char d_type;
2756 int key_type = BTRFS_DIR_INDEX_KEY;
2761 /* FIXME, use a real flag for deciding about the key type */
2762 if (root->fs_info->tree_root == root)
2763 key_type = BTRFS_DIR_ITEM_KEY;
2765 /* special case for "." */
2766 if (filp->f_pos == 0) {
2767 over = filldir(dirent, ".", 1,
2774 /* special case for .., just use the back ref */
2775 if (filp->f_pos == 1) {
2776 u64 pino = parent_ino(filp->f_path.dentry);
2777 over = filldir(dirent, "..", 2,
2784 path = btrfs_alloc_path();
2787 btrfs_set_key_type(&key, key_type);
2788 key.offset = filp->f_pos;
2789 key.objectid = inode->i_ino;
2791 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2797 leaf = path->nodes[0];
2798 nritems = btrfs_header_nritems(leaf);
2799 slot = path->slots[0];
2800 if (advance || slot >= nritems) {
2801 if (slot >= nritems - 1) {
2802 ret = btrfs_next_leaf(root, path);
2805 leaf = path->nodes[0];
2806 nritems = btrfs_header_nritems(leaf);
2807 slot = path->slots[0];
2814 item = btrfs_item_nr(leaf, slot);
2815 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2817 if (found_key.objectid != key.objectid)
2819 if (btrfs_key_type(&found_key) != key_type)
2821 if (found_key.offset < filp->f_pos)
2824 filp->f_pos = found_key.offset;
2826 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
2828 di_total = btrfs_item_size(leaf, item);
2830 while (di_cur < di_total) {
2831 struct btrfs_key location;
2833 name_len = btrfs_dir_name_len(leaf, di);
2834 if (name_len <= sizeof(tmp_name)) {
2835 name_ptr = tmp_name;
2837 name_ptr = kmalloc(name_len, GFP_NOFS);
2843 read_extent_buffer(leaf, name_ptr,
2844 (unsigned long)(di + 1), name_len);
2846 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
2847 btrfs_dir_item_key_to_cpu(leaf, di, &location);
2848 over = filldir(dirent, name_ptr, name_len,
2849 found_key.offset, location.objectid,
2852 if (name_ptr != tmp_name)
2858 di_len = btrfs_dir_name_len(leaf, di) +
2859 btrfs_dir_data_len(leaf, di) + sizeof(*di);
2861 di = (struct btrfs_dir_item *)((char *)di + di_len);
2865 /* Reached end of directory/root. Bump pos past the last item. */
2866 if (key_type == BTRFS_DIR_INDEX_KEY)
2867 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
2873 btrfs_free_path(path);
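/*
* the btree copy of the inode is kept up to date as the inode changes,
* so a synchronous write_inode only has to commit the running
* transaction.
*/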
2877 int btrfs_write_inode(struct inode *inode, int wait)
2879 struct btrfs_root *root = BTRFS_I(inode)->root;
2880 struct btrfs_trans_handle *trans;
2883 if (root->fs_info->closing > 1)
2887 trans = btrfs_join_transaction(root, 1);
2888 btrfs_set_trans_block_group(trans, inode);
2889 ret = btrfs_commit_transaction(trans, root);
2895 * This is somewhat expensive, updating the tree every time the
2896 * inode changes. But, it is most likely to find the inode in cache.
* FIXME, needs more benchmarking... there are no reasons other than
* performance to keep or drop this code.
*/
2900 void btrfs_dirty_inode(struct inode *inode)
2902 struct btrfs_root *root = BTRFS_I(inode)->root;
2903 struct btrfs_trans_handle *trans;
2905 trans = btrfs_join_transaction(root, 1);
2906 btrfs_set_trans_block_group(trans, inode);
2907 btrfs_update_inode(trans, root, inode);
2908 btrfs_end_transaction(trans, root);
2912 * find the highest existing sequence number in a directory
2913 * and then set the in-memory index_cnt variable to reflect
* free sequence numbers
*/
2916 static int btrfs_set_inode_index_count(struct inode *inode)
2918 struct btrfs_root *root = BTRFS_I(inode)->root;
2919 struct btrfs_key key, found_key;
2920 struct btrfs_path *path;
2921 struct extent_buffer *leaf;
2924 key.objectid = inode->i_ino;
2925 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
2926 key.offset = (u64)-1;
2928 path = btrfs_alloc_path();
2932 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2935 /* FIXME: we should be able to handle this */
* MAGIC NUMBER EXPLANATION:
* we search the directory based on f_pos, and '.' and '..' occupy
* f_pos 0 and 1 respectively, so every real entry has to start at
* index 2
*/
2946 if (path->slots[0] == 0) {
2947 BTRFS_I(inode)->index_cnt = 2;
2953 leaf = path->nodes[0];
2954 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2956 if (found_key.objectid != inode->i_ino ||
2957 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
2958 BTRFS_I(inode)->index_cnt = 2;
2962 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
2964 btrfs_free_path(path);
2969 * helper to find a free sequence number in a given directory. This current
* code is very simple, later versions will do smarter things in the btree
*/
2972 static int btrfs_set_inode_index(struct inode *dir, struct inode *inode,
2977 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
2978 ret = btrfs_set_inode_index_count(dir);
2984 *index = BTRFS_I(dir)->index_cnt;
2985 BTRFS_I(dir)->index_cnt++;
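/*
* create a new inode: the inode item and the backref (the name in the
* parent directory) are inserted into the tree as a single batch of
* two items.
*/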
2990 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
2991 struct btrfs_root *root,
2993 const char *name, int name_len,
2996 struct btrfs_block_group_cache *group,
2997 int mode, u64 *index)
2999 struct inode *inode;
3000 struct btrfs_inode_item *inode_item;
3001 struct btrfs_block_group_cache *new_inode_group;
3002 struct btrfs_key *location;
3003 struct btrfs_path *path;
3004 struct btrfs_inode_ref *ref;
3005 struct btrfs_key key[2];
3011 path = btrfs_alloc_path();
3014 inode = new_inode(root->fs_info->sb);
3016 return ERR_PTR(-ENOMEM);
3019 ret = btrfs_set_inode_index(dir, inode, index);
3021 return ERR_PTR(ret);
* index_cnt is ignored for everything but a dir,
* btrfs_set_inode_index_count has an explanation for the magic
* number
*/
3028 init_btrfs_i(inode);
3029 BTRFS_I(inode)->index_cnt = 2;
3030 BTRFS_I(inode)->root = root;
3031 BTRFS_I(inode)->generation = trans->transid;
3037 new_inode_group = btrfs_find_block_group(root, group, 0,
3038 BTRFS_BLOCK_GROUP_METADATA, owner);
3039 if (!new_inode_group) {
3040 printk("find_block group failed\n");
3041 new_inode_group = group;
3043 BTRFS_I(inode)->block_group = new_inode_group;
3045 key[0].objectid = objectid;
3046 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3049 key[1].objectid = objectid;
3050 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3051 key[1].offset = ref_objectid;
3053 sizes[0] = sizeof(struct btrfs_inode_item);
3054 sizes[1] = name_len + sizeof(*ref);
3056 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3060 if (objectid > root->highest_inode)
3061 root->highest_inode = objectid;
3063 inode->i_uid = current->fsuid;
3064 inode->i_gid = current->fsgid;
3065 inode->i_mode = mode;
3066 inode->i_ino = objectid;
3067 inode_set_bytes(inode, 0);
3068 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3069 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3070 struct btrfs_inode_item);
3071 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3073 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3074 struct btrfs_inode_ref);
3075 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3076 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3077 ptr = (unsigned long)(ref + 1);
3078 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3080 btrfs_mark_buffer_dirty(path->nodes[0]);
3081 btrfs_free_path(path);
3083 location = &BTRFS_I(inode)->location;
3084 location->objectid = objectid;
3085 location->offset = 0;
3086 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3088 insert_inode_hash(inode);
3092 BTRFS_I(dir)->index_cnt--;
3093 btrfs_free_path(path);
3094 return ERR_PTR(ret);
3097 static inline u8 btrfs_inode_type(struct inode *inode)
3099 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3103 * utility function to add 'inode' into 'parent_inode' with
* a given name and a given sequence number.
* if 'add_backref' is true, also insert a backref from the
* inode to the parent directory.
*/
3108 int btrfs_add_link(struct btrfs_trans_handle *trans,
3109 struct inode *parent_inode, struct inode *inode,
3110 const char *name, int name_len, int add_backref, u64 index)
3113 struct btrfs_key key;
3114 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3116 key.objectid = inode->i_ino;
3117 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3120 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3121 parent_inode->i_ino,
3122 &key, btrfs_inode_type(inode),
3126 ret = btrfs_insert_inode_ref(trans, root,
3129 parent_inode->i_ino,
3132 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3134 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3135 ret = btrfs_update_inode(trans, root, parent_inode);
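/*
* wrapper around btrfs_add_link for non-directories that also
* instantiates the dentry on success.
*/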
3140 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3141 struct dentry *dentry, struct inode *inode,
3142 int backref, u64 index)
3144 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3145 inode, dentry->d_name.name,
3146 dentry->d_name.len, backref, index);
3148 d_instantiate(dentry, inode);
3156 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3157 int mode, dev_t rdev)
3159 struct btrfs_trans_handle *trans;
3160 struct btrfs_root *root = BTRFS_I(dir)->root;
3161 struct inode *inode = NULL;
3165 unsigned long nr = 0;
3168 if (!new_valid_dev(rdev))
3171 err = btrfs_check_free_space(root, 1, 0);
3175 trans = btrfs_start_transaction(root, 1);
3176 btrfs_set_trans_block_group(trans, dir);
3178 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3184 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3186 dentry->d_parent->d_inode->i_ino, objectid,
3187 BTRFS_I(dir)->block_group, mode, &index);
3188 err = PTR_ERR(inode);
3192 err = btrfs_init_acl(inode, dir);
3198 btrfs_set_trans_block_group(trans, inode);
3199 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3203 inode->i_op = &btrfs_special_inode_operations;
3204 init_special_inode(inode, inode->i_mode, rdev);
3205 btrfs_update_inode(trans, root, inode);
3207 dir->i_sb->s_dirt = 1;
3208 btrfs_update_inode_block_group(trans, inode);
3209 btrfs_update_inode_block_group(trans, dir);
3211 nr = trans->blocks_used;
3212 btrfs_end_transaction_throttle(trans, root);
3215 inode_dec_link_count(inode);
3218 btrfs_btree_balance_dirty(root, nr);
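/*
* create a regular file. Same flow as mknod, but the inode is wired
* up with the regular file, extent IO and address_space operations.
*/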
3222 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3223 int mode, struct nameidata *nd)
3225 struct btrfs_trans_handle *trans;
3226 struct btrfs_root *root = BTRFS_I(dir)->root;
3227 struct inode *inode = NULL;
3230 unsigned long nr = 0;
3234 err = btrfs_check_free_space(root, 1, 0);
3237 trans = btrfs_start_transaction(root, 1);
3238 btrfs_set_trans_block_group(trans, dir);
3240 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3246 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3248 dentry->d_parent->d_inode->i_ino,
3249 objectid, BTRFS_I(dir)->block_group, mode,
3251 err = PTR_ERR(inode);
3255 err = btrfs_init_acl(inode, dir);
3261 btrfs_set_trans_block_group(trans, inode);
3262 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3266 inode->i_mapping->a_ops = &btrfs_aops;
3267 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3268 inode->i_fop = &btrfs_file_operations;
3269 inode->i_op = &btrfs_file_inode_operations;
3270 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3272 dir->i_sb->s_dirt = 1;
3273 btrfs_update_inode_block_group(trans, inode);
3274 btrfs_update_inode_block_group(trans, dir);
3276 nr = trans->blocks_used;
3277 btrfs_end_transaction_throttle(trans, root);
3280 inode_dec_link_count(inode);
3283 btrfs_btree_balance_dirty(root, nr);
3287 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3288 struct dentry *dentry)
3290 struct btrfs_trans_handle *trans;
3291 struct btrfs_root *root = BTRFS_I(dir)->root;
3292 struct inode *inode = old_dentry->d_inode;
3294 unsigned long nr = 0;
3298 if (inode->i_nlink == 0)
3301 btrfs_inc_nlink(inode);
3302 err = btrfs_check_free_space(root, 1, 0);
3305 err = btrfs_set_inode_index(dir, inode, &index);
3309 trans = btrfs_start_transaction(root, 1);
3311 btrfs_set_trans_block_group(trans, dir);
3312 atomic_inc(&inode->i_count);
3314 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3319 dir->i_sb->s_dirt = 1;
3320 btrfs_update_inode_block_group(trans, dir);
3321 err = btrfs_update_inode(trans, root, inode);
3326 nr = trans->blocks_used;
3327 btrfs_end_transaction_throttle(trans, root);
3330 inode_dec_link_count(inode);
3333 btrfs_btree_balance_dirty(root, nr);
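/*
* mkdir: a new directory starts with i_size 0; directory sizes grow
* as names are added by btrfs_add_link.
*/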
3337 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3339 struct inode *inode = NULL;
3340 struct btrfs_trans_handle *trans;
3341 struct btrfs_root *root = BTRFS_I(dir)->root;
3343 int drop_on_err = 0;
3346 unsigned long nr = 1;
3348 err = btrfs_check_free_space(root, 1, 0);
3352 trans = btrfs_start_transaction(root, 1);
3353 btrfs_set_trans_block_group(trans, dir);
3355 if (IS_ERR(trans)) {
3356 err = PTR_ERR(trans);
3360 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3366 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3368 dentry->d_parent->d_inode->i_ino, objectid,
3369 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3371 if (IS_ERR(inode)) {
3372 err = PTR_ERR(inode);
3378 err = btrfs_init_acl(inode, dir);
3382 inode->i_op = &btrfs_dir_inode_operations;
3383 inode->i_fop = &btrfs_dir_file_operations;
3384 btrfs_set_trans_block_group(trans, inode);
3386 btrfs_i_size_write(inode, 0);
3387 err = btrfs_update_inode(trans, root, inode);
3391 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3392 inode, dentry->d_name.name,
3393 dentry->d_name.len, 0, index);
3397 d_instantiate(dentry, inode);
3399 dir->i_sb->s_dirt = 1;
3400 btrfs_update_inode_block_group(trans, inode);
3401 btrfs_update_inode_block_group(trans, dir);
3404 nr = trans->blocks_used;
3405 btrfs_end_transaction_throttle(trans, root);
3410 btrfs_btree_balance_dirty(root, nr);
/* helper for btrfs_get_extent. Given an existing extent in the tree,
* and an extent that you want to insert, deal with overlap and insert
* the new extent into the tree.
*/
3418 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3419 struct extent_map *existing,
3420 struct extent_map *em,
3421 u64 map_start, u64 map_len)
3425 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3426 start_diff = map_start - em->start;
3427 em->start = map_start;
3429 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3430 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3431 em->block_start += start_diff;
3432 em->block_len -= start_diff;
3434 return add_extent_mapping(em_tree, em);
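/*
* copy a compressed inline extent into a page: the extent is inflated
* with zlib, and if decompression fails the page range is zero filled
* instead.
*/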
3437 static noinline int uncompress_inline(struct btrfs_path *path,
3438 struct inode *inode, struct page *page,
3439 size_t pg_offset, u64 extent_offset,
3440 struct btrfs_file_extent_item *item)
3443 struct extent_buffer *leaf = path->nodes[0];
3446 unsigned long inline_size;
3449 WARN_ON(pg_offset != 0);
3450 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3451 inline_size = btrfs_file_extent_inline_item_len(leaf,
3452 btrfs_item_nr(leaf, path->slots[0]));
3453 tmp = kmalloc(inline_size, GFP_NOFS);
3454 ptr = btrfs_file_extent_inline_start(item);
3456 read_extent_buffer(leaf, tmp, ptr, inline_size);
3458 max_size = min(PAGE_CACHE_SIZE, max_size);
3459 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3460 inline_size, max_size);
3462 char *kaddr = kmap_atomic(page, KM_USER0);
3463 unsigned long copy_size = min_t(u64,
3464 PAGE_CACHE_SIZE - pg_offset,
3465 max_size - extent_offset);
3466 memset(kaddr + pg_offset, 0, copy_size);
3467 kunmap_atomic(kaddr, KM_USER0);
* a bit scary, this does extent mapping from logical file offset to the
* disk. The ugly parts come from merging extents from the disk with the
* in-ram representation. This gets more complex because of the
* data=ordered code, where the in-ram extents might be locked pending
* data=ordered completion.
*
* This also copies inline extents directly into the page.
*/
3481 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3482 size_t pg_offset, u64 start, u64 len,
3488 u64 extent_start = 0;
3490 u64 objectid = inode->i_ino;
3492 struct btrfs_path *path = NULL;
3493 struct btrfs_root *root = BTRFS_I(inode)->root;
3494 struct btrfs_file_extent_item *item;
3495 struct extent_buffer *leaf;
3496 struct btrfs_key found_key;
3497 struct extent_map *em = NULL;
3498 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3499 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3500 struct btrfs_trans_handle *trans = NULL;
3504 spin_lock(&em_tree->lock);
3505 em = lookup_extent_mapping(em_tree, start, len);
3507 em->bdev = root->fs_info->fs_devices->latest_bdev;
3508 spin_unlock(&em_tree->lock);
3511 if (em->start > start || em->start + em->len <= start)
3512 free_extent_map(em);
3513 else if (em->block_start == EXTENT_MAP_INLINE && page)
3514 free_extent_map(em);
3518 em = alloc_extent_map(GFP_NOFS);
3523 em->bdev = root->fs_info->fs_devices->latest_bdev;
3524 em->start = EXTENT_MAP_HOLE;
3526 em->block_len = (u64)-1;
3529 path = btrfs_alloc_path();
3533 ret = btrfs_lookup_file_extent(trans, root, path,
3534 objectid, start, trans != NULL);
3541 if (path->slots[0] == 0)
3546 leaf = path->nodes[0];
3547 item = btrfs_item_ptr(leaf, path->slots[0],
3548 struct btrfs_file_extent_item);
3549 /* are we inside the extent that was found? */
3550 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3551 found_type = btrfs_key_type(&found_key);
3552 if (found_key.objectid != objectid ||
3553 found_type != BTRFS_EXTENT_DATA_KEY) {
3557 found_type = btrfs_file_extent_type(leaf, item);
3558 extent_start = found_key.offset;
3559 compressed = btrfs_file_extent_compression(leaf, item);
3560 if (found_type == BTRFS_FILE_EXTENT_REG ||
3561 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3562 extent_end = extent_start +
3563 btrfs_file_extent_num_bytes(leaf, item);
3564 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3566 size = btrfs_file_extent_inline_len(leaf, item);
3567 extent_end = (extent_start + size + root->sectorsize - 1) &
3568 ~((u64)root->sectorsize - 1);
3571 if (start >= extent_end) {
3573 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3574 ret = btrfs_next_leaf(root, path);
3581 leaf = path->nodes[0];
3583 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3584 if (found_key.objectid != objectid ||
3585 found_key.type != BTRFS_EXTENT_DATA_KEY)
3587 if (start + len <= found_key.offset)
3590 em->len = found_key.offset - start;
3594 if (found_type == BTRFS_FILE_EXTENT_REG ||
3595 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3596 em->start = extent_start;
3597 em->len = extent_end - extent_start;
3598 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
3600 em->block_start = EXTENT_MAP_HOLE;
3604 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3605 em->block_start = bytenr;
3606 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
3609 bytenr += btrfs_file_extent_offset(leaf, item);
3610 em->block_start = bytenr;
3611 em->block_len = em->len;
3612 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
3613 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
3616 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3620 size_t extent_offset;
3623 em->block_start = EXTENT_MAP_INLINE;
3624 if (!page || create) {
3625 em->start = extent_start;
3626 em->len = extent_end - extent_start;
3630 size = btrfs_file_extent_inline_len(leaf, item);
3631 extent_offset = page_offset(page) + pg_offset - extent_start;
3632 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
3633 size - extent_offset);
3634 em->start = extent_start + extent_offset;
3635 em->len = (copy_size + root->sectorsize - 1) &
3636 ~((u64)root->sectorsize - 1);
3638 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3639 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
3640 if (create == 0 && !PageUptodate(page)) {
3641 if (btrfs_file_extent_compression(leaf, item) ==
3642 BTRFS_COMPRESS_ZLIB) {
3643 ret = uncompress_inline(path, inode, page,
3645 extent_offset, item);
3649 read_extent_buffer(leaf, map + pg_offset, ptr,
3653 flush_dcache_page(page);
3654 } else if (create && PageUptodate(page)) {
3657 free_extent_map(em);
3659 btrfs_release_path(root, path);
3660 trans = btrfs_join_transaction(root, 1);
3664 write_extent_buffer(leaf, map + pg_offset, ptr,
3667 btrfs_mark_buffer_dirty(leaf);
3669 set_extent_uptodate(io_tree, em->start,
3670 extent_map_end(em) - 1, GFP_NOFS);
3673 printk("unkknown found_type %d\n", found_type);
3680 em->block_start = EXTENT_MAP_HOLE;
3681 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
3683 btrfs_release_path(root, path);
3684 if (em->start > start || extent_map_end(em) <= start) {
3685 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
3691 spin_lock(&em_tree->lock);
3692 ret = add_extent_mapping(em_tree, em);
/* it is possible that someone inserted the extent into the tree
* while we had the lock dropped. It is also possible that
* an overlapping map exists in the tree
*/
3697 if (ret == -EEXIST) {
3698 struct extent_map *existing;
3702 existing = lookup_extent_mapping(em_tree, start, len);
3703 if (existing && (existing->start > start ||
3704 existing->start + existing->len <= start)) {
3705 free_extent_map(existing);
3709 existing = lookup_extent_mapping(em_tree, em->start,
3712 err = merge_extent_mapping(em_tree, existing,
3715 free_extent_map(existing);
3717 free_extent_map(em);
3722 printk("failing to insert %Lu %Lu\n",
3724 free_extent_map(em);
3728 free_extent_map(em);
3733 spin_unlock(&em_tree->lock);
3736 btrfs_free_path(path);
3738 ret = btrfs_end_transaction(trans, root);
3744 free_extent_map(em);
3746 return ERR_PTR(err);
3751 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
3752 const struct iovec *iov, loff_t offset,
3753 unsigned long nr_segs)
3758 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
3760 return extent_bmap(mapping, iblock, btrfs_get_extent);
3763 int btrfs_readpage(struct file *file, struct page *page)
3765 struct extent_io_tree *tree;
3766 tree = &BTRFS_I(page->mapping->host)->io_tree;
3767 return extent_read_full_page(tree, page, btrfs_get_extent);
3770 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
3772 struct extent_io_tree *tree;
3775 if (current->flags & PF_MEMALLOC) {
3776 redirty_page_for_writepage(wbc, page);
3780 tree = &BTRFS_I(page->mapping->host)->io_tree;
3781 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
3784 int btrfs_writepages(struct address_space *mapping,
3785 struct writeback_control *wbc)
3787 struct extent_io_tree *tree;
3788 tree = &BTRFS_I(mapping->host)->io_tree;
3789 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
3793 btrfs_readpages(struct file *file, struct address_space *mapping,
3794 struct list_head *pages, unsigned nr_pages)
3796 struct extent_io_tree *tree;
3797 tree = &BTRFS_I(mapping->host)->io_tree;
3798 return extent_readpages(tree, mapping, pages, nr_pages,
3801 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3803 struct extent_io_tree *tree;
3804 struct extent_map_tree *map;
3807 tree = &BTRFS_I(page->mapping->host)->io_tree;
3808 map = &BTRFS_I(page->mapping->host)->extent_tree;
3809 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
3811 ClearPagePrivate(page);
3812 set_page_private(page, 0);
3813 page_cache_release(page);
3818 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3820 if (PageWriteback(page) || PageDirty(page))
3822 return __btrfs_releasepage(page, gfp_flags);
3825 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
3827 struct extent_io_tree *tree;
3828 struct btrfs_ordered_extent *ordered;
3829 u64 page_start = page_offset(page);
3830 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
3832 wait_on_page_writeback(page);
3833 tree = &BTRFS_I(page->mapping->host)->io_tree;
3835 btrfs_releasepage(page, GFP_NOFS);
3839 lock_extent(tree, page_start, page_end, GFP_NOFS);
3840 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
* IO on this page will never be started, so we need
* to account for any ordered extents now
*/
3847 clear_extent_bit(tree, page_start, page_end,
3848 EXTENT_DIRTY | EXTENT_DELALLOC |
3849 EXTENT_LOCKED, 1, 0, GFP_NOFS);
3850 btrfs_finish_ordered_io(page->mapping->host,
3851 page_start, page_end);
3852 btrfs_put_ordered_extent(ordered);
3853 lock_extent(tree, page_start, page_end, GFP_NOFS);
3855 clear_extent_bit(tree, page_start, page_end,
3856 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3859 __btrfs_releasepage(page, GFP_NOFS);
3861 ClearPageChecked(page);
3862 if (PagePrivate(page)) {
3863 ClearPagePrivate(page);
3864 set_page_private(page, 0);
3865 page_cache_release(page);
3870 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
3871 * called from a page fault handler when a page is first dirtied. Hence we must
3872 * be careful to check for EOF conditions here. We set the page up correctly
3873 * for a written page which means we get ENOSPC checking when writing into
3874 * holes and correct delalloc and unwritten extent mapping on filesystems that
3875 * support these features.
3877 * We are not allowed to take the i_mutex here so we have to play games to
3878 * protect against truncate races as the page could now be beyond EOF. Because
3879 * vmtruncate() writes the inode size before removing pages, once we have the
3880 * page lock we can determine safely if the page is beyond EOF. If it is not
* beyond EOF, then the page is guaranteed safe against truncation until
* we unlock the page.
*/
3884 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
3886 struct inode *inode = fdentry(vma->vm_file)->d_inode;
3887 struct btrfs_root *root = BTRFS_I(inode)->root;
3888 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3889 struct btrfs_ordered_extent *ordered;
3891 unsigned long zero_start;
3897 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
3904 size = i_size_read(inode);
3905 page_start = page_offset(page);
3906 page_end = page_start + PAGE_CACHE_SIZE - 1;
3908 if ((page->mapping != inode->i_mapping) ||
3909 (page_start >= size)) {
3910 /* page got truncated out from underneath us */
3913 wait_on_page_writeback(page);
3915 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3916 set_page_extent_mapped(page);
3919 * we can't set the delalloc bits if there are pending ordered
* extents. Drop our locks and wait for them to finish
*/
3922 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3924 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3926 btrfs_start_ordered_extent(inode, ordered, 1);
3927 btrfs_put_ordered_extent(ordered);
3931 btrfs_set_extent_delalloc(inode, page_start, page_end);
3934 /* page is wholly or partially inside EOF */
3935 if (page_start + PAGE_CACHE_SIZE > size)
3936 zero_start = size & ~PAGE_CACHE_MASK;
3938 zero_start = PAGE_CACHE_SIZE;
3940 if (zero_start != PAGE_CACHE_SIZE) {
3942 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
3943 flush_dcache_page(page);
3946 ClearPageChecked(page);
3947 set_page_dirty(page);
3948 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
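/*
* truncate down to the current i_size: zero the tail page, wait for
* ordered IO past the new size, then drop all the file extent items
* beyond i_size. An orphan item covers us if we crash part way in.
*/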
3956 static void btrfs_truncate(struct inode *inode)
3958 struct btrfs_root *root = BTRFS_I(inode)->root;
3960 struct btrfs_trans_handle *trans;
3962 u64 mask = root->sectorsize - 1;
3964 if (!S_ISREG(inode->i_mode))
3966 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3969 btrfs_truncate_page(inode->i_mapping, inode->i_size);
3970 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
3972 trans = btrfs_start_transaction(root, 1);
3973 btrfs_set_trans_block_group(trans, inode);
3974 btrfs_i_size_write(inode, inode->i_size);
3976 ret = btrfs_orphan_add(trans, inode);
3979 /* FIXME, add redo link to tree so we don't leak on crash */
3980 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
3981 BTRFS_EXTENT_DATA_KEY);
3982 btrfs_update_inode(trans, root, inode);
3984 ret = btrfs_orphan_del(trans, inode);
3988 nr = trans->blocks_used;
3989 ret = btrfs_end_transaction_throttle(trans, root);
3991 btrfs_btree_balance_dirty(root, nr);
3995 * Invalidate a single dcache entry at the root of the filesystem.
* Needed after creation of snapshot or subvolume.
*/
3998 void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
4001 struct dentry *alias, *entry;
4004 alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
4008 /* change me if btrfs ever gets a d_hash operation */
4009 qstr.hash = full_name_hash(qstr.name, qstr.len);
4010 entry = d_lookup(alias, &qstr);
4013 d_invalidate(entry);
* create a new subvolume directory/inode (helper for the ioctl).
*/
4022 int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry,
4023 struct btrfs_trans_handle *trans, u64 new_dirid,
4024 struct btrfs_block_group_cache *block_group)
4026 struct inode *inode;
4030 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4031 new_dirid, block_group, S_IFDIR | 0700, &index);
4033 return PTR_ERR(inode);
4034 inode->i_op = &btrfs_dir_inode_operations;
4035 inode->i_fop = &btrfs_dir_file_operations;
4036 new_root->inode = inode;
4039 btrfs_i_size_write(inode, 0);
4041 error = btrfs_update_inode(trans, new_root, inode);
4045 atomic_inc(&inode->i_count);
4046 d_instantiate(dentry, inode);
4050 /* helper function for file defrag and space balancing. This
* forces readahead on a given range of bytes in an inode
*/
4053 unsigned long btrfs_force_ra(struct address_space *mapping,
4054 struct file_ra_state *ra, struct file *file,
4055 pgoff_t offset, pgoff_t last_index)
4057 pgoff_t req_size = last_index - offset + 1;
4059 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4060 return offset + req_size;
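/* allocate an in-memory inode from the btrfs inode slab */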
4063 struct inode *btrfs_alloc_inode(struct super_block *sb)
4065 struct btrfs_inode *ei;
4067 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4071 ei->logged_trans = 0;
4072 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4073 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4074 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4075 INIT_LIST_HEAD(&ei->i_orphan);
4076 return &ei->vfs_inode;
4079 void btrfs_destroy_inode(struct inode *inode)
4081 struct btrfs_ordered_extent *ordered;
4082 WARN_ON(!list_empty(&inode->i_dentry));
4083 WARN_ON(inode->i_data.nrpages);
4085 if (BTRFS_I(inode)->i_acl &&
4086 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4087 posix_acl_release(BTRFS_I(inode)->i_acl);
4088 if (BTRFS_I(inode)->i_default_acl &&
4089 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4090 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4092 spin_lock(&BTRFS_I(inode)->root->list_lock);
4093 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4094 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4095 " list\n", inode->i_ino);
4098 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4101 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4105 printk("found ordered extent %Lu %Lu\n",
4106 ordered->file_offset, ordered->len);
4107 btrfs_remove_ordered_extent(inode, ordered);
4108 btrfs_put_ordered_extent(ordered);
4109 btrfs_put_ordered_extent(ordered);
4112 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4113 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4116 static void init_once(void *foo)
4118 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4120 inode_init_once(&ei->vfs_inode);
4123 void btrfs_destroy_cachep(void)
4125 if (btrfs_inode_cachep)
4126 kmem_cache_destroy(btrfs_inode_cachep);
4127 if (btrfs_trans_handle_cachep)
4128 kmem_cache_destroy(btrfs_trans_handle_cachep);
4129 if (btrfs_transaction_cachep)
4130 kmem_cache_destroy(btrfs_transaction_cachep);
4131 if (btrfs_bit_radix_cachep)
4132 kmem_cache_destroy(btrfs_bit_radix_cachep);
4133 if (btrfs_path_cachep)
4134 kmem_cache_destroy(btrfs_path_cachep);
4137 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4138 unsigned long extra_flags,
4139 void (*ctor)(void *))
4141 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4142 SLAB_MEM_SPREAD | extra_flags), ctor);
4145 int btrfs_init_cachep(void)
4147 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4148 sizeof(struct btrfs_inode),
4150 if (!btrfs_inode_cachep)
4152 btrfs_trans_handle_cachep =
4153 btrfs_cache_create("btrfs_trans_handle_cache",
4154 sizeof(struct btrfs_trans_handle),
4156 if (!btrfs_trans_handle_cachep)
4158 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4159 sizeof(struct btrfs_transaction),
4161 if (!btrfs_transaction_cachep)
4163 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4164 sizeof(struct btrfs_path),
4166 if (!btrfs_path_cachep)
4168 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4169 SLAB_DESTROY_BY_RCU, NULL);
4170 if (!btrfs_bit_radix_cachep)
4174 btrfs_destroy_cachep();
4178 static int btrfs_getattr(struct vfsmount *mnt,
4179 struct dentry *dentry, struct kstat *stat)
4181 struct inode *inode = dentry->d_inode;
4182 generic_fillattr(inode, stat);
4183 stat->blksize = PAGE_CACHE_SIZE;
4184 stat->blocks = (inode_get_bytes(inode) +
4185 BTRFS_I(inode)->delalloc_bytes) >> 9;
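/*
* rename is an unlink from the old directory plus a link into the new
* one, done inside a single transaction. An existing target that
* drops to zero links is put on the orphan list.
*/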
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
4192 struct btrfs_trans_handle *trans;
4193 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4194 struct inode *new_inode = new_dentry->d_inode;
4195 struct inode *old_inode = old_dentry->d_inode;
4196 struct timespec ctime = CURRENT_TIME;
4200 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4201 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4205 ret = btrfs_check_free_space(root, 1, 0);
4209 trans = btrfs_start_transaction(root, 1);
4211 btrfs_set_trans_block_group(trans, new_dir);
4213 btrfs_inc_nlink(old_dentry->d_inode);
4214 old_dir->i_ctime = old_dir->i_mtime = ctime;
4215 new_dir->i_ctime = new_dir->i_mtime = ctime;
4216 old_inode->i_ctime = ctime;
4218 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4219 old_dentry->d_name.name,
4220 old_dentry->d_name.len);
4225 new_inode->i_ctime = CURRENT_TIME;
4226 ret = btrfs_unlink_inode(trans, root, new_dir,
4227 new_dentry->d_inode,
4228 new_dentry->d_name.name,
4229 new_dentry->d_name.len);
4232 if (new_inode->i_nlink == 0) {
4233 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4239 ret = btrfs_set_inode_index(new_dir, old_inode, &index);
4243 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4244 old_inode, new_dentry->d_name.name,
4245 new_dentry->d_name.len, 1, index);
4250 btrfs_end_transaction_throttle(trans, root);
4256 * some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
4259 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4261 struct list_head *head = &root->fs_info->delalloc_inodes;
4262 struct btrfs_inode *binode;
4263 struct inode *inode;
4264 unsigned long flags;
4266 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
while (!list_empty(head)) {
4268 binode = list_entry(head->next, struct btrfs_inode,
4270 inode = igrab(&binode->vfs_inode);
4272 list_del_init(&binode->delalloc_inodes);
4273 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4275 filemap_flush(inode->i_mapping);
4279 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4281 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4283 /* the filemap_flush will queue IO into the worker threads, but
4284 * we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/
4287 atomic_inc(&root->fs_info->async_submit_draining);
while (atomic_read(&root->fs_info->nr_async_submits)) {
4289 wait_event(root->fs_info->async_submit_wait,
4290 (atomic_read(&root->fs_info->nr_async_submits) == 0));
4292 atomic_dec(&root->fs_info->async_submit_draining);
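/*
* symlink targets are stored as an inline file extent holding the
* target string, so they must fit in a single leaf.
*/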
4296 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4297 const char *symname)
4299 struct btrfs_trans_handle *trans;
4300 struct btrfs_root *root = BTRFS_I(dir)->root;
4301 struct btrfs_path *path;
4302 struct btrfs_key key;
4303 struct inode *inode = NULL;
4311 struct btrfs_file_extent_item *ei;
4312 struct extent_buffer *leaf;
4313 unsigned long nr = 0;
4315 name_len = strlen(symname) + 1;
4316 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4317 return -ENAMETOOLONG;
4319 err = btrfs_check_free_space(root, 1, 0);
4323 trans = btrfs_start_transaction(root, 1);
4324 btrfs_set_trans_block_group(trans, dir);
4326 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4332 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4334 dentry->d_parent->d_inode->i_ino, objectid,
4335 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4337 err = PTR_ERR(inode);
4341 err = btrfs_init_acl(inode, dir);
4347 btrfs_set_trans_block_group(trans, inode);
4348 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4352 inode->i_mapping->a_ops = &btrfs_aops;
4353 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4354 inode->i_fop = &btrfs_file_operations;
4355 inode->i_op = &btrfs_file_inode_operations;
4356 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4358 dir->i_sb->s_dirt = 1;
4359 btrfs_update_inode_block_group(trans, inode);
4360 btrfs_update_inode_block_group(trans, dir);
4364 path = btrfs_alloc_path();
4366 key.objectid = inode->i_ino;
4368 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4369 datasize = btrfs_file_extent_calc_inline_size(name_len);
4370 err = btrfs_insert_empty_item(trans, root, path, &key,
4376 leaf = path->nodes[0];
4377 ei = btrfs_item_ptr(leaf, path->slots[0],
4378 struct btrfs_file_extent_item);
4379 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4380 btrfs_set_file_extent_type(leaf, ei,
4381 BTRFS_FILE_EXTENT_INLINE);
4382 btrfs_set_file_extent_encryption(leaf, ei, 0);
4383 btrfs_set_file_extent_compression(leaf, ei, 0);
4384 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
4385 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
4387 ptr = btrfs_file_extent_inline_start(ei);
4388 write_extent_buffer(leaf, symname, ptr, name_len);
4389 btrfs_mark_buffer_dirty(leaf);
4390 btrfs_free_path(path);
4392 inode->i_op = &btrfs_symlink_inode_operations;
4393 inode->i_mapping->a_ops = &btrfs_symlink_aops;
4394 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4395 inode_set_bytes(inode, name_len);
4396 btrfs_i_size_write(inode, name_len - 1);
4397 err = btrfs_update_inode(trans, root, inode);
4402 nr = trans->blocks_used;
4403 btrfs_end_transaction_throttle(trans, root);
4406 inode_dec_link_count(inode);
4409 btrfs_btree_balance_dirty(root, nr);
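/*
* reserve extents and insert PREALLOC file extent items for the range
* [start, end), allocating at most max_extent bytes at a time.
*/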
4413 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
4414 u64 alloc_hint, int mode)
4416 struct btrfs_trans_handle *trans;
4417 struct btrfs_root *root = BTRFS_I(inode)->root;
4418 struct btrfs_key ins;
4420 u64 cur_offset = start;
4421 u64 num_bytes = end - start;
4424 trans = btrfs_join_transaction(root, 1);
4426 btrfs_set_trans_block_group(trans, inode);
4428 while (num_bytes > 0) {
4429 alloc_size = min(num_bytes, root->fs_info->max_extent);
4430 ret = btrfs_reserve_extent(trans, root, alloc_size,
4431 root->sectorsize, 0, alloc_hint,
4437 ret = insert_reserved_file_extent(trans, inode,
4438 cur_offset, ins.objectid,
4439 ins.offset, ins.offset,
4440 ins.offset, 0, 0, 0,
4441 BTRFS_FILE_EXTENT_PREALLOC);
4443 num_bytes -= ins.offset;
4444 cur_offset += ins.offset;
4445 alloc_hint = ins.objectid + ins.offset;
4448 if (cur_offset > start) {
4449 inode->i_ctime = CURRENT_TIME;
4450 btrfs_set_flag(inode, PREALLOC);
4451 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4452 cur_offset > i_size_read(inode))
4453 btrfs_i_size_write(inode, cur_offset);
4454 ret = btrfs_update_inode(trans, root, inode);
4458 btrfs_end_transaction(trans, root);
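/*
* fallocate: walk the extent mappings for the range and preallocate
* any holes found. i_size is only extended when FALLOC_FL_KEEP_SIZE
* isn't set.
*/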
4462 static long btrfs_fallocate(struct inode *inode, int mode,
4463 loff_t offset, loff_t len)
4470 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
4471 struct extent_map *em;
4474 alloc_start = offset & ~mask;
4475 alloc_end = (offset + len + mask) & ~mask;
4477 mutex_lock(&inode->i_mutex);
4478 if (alloc_start > inode->i_size) {
4479 ret = btrfs_cont_expand(inode, alloc_start);
4485 struct btrfs_ordered_extent *ordered;
4486 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
4487 alloc_end - 1, GFP_NOFS);
4488 ordered = btrfs_lookup_first_ordered_extent(inode,
4491 ordered->file_offset + ordered->len > alloc_start &&
4492 ordered->file_offset < alloc_end) {
4493 btrfs_put_ordered_extent(ordered);
4494 unlock_extent(&BTRFS_I(inode)->io_tree,
4495 alloc_start, alloc_end - 1, GFP_NOFS);
4496 btrfs_wait_ordered_range(inode, alloc_start,
4497 alloc_end - alloc_start);
4500 btrfs_put_ordered_extent(ordered);
4505 cur_offset = alloc_start;
4507 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4508 alloc_end - cur_offset, 0);
4509 BUG_ON(IS_ERR(em) || !em);
4510 last_byte = min(extent_map_end(em), alloc_end);
4511 last_byte = (last_byte + mask) & ~mask;
4512 if (em->block_start == EXTENT_MAP_HOLE) {
4513 ret = prealloc_file_range(inode, cur_offset,
4514 last_byte, alloc_hint, mode);
4516 free_extent_map(em);
4520 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
4521 alloc_hint = em->block_start;
4522 free_extent_map(em);
4524 cur_offset = last_byte;
4525 if (cur_offset >= alloc_end) {
4530 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
4533 mutex_unlock(&inode->i_mutex);
4537 static int btrfs_set_page_dirty(struct page *page)
4539 return __set_page_dirty_nobuffers(page);
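/*
* deny writes when the btrfs read-only inode flag is set, otherwise
* fall back to generic permission checking plus the btrfs ACL check.
*/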
4542 static int btrfs_permission(struct inode *inode, int mask)
4544 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
4546 return generic_permission(inode, mask, btrfs_check_acl);
4549 static struct inode_operations btrfs_dir_inode_operations = {
4550 .lookup = btrfs_lookup,
4551 .create = btrfs_create,
4552 .unlink = btrfs_unlink,
4554 .mkdir = btrfs_mkdir,
4555 .rmdir = btrfs_rmdir,
4556 .rename = btrfs_rename,
4557 .symlink = btrfs_symlink,
4558 .setattr = btrfs_setattr,
4559 .mknod = btrfs_mknod,
4560 .setxattr = btrfs_setxattr,
4561 .getxattr = btrfs_getxattr,
4562 .listxattr = btrfs_listxattr,
4563 .removexattr = btrfs_removexattr,
4564 .permission = btrfs_permission,
4566 static struct inode_operations btrfs_dir_ro_inode_operations = {
4567 .lookup = btrfs_lookup,
4568 .permission = btrfs_permission,
4570 static struct file_operations btrfs_dir_file_operations = {
4571 .llseek = generic_file_llseek,
4572 .read = generic_read_dir,
4573 .readdir = btrfs_real_readdir,
4574 .unlocked_ioctl = btrfs_ioctl,
4575 #ifdef CONFIG_COMPAT
4576 .compat_ioctl = btrfs_ioctl,
4578 .release = btrfs_release_file,
4579 .fsync = btrfs_sync_file,
4582 static struct extent_io_ops btrfs_extent_io_ops = {
4583 .fill_delalloc = run_delalloc_range,
4584 .submit_bio_hook = btrfs_submit_bio_hook,
4585 .merge_bio_hook = btrfs_merge_bio_hook,
4586 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
4587 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
4588 .writepage_start_hook = btrfs_writepage_start_hook,
4589 .readpage_io_failed_hook = btrfs_io_failed_hook,
4590 .set_bit_hook = btrfs_set_bit_hook,
4591 .clear_bit_hook = btrfs_clear_bit_hook,
4594 static struct address_space_operations btrfs_aops = {
4595 .readpage = btrfs_readpage,
4596 .writepage = btrfs_writepage,
4597 .writepages = btrfs_writepages,
4598 .readpages = btrfs_readpages,
4599 .sync_page = block_sync_page,
4601 .direct_IO = btrfs_direct_IO,
4602 .invalidatepage = btrfs_invalidatepage,
4603 .releasepage = btrfs_releasepage,
4604 .set_page_dirty = btrfs_set_page_dirty,
4607 static struct address_space_operations btrfs_symlink_aops = {
4608 .readpage = btrfs_readpage,
4609 .writepage = btrfs_writepage,
4610 .invalidatepage = btrfs_invalidatepage,
4611 .releasepage = btrfs_releasepage,
4614 static struct inode_operations btrfs_file_inode_operations = {
4615 .truncate = btrfs_truncate,
4616 .getattr = btrfs_getattr,
4617 .setattr = btrfs_setattr,
4618 .setxattr = btrfs_setxattr,
4619 .getxattr = btrfs_getxattr,
4620 .listxattr = btrfs_listxattr,
4621 .removexattr = btrfs_removexattr,
4622 .permission = btrfs_permission,
4623 .fallocate = btrfs_fallocate,
4625 static struct inode_operations btrfs_special_inode_operations = {
4626 .getattr = btrfs_getattr,
4627 .setattr = btrfs_setattr,
4628 .permission = btrfs_permission,
4629 .setxattr = btrfs_setxattr,
4630 .getxattr = btrfs_getxattr,
4631 .listxattr = btrfs_listxattr,
4632 .removexattr = btrfs_removexattr,
4634 static struct inode_operations btrfs_symlink_inode_operations = {
4635 .readlink = generic_readlink,
4636 .follow_link = page_follow_link_light,
4637 .put_link = page_put_link,
4638 .permission = btrfs_permission,