2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/version.h>
38 #include <linux/xattr.h>
39 #include <linux/posix_acl.h>
42 #include "transaction.h"
43 #include "btrfs_inode.h"
45 #include "print-tree.h"
47 #include "ordered-data.h"
51 #include "ref-cache.h"
52 #include "compression.h"
54 struct btrfs_iget_args {
56 struct btrfs_root *root;
59 static struct inode_operations btrfs_dir_inode_operations;
60 static struct inode_operations btrfs_symlink_inode_operations;
61 static struct inode_operations btrfs_dir_ro_inode_operations;
62 static struct inode_operations btrfs_special_inode_operations;
63 static struct inode_operations btrfs_file_inode_operations;
64 static struct address_space_operations btrfs_aops;
65 static struct address_space_operations btrfs_symlink_aops;
66 static struct file_operations btrfs_dir_file_operations;
67 static struct extent_io_ops btrfs_extent_io_ops;
69 static struct kmem_cache *btrfs_inode_cachep;
70 struct kmem_cache *btrfs_trans_handle_cachep;
71 struct kmem_cache *btrfs_transaction_cachep;
72 struct kmem_cache *btrfs_bit_radix_cachep;
73 struct kmem_cache *btrfs_path_cachep;
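/*
 * directory items store one of the BTRFS_FT_* type bytes on disk; this
 * table maps the vfs S_IF* mode bits to that value, indexed as
 * btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT]
 */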
76 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
77 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
78 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
79 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
80 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
81 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
82 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
83 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
86 static void btrfs_truncate(struct inode *inode);
87 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
90 * a very lame attempt at stopping writes when the FS is 85% full. There
91 * are countless ways this is incorrect, but it is better than nothing.
93 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
102 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
103 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
104 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
112 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
114 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
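/*
 * the elided lines above presumably derive thresh as a percentage of
 * total, something like:
 *
 *	thresh = total * 85;
 *	do_div(thresh, 100);
 *
 * so the write is refused (-ENOSPC) once used + delalloc + num_required
 * crosses that line.
 */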
119 * this does all the hard work for inserting an inline extent into
120 * the btree. The caller should have done a btrfs_drop_extents so that
121 * no overlapping inline items exist in the btree
123 static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
124 struct btrfs_root *root, struct inode *inode,
125 u64 start, size_t size, size_t compressed_size,
126 struct page **compressed_pages)
128 struct btrfs_key key;
129 struct btrfs_path *path;
130 struct extent_buffer *leaf;
131 struct page *page = NULL;
134 struct btrfs_file_extent_item *ei;
137 size_t cur_size = size;
139 unsigned long offset;
140 int use_compress = 0;
142 if (compressed_size && compressed_pages) {
144 cur_size = compressed_size;
147 path = btrfs_alloc_path();
148 if (!path)
149 return -ENOMEM;
150 btrfs_set_trans_block_group(trans, inode);
152 key.objectid = inode->i_ino;
154 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
155 inode_add_bytes(inode, size);
156 datasize = btrfs_file_extent_calc_inline_size(cur_size);
158 inode_add_bytes(inode, size);
159 ret = btrfs_insert_empty_item(trans, root, path, &key,
164 printk("got bad ret %d\n", ret);
167 leaf = path->nodes[0];
168 ei = btrfs_item_ptr(leaf, path->slots[0],
169 struct btrfs_file_extent_item);
170 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
171 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
172 btrfs_set_file_extent_encryption(leaf, ei, 0);
173 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
174 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
175 ptr = btrfs_file_extent_inline_start(ei);
180 while (compressed_size > 0) {
181 cpage = compressed_pages[i];
182 cur_size = min(compressed_size,
186 write_extent_buffer(leaf, kaddr, ptr, cur_size);
191 compressed_size -= cur_size;
193 btrfs_set_file_extent_compression(leaf, ei,
194 BTRFS_COMPRESS_ZLIB);
196 page = find_get_page(inode->i_mapping,
197 start >> PAGE_CACHE_SHIFT);
198 btrfs_set_file_extent_compression(leaf, ei, 0);
199 kaddr = kmap_atomic(page, KM_USER0);
200 offset = start & (PAGE_CACHE_SIZE - 1);
201 write_extent_buffer(leaf, kaddr + offset, ptr, size);
202 kunmap_atomic(kaddr, KM_USER0);
203 page_cache_release(page);
205 btrfs_mark_buffer_dirty(leaf);
206 btrfs_free_path(path);
208 BTRFS_I(inode)->disk_i_size = inode->i_size;
209 btrfs_update_inode(trans, root, inode);
212 btrfs_free_path(path);
218 * conditionally insert an inline extent into the file. This
219 * does the checks required to make sure the data is small enough
220 * to fit as an inline extent.
222 static int cow_file_range_inline(struct btrfs_trans_handle *trans,
223 struct btrfs_root *root,
224 struct inode *inode, u64 start, u64 end,
225 size_t compressed_size,
226 struct page **compressed_pages)
228 u64 isize = i_size_read(inode);
229 u64 actual_end = min(end + 1, isize);
230 u64 inline_len = actual_end - start;
231 u64 aligned_end = (end + root->sectorsize - 1) &
232 ~((u64)root->sectorsize - 1);
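/*
 * aligned_end rounds end up to the next sectorsize boundary, e.g. with
 * a 4K sectorsize an end of 5000 becomes 8192
 */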
234 u64 data_len = inline_len;
238 data_len = compressed_size;
241 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
243 (actual_end & (root->sectorsize - 1)) == 0) ||
245 data_len > root->fs_info->max_inline) {
249 mutex_lock(&BTRFS_I(inode)->extent_mutex);
250 ret = btrfs_drop_extents(trans, root, inode, start,
251 aligned_end, aligned_end, &hint_byte);
254 if (isize > actual_end)
255 inline_len = min_t(u64, isize, actual_end);
256 ret = insert_inline_extent(trans, root, inode, start,
257 inline_len, compressed_size,
260 btrfs_drop_extent_cache(inode, start, aligned_end, 0);
261 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
266 * when extent_io.c finds a delayed allocation range in the file,
267 * the callbacks end up in this code. The basic idea is to
268 * allocate extents on disk for the range, and create ordered data structs
269 * in ram to track those extents.
271 * locked_page is the page that writepage had locked already. We use
272 * it to make sure we don't do extra locks or unlocks.
274 * *page_started is set to one if we unlock locked_page and do everything
275 * required to start IO on it. It may be clean and already done with
278 static int cow_file_range(struct inode *inode, struct page *locked_page,
279 u64 start, u64 end, int *page_started)
281 struct btrfs_root *root = BTRFS_I(inode)->root;
282 struct btrfs_trans_handle *trans;
285 unsigned long ram_size;
289 u64 blocksize = root->sectorsize;
291 struct btrfs_key ins;
292 struct extent_map *em;
293 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
295 struct page **pages = NULL;
296 unsigned long nr_pages;
297 unsigned long nr_pages_ret = 0;
298 unsigned long total_compressed = 0;
299 unsigned long total_in = 0;
300 unsigned long max_compressed = 128 * 1024;
301 unsigned long max_uncompressed = 256 * 1024;
305 trans = btrfs_join_transaction(root, 1);
307 btrfs_set_trans_block_group(trans, inode);
311 * compression made this loop a bit ugly, but the basic idea is to
312 * compress some pages but keep the total size of the compressed
313 * extent relatively small. If compression is off, this goto target
318 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
319 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
321 actual_end = min_t(u64, i_size_read(inode), end + 1);
322 total_compressed = actual_end - start;
324 /* we want to make sure the amount of ram required to uncompress
325 * an extent is reasonable, so we limit the total size in ram
326 * of a compressed extent to 256k
328 total_compressed = min(total_compressed, max_uncompressed);
329 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
330 num_bytes = max(blocksize, num_bytes);
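/*
 * num_bytes is the delalloc range rounded up to a whole block: end is
 * inclusive, so (end - start + blocksize) & ~(blocksize - 1) turns e.g.
 * start 0, end 5000 into 8192 with a 4K blocksize
 */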
331 disk_num_bytes = num_bytes;
335 /* we do compression for mount -o compress and when the
336 * inode has not been flagged as nocompress
338 if (!btrfs_test_flag(inode, NOCOMPRESS) &&
339 btrfs_test_opt(root, COMPRESS)) {
341 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
343 /* we want to make sure the amount of IO required to satisfy
344 * a random read is reasonably small, so we limit the size
345 * of a compressed extent to 128k
347 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
348 total_compressed, pages,
349 nr_pages, &nr_pages_ret,
355 unsigned long offset = total_compressed &
356 (PAGE_CACHE_SIZE - 1);
357 struct page *page = pages[nr_pages_ret - 1];
360 /* zero the tail end of the last page, we might be
361 * sending it down to disk
364 kaddr = kmap_atomic(page, KM_USER0);
365 memset(kaddr + offset, 0,
366 PAGE_CACHE_SIZE - offset);
367 kunmap_atomic(kaddr, KM_USER0);
373 /* let's try to make an inline extent */
374 if (ret || total_in < (end - start + 1)) {
375 /* we didn't compress the entire range, try
376 * to make an uncompressed inline extent. This
377 * is almost sure to fail, but maybe inline sizes
378 * will get bigger later
380 ret = cow_file_range_inline(trans, root, inode,
381 start, end, 0, NULL);
383 ret = cow_file_range_inline(trans, root, inode,
385 total_compressed, pages);
388 extent_clear_unlock_delalloc(inode,
389 &BTRFS_I(inode)->io_tree,
400 * we aren't doing an inline extent, round the compressed size
401 * up to a block size boundary so the allocator does sane
404 total_compressed = (total_compressed + blocksize - 1) &
408 * one last check to make sure the compression is really a
409 * win, compare the page count read with the blocks on disk
411 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
412 ~(PAGE_CACHE_SIZE - 1);
413 if (total_compressed >= total_in) {
416 disk_num_bytes = total_compressed;
417 num_bytes = total_in;
420 if (!will_compress && pages) {
422 * the compression code ran but failed to make things smaller,
423 * free any pages it allocated and our page pointer array
425 for (i = 0; i < nr_pages_ret; i++) {
426 page_cache_release(pages[i]);
430 total_compressed = 0;
433 /* flag the file so we don't compress in the future */
434 btrfs_set_flag(inode, NOCOMPRESS);
437 BUG_ON(disk_num_bytes >
438 btrfs_super_total_bytes(&root->fs_info->super_copy));
440 mutex_lock(&BTRFS_I(inode)->extent_mutex);
441 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
442 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
444 while (disk_num_bytes > 0) {
445 unsigned long min_bytes;
448 * the max size of a compressed extent is pretty small,
449 * make the code a little less complex by forcing
450 * the allocator to find a whole compressed extent at once
453 min_bytes = disk_num_bytes;
455 min_bytes = root->sectorsize;
457 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
458 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
459 min_bytes, 0, alloc_hint,
463 goto free_pages_out_fail;
465 em = alloc_extent_map(GFP_NOFS);
469 ram_size = num_bytes;
472 /* ramsize == disk size */
473 ram_size = ins.offset;
474 em->len = ins.offset;
477 em->block_start = ins.objectid;
478 em->block_len = ins.offset;
479 em->bdev = root->fs_info->fs_devices->latest_bdev;
481 mutex_lock(&BTRFS_I(inode)->extent_mutex);
482 set_bit(EXTENT_FLAG_PINNED, &em->flags);
485 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
488 spin_lock(&em_tree->lock);
489 ret = add_extent_mapping(em_tree, em);
490 spin_unlock(&em_tree->lock);
491 if (ret != -EEXIST) {
495 btrfs_drop_extent_cache(inode, start,
496 start + ram_size - 1, 0);
498 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
500 cur_alloc_size = ins.offset;
501 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
502 ram_size, cur_alloc_size, 0,
506 if (disk_num_bytes < cur_alloc_size) {
507 printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes,
514 * we're doing compression, and we need to
515 * submit the compressed extents down to the device.
517 * We lock down all the file pages, clearing their
518 * dirty bits and setting them writeback. Everyone
519 * that wants to modify the page will wait on the
520 * ordered extent above.
522 * The writeback bits on the file pages are
523 * cleared when the compressed pages are on disk
525 btrfs_end_transaction(trans, root);
527 if (start <= page_offset(locked_page) &&
528 page_offset(locked_page) < start + ram_size) {
532 extent_clear_unlock_delalloc(inode,
533 &BTRFS_I(inode)->io_tree,
535 start + ram_size - 1,
538 ret = btrfs_submit_compressed_write(inode, start,
539 ram_size, ins.objectid,
540 cur_alloc_size, pages,
544 trans = btrfs_join_transaction(root, 1);
545 if (start + ram_size < end) {
547 alloc_hint = ins.objectid + ins.offset;
548 /* pages will be freed at end_bio time */
552 /* we've written everything, time to go */
556 /* we're not doing compressed IO, don't unlock the first
557 * page (which the caller expects to stay locked), don't
558 * clear any dirty bits and don't set any writeback bits
560 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
561 start, start + ram_size - 1,
562 locked_page, 0, 0, 0);
563 disk_num_bytes -= cur_alloc_size;
564 num_bytes -= cur_alloc_size;
565 alloc_hint = ins.objectid + ins.offset;
566 start += cur_alloc_size;
571 btrfs_end_transaction(trans, root);
576 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
577 start, end, locked_page, 0, 0, 0);
579 for (i = 0; i < nr_pages_ret; i++)
580 page_cache_release(pages[i]);
588 * when the nocow writeback path calls back. This checks for snapshots or COW copies
589 * of the extents that exist in the file, and COWs the file as required.
591 * If no cow copies or snapshots exist, we write directly to the existing
594 static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
595 u64 start, u64 end, int *page_started)
602 struct btrfs_root *root = BTRFS_I(inode)->root;
603 struct btrfs_block_group_cache *block_group;
604 struct btrfs_trans_handle *trans;
605 struct extent_buffer *leaf;
607 struct btrfs_path *path;
608 struct btrfs_file_extent_item *item;
611 struct btrfs_key found_key;
613 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
614 path = btrfs_alloc_path();
616 trans = btrfs_join_transaction(root, 1);
619 ret = btrfs_lookup_file_extent(NULL, root, path,
620 inode->i_ino, start, 0);
627 if (path->slots[0] == 0)
632 leaf = path->nodes[0];
633 item = btrfs_item_ptr(leaf, path->slots[0],
634 struct btrfs_file_extent_item);
636 /* are we inside the extent that was found? */
637 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
638 found_type = btrfs_key_type(&found_key);
639 if (found_key.objectid != inode->i_ino ||
640 found_type != BTRFS_EXTENT_DATA_KEY)
643 found_type = btrfs_file_extent_type(leaf, item);
644 extent_start = found_key.offset;
645 if (found_type == BTRFS_FILE_EXTENT_REG) {
646 u64 extent_num_bytes;
648 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
649 extent_end = extent_start + extent_num_bytes;
652 if (btrfs_file_extent_compression(leaf, item) ||
653 btrfs_file_extent_encryption(leaf, item) ||
654 btrfs_file_extent_other_encoding(leaf, item))
657 if (loops && start != extent_start)
660 if (start < extent_start || start >= extent_end)
663 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
667 if (btrfs_cross_ref_exists(trans, root, &found_key, bytenr))
670 * we may be called by the resizer, make sure we're inside
671 * the limits of the FS
673 block_group = btrfs_lookup_block_group(root->fs_info,
675 if (!block_group || block_group->ro)
678 bytenr += btrfs_file_extent_offset(leaf, item);
679 extent_num_bytes = min(end + 1, extent_end) - start;
680 ret = btrfs_add_ordered_extent(inode, start, bytenr,
682 extent_num_bytes, 1, 0);
688 btrfs_release_path(root, path);
696 btrfs_end_transaction(trans, root);
697 btrfs_free_path(path);
698 return cow_file_range(inode, locked_page, start, end,
703 btrfs_end_transaction(trans, root);
704 btrfs_free_path(path);
709 * extent_io.c call back to do delayed allocation processing
711 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
712 u64 start, u64 end, int *page_started)
714 struct btrfs_root *root = BTRFS_I(inode)->root;
717 if (btrfs_test_opt(root, NODATACOW) ||
718 btrfs_test_flag(inode, NODATACOW))
719 ret = run_delalloc_nocow(inode, locked_page, start, end,
722 ret = cow_file_range(inode, locked_page, start, end,
729 * extent_io.c set_bit_hook, used to track delayed allocation
730 * bytes in this file, and to maintain the list of inodes that
731 * have pending delalloc work to be done.
733 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
734 unsigned long old, unsigned long bits)
737 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
738 struct btrfs_root *root = BTRFS_I(inode)->root;
739 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
740 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
741 root->fs_info->delalloc_bytes += end - start + 1;
742 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
743 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
744 &root->fs_info->delalloc_inodes);
746 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
752 * extent_io.c clear_bit_hook, see set_bit_hook for why
754 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
755 unsigned long old, unsigned long bits)
757 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
758 struct btrfs_root *root = BTRFS_I(inode)->root;
761 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
762 if (end - start + 1 > root->fs_info->delalloc_bytes) {
763 printk("warning: delalloc account %Lu %Lu\n",
764 end - start + 1, root->fs_info->delalloc_bytes);
765 root->fs_info->delalloc_bytes = 0;
766 BTRFS_I(inode)->delalloc_bytes = 0;
768 root->fs_info->delalloc_bytes -= end - start + 1;
769 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
771 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
772 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
773 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
775 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
781 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
782 * we don't create bios that span stripes or chunks
784 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
785 size_t size, struct bio *bio,
786 unsigned long bio_flags)
788 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
789 struct btrfs_mapping_tree *map_tree;
790 u64 logical = (u64)bio->bi_sector << 9;
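/* bi_sector counts 512 byte sectors, so << 9 gives the byte offset */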
795 length = bio->bi_size;
796 map_tree = &root->fs_info->mapping_tree;
798 ret = btrfs_map_block(map_tree, READ, logical,
799 &map_length, NULL, 0);
801 if (map_length < length + size) {
808 * in order to insert checksums into the metadata in large chunks,
809 * we wait until bio submission time. All the pages in the bio are
810 * checksummed and sums are attached onto the ordered extent record.
812 * At IO completion time the csums attached on the ordered extent record
813 * are inserted into the btree
815 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
816 int mirror_num, unsigned long bio_flags)
818 struct btrfs_root *root = BTRFS_I(inode)->root;
821 ret = btrfs_csum_one_bio(root, inode, bio);
824 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
828 * extent_io.c submission hook. This does the right thing for csum calculation on write,
829 * or reading the csums from the tree before a read
831 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
832 int mirror_num, unsigned long bio_flags)
834 struct btrfs_root *root = BTRFS_I(inode)->root;
838 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
841 skip_sum = btrfs_test_opt(root, NODATASUM) ||
842 btrfs_test_flag(inode, NODATASUM);
844 if (!(rw & (1 << BIO_RW))) {
846 btrfs_lookup_bio_sums(root, inode, bio);
848 if (bio_flags & EXTENT_BIO_COMPRESSED)
849 return btrfs_submit_compressed_read(inode, bio,
850 mirror_num, bio_flags);
852 } else if (!skip_sum) {
853 /* we're doing a write, do the async checksumming */
854 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
855 inode, rw, bio, mirror_num,
856 bio_flags, __btrfs_submit_bio_hook);
860 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
864 * given a list of ordered sums, record them in the inode. This happens
865 * at IO completion time based on sums calculated at bio submission time.
867 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
868 struct inode *inode, u64 file_offset,
869 struct list_head *list)
871 struct list_head *cur;
872 struct btrfs_ordered_sum *sum;
874 btrfs_set_trans_block_group(trans, inode);
875 list_for_each(cur, list) {
876 sum = list_entry(cur, struct btrfs_ordered_sum, list);
877 btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root,
883 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
885 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
889 /* see btrfs_writepage_start_hook for details on why this is required */
890 struct btrfs_writepage_fixup {
892 struct btrfs_work work;
895 void btrfs_writepage_fixup_worker(struct btrfs_work *work)
897 struct btrfs_writepage_fixup *fixup;
898 struct btrfs_ordered_extent *ordered;
904 fixup = container_of(work, struct btrfs_writepage_fixup, work);
908 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
909 ClearPageChecked(page);
913 inode = page->mapping->host;
914 page_start = page_offset(page);
915 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
917 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
919 /* already ordered? We're done */
920 if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
921 EXTENT_ORDERED, 0)) {
925 ordered = btrfs_lookup_ordered_extent(inode, page_start);
927 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
930 btrfs_start_ordered_extent(inode, ordered, 1);
934 btrfs_set_extent_delalloc(inode, page_start, page_end);
935 ClearPageChecked(page);
937 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
940 page_cache_release(page);
944 * There are a few paths in the higher layers of the kernel that directly
945 * set the page dirty bit without asking the filesystem if it is a
946 * good idea. This causes problems because we want to make sure COW
947 * properly happens and the data=ordered rules are followed.
949 * In our case any range that doesn't have the ORDERED bit set
950 * hasn't been properly setup for IO. We kick off an async process
951 * to fix it up. The async helper will wait for ordered extents, set
952 * the delalloc bit and make it safe to write the page.
954 int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
956 struct inode *inode = page->mapping->host;
957 struct btrfs_writepage_fixup *fixup;
958 struct btrfs_root *root = BTRFS_I(inode)->root;
961 ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
966 if (PageChecked(page))
969 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
973 SetPageChecked(page);
974 page_cache_get(page);
975 fixup->work.func = btrfs_writepage_fixup_worker;
977 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
981 /* as ordered data IO finishes, this gets called so we can finish
982 * an ordered extent if the range of bytes in the file it covers are
985 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
987 struct btrfs_root *root = BTRFS_I(inode)->root;
988 struct btrfs_trans_handle *trans;
989 struct btrfs_ordered_extent *ordered_extent;
990 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
991 struct btrfs_file_extent_item *extent_item;
992 struct btrfs_path *path = NULL;
993 struct extent_buffer *leaf;
995 struct list_head list;
996 struct btrfs_key ins;
999 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1003 trans = btrfs_join_transaction(root, 1);
1005 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1006 BUG_ON(!ordered_extent);
1007 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1010 path = btrfs_alloc_path();
1013 lock_extent(io_tree, ordered_extent->file_offset,
1014 ordered_extent->file_offset + ordered_extent->len - 1,
1017 INIT_LIST_HEAD(&list);
1019 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1021 ret = btrfs_drop_extents(trans, root, inode,
1022 ordered_extent->file_offset,
1023 ordered_extent->file_offset +
1024 ordered_extent->len,
1025 ordered_extent->file_offset, &alloc_hint);
1028 ins.objectid = inode->i_ino;
1029 ins.offset = ordered_extent->file_offset;
1030 ins.type = BTRFS_EXTENT_DATA_KEY;
1031 ret = btrfs_insert_empty_item(trans, root, path, &ins,
1032 sizeof(*extent_item));
1034 leaf = path->nodes[0];
1035 extent_item = btrfs_item_ptr(leaf, path->slots[0],
1036 struct btrfs_file_extent_item);
1037 btrfs_set_file_extent_generation(leaf, extent_item, trans->transid);
1038 btrfs_set_file_extent_type(leaf, extent_item, BTRFS_FILE_EXTENT_REG);
1039 btrfs_set_file_extent_disk_bytenr(leaf, extent_item,
1040 ordered_extent->start);
1041 btrfs_set_file_extent_disk_num_bytes(leaf, extent_item,
1042 ordered_extent->disk_len);
1043 btrfs_set_file_extent_offset(leaf, extent_item, 0);
1045 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1046 btrfs_set_file_extent_compression(leaf, extent_item, 1);
1048 btrfs_set_file_extent_compression(leaf, extent_item, 0);
1049 btrfs_set_file_extent_encryption(leaf, extent_item, 0);
1050 btrfs_set_file_extent_other_encoding(leaf, extent_item, 0);
1052 /* ram bytes = extent_num_bytes for now */
1053 btrfs_set_file_extent_num_bytes(leaf, extent_item,
1054 ordered_extent->len);
1055 btrfs_set_file_extent_ram_bytes(leaf, extent_item,
1056 ordered_extent->len);
1057 btrfs_mark_buffer_dirty(leaf);
1059 btrfs_drop_extent_cache(inode, ordered_extent->file_offset,
1060 ordered_extent->file_offset +
1061 ordered_extent->len - 1, 0);
1062 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1064 ins.objectid = ordered_extent->start;
1065 ins.offset = ordered_extent->disk_len;
1066 ins.type = BTRFS_EXTENT_ITEM_KEY;
1067 ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
1068 root->root_key.objectid,
1069 trans->transid, inode->i_ino, &ins);
1071 btrfs_release_path(root, path);
1073 inode_add_bytes(inode, ordered_extent->len);
1074 unlock_extent(io_tree, ordered_extent->file_offset,
1075 ordered_extent->file_offset + ordered_extent->len - 1,
1078 add_pending_csums(trans, inode, ordered_extent->file_offset,
1079 &ordered_extent->list);
1081 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1082 btrfs_ordered_update_i_size(inode, ordered_extent);
1083 btrfs_update_inode(trans, root, inode);
1084 btrfs_remove_ordered_extent(inode, ordered_extent);
1085 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1088 btrfs_put_ordered_extent(ordered_extent);
1089 /* once for the tree */
1090 btrfs_put_ordered_extent(ordered_extent);
1092 btrfs_end_transaction(trans, root);
1094 btrfs_free_path(path);
1098 int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1099 struct extent_state *state, int uptodate)
1101 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1105 * When IO fails, either with EIO or a csum verification failure, we
1106 * try other mirrors that might have a good copy of the data. This
1107 * io_failure_record is used to record state as we go through all the
1108 * mirrors. If another mirror has good data, the page is set up to date
1109 * and things continue. If a good mirror can't be found, the original
1110 * bio end_io callback is called to indicate things have failed.
1112 struct io_failure_record {
1120 int btrfs_io_failed_hook(struct bio *failed_bio,
1121 struct page *page, u64 start, u64 end,
1122 struct extent_state *state)
1124 struct io_failure_record *failrec = NULL;
1126 struct extent_map *em;
1127 struct inode *inode = page->mapping->host;
1128 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1129 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1135 unsigned long bio_flags = 0;
1137 ret = get_state_private(failure_tree, start, &private);
1139 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1142 failrec->start = start;
1143 failrec->len = end - start + 1;
1144 failrec->last_mirror = 0;
1146 spin_lock(&em_tree->lock);
1147 em = lookup_extent_mapping(em_tree, start, failrec->len);
1148 if (em->start > start || em->start + em->len < start) {
1149 free_extent_map(em);
1152 spin_unlock(&em_tree->lock);
1154 if (!em || IS_ERR(em)) {
1158 logical = start - em->start;
1159 logical = em->block_start + logical;
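/*
 * translate the file offset of the failed range into the logical disk
 * address it was read from, using the extent mapping
 */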
1160 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
1161 bio_flags = EXTENT_BIO_COMPRESSED;
1162 failrec->logical = logical;
1163 free_extent_map(em);
1164 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1165 EXTENT_DIRTY, GFP_NOFS);
1166 set_state_private(failure_tree, start,
1167 (u64)(unsigned long)failrec);
1169 failrec = (struct io_failure_record *)(unsigned long)private;
1171 num_copies = btrfs_num_copies(
1172 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1173 failrec->logical, failrec->len);
1174 failrec->last_mirror++;
1176 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
1177 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1180 if (state && state->start != failrec->start)
1182 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
1184 if (!state || failrec->last_mirror > num_copies) {
1185 set_state_private(failure_tree, failrec->start, 0);
1186 clear_extent_bits(failure_tree, failrec->start,
1187 failrec->start + failrec->len - 1,
1188 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1192 bio = bio_alloc(GFP_NOFS, 1);
1193 bio->bi_private = state;
1194 bio->bi_end_io = failed_bio->bi_end_io;
1195 bio->bi_sector = failrec->logical >> 9;
1196 bio->bi_bdev = failed_bio->bi_bdev;
1198 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1199 if (failed_bio->bi_rw & (1 << BIO_RW))
1204 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1205 failrec->last_mirror,
1211 * each time an IO finishes, we do a fast check in the IO failure tree
1212 * to see if we need to process or clean up an io_failure_record
1214 int btrfs_clean_io_failures(struct inode *inode, u64 start)
1217 u64 private_failure;
1218 struct io_failure_record *failure;
1222 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1223 (u64)-1, 1, EXTENT_DIRTY)) {
1224 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1225 start, &private_failure);
1227 failure = (struct io_failure_record *)(unsigned long)
1229 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1231 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1233 failure->start + failure->len - 1,
1234 EXTENT_DIRTY | EXTENT_LOCKED,
1243 * when reads are done, we need to check csums to verify the data is correct.
1244 * If there's a match, we allow the bio to finish. If not, we go through
1245 * the io_failure_record routines to find good copies
1247 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1248 struct extent_state *state)
1250 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
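/*
 * page->index << PAGE_CACHE_SHIFT is the file offset of this page, so
 * offset is where this range starts within the page
 */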
1251 struct inode *inode = page->mapping->host;
1252 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1254 u64 private = ~(u32)0;
1256 struct btrfs_root *root = BTRFS_I(inode)->root;
1258 unsigned long flags;
1260 if (btrfs_test_opt(root, NODATASUM) ||
1261 btrfs_test_flag(inode, NODATASUM))
1263 if (state && state->start == start) {
1264 private = state->private;
1267 ret = get_state_private(io_tree, start, &private);
1269 local_irq_save(flags);
1270 kaddr = kmap_atomic(page, KM_IRQ0);
1274 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1275 btrfs_csum_final(csum, (char *)&csum);
1276 if (csum != private) {
1279 kunmap_atomic(kaddr, KM_IRQ0);
1280 local_irq_restore(flags);
1282 /* if the io failure tree for this inode is non-empty,
1283 * check to see if we've recovered from a failed IO
1285 btrfs_clean_io_failures(inode, start);
1289 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
1290 page->mapping->host->i_ino, (unsigned long long)start, csum,
1292 memset(kaddr + offset, 1, end - start + 1);
1293 flush_dcache_page(page);
1294 kunmap_atomic(kaddr, KM_IRQ0);
1295 local_irq_restore(flags);
1302 * This creates an orphan entry for the given inode in case something goes
1303 * wrong in the middle of an unlink/truncate.
1305 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1307 struct btrfs_root *root = BTRFS_I(inode)->root;
1310 spin_lock(&root->list_lock);
1312 /* already on the orphan list, we're good */
1313 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1314 spin_unlock(&root->list_lock);
1318 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1320 spin_unlock(&root->list_lock);
1323 * insert an orphan item to track this unlinked/truncated file
1325 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1331 * We have done the truncate/delete so we can go ahead and remove the orphan
1332 * item for this particular inode.
1334 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1336 struct btrfs_root *root = BTRFS_I(inode)->root;
1339 spin_lock(&root->list_lock);
1341 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1342 spin_unlock(&root->list_lock);
1346 list_del_init(&BTRFS_I(inode)->i_orphan);
1348 spin_unlock(&root->list_lock);
1352 spin_unlock(&root->list_lock);
1354 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1360 * this cleans up any orphans that may be left on the list from the last use
1363 void btrfs_orphan_cleanup(struct btrfs_root *root)
1365 struct btrfs_path *path;
1366 struct extent_buffer *leaf;
1367 struct btrfs_item *item;
1368 struct btrfs_key key, found_key;
1369 struct btrfs_trans_handle *trans;
1370 struct inode *inode;
1371 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1373 /* don't do orphan cleanup if the fs is readonly. */
1374 if (root->fs_info->sb->s_flags & MS_RDONLY)
1377 path = btrfs_alloc_path();
1382 key.objectid = BTRFS_ORPHAN_OBJECTID;
1383 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1384 key.offset = (u64)-1;
1388 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1390 printk(KERN_ERR "Error searching slot for orphan: %d"
1396 * if ret == 0, it means we found what we were searching for, which
1397 * is weird, but possible, so only screw with the path if we didn't
1398 * find the key and see if we have stuff that matches
1401 if (path->slots[0] == 0)
1406 /* pull out the item */
1407 leaf = path->nodes[0];
1408 item = btrfs_item_nr(leaf, path->slots[0]);
1409 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1411 /* make sure the item matches what we want */
1412 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1414 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1417 /* release the path since we're done with it */
1418 btrfs_release_path(root, path);
1421 * this is where we are basically btrfs_lookup, without the
1422 * crossing root thing. we store the inode number in the
1423 * offset of the orphan item.
1425 inode = btrfs_iget_locked(root->fs_info->sb,
1426 found_key.offset, root);
1430 if (inode->i_state & I_NEW) {
1431 BTRFS_I(inode)->root = root;
1433 /* have to set the location manually */
1434 BTRFS_I(inode)->location.objectid = inode->i_ino;
1435 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1436 BTRFS_I(inode)->location.offset = 0;
1438 btrfs_read_locked_inode(inode);
1439 unlock_new_inode(inode);
1443 * add this inode to the orphan list so btrfs_orphan_del does
1444 * the proper thing when we hit it
1446 spin_lock(&root->list_lock);
1447 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1448 spin_unlock(&root->list_lock);
1451 * if this is a bad inode, it means we actually succeeded in
1452 * removing the inode, but not the orphan record, which means
1453 * we need to manually delete the orphan since iput will just
1454 * do a destroy_inode
1456 if (is_bad_inode(inode)) {
1457 trans = btrfs_start_transaction(root, 1);
1458 btrfs_orphan_del(trans, inode);
1459 btrfs_end_transaction(trans, root);
1464 /* if we have links, this was a truncate, let's do that */
1465 if (inode->i_nlink) {
1467 btrfs_truncate(inode);
1472 /* this will do delete_inode and everything for us */
1477 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
1479 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
1481 btrfs_free_path(path);
1485 * read an inode from the btree into the in-memory inode
1487 void btrfs_read_locked_inode(struct inode *inode)
1489 struct btrfs_path *path;
1490 struct extent_buffer *leaf;
1491 struct btrfs_inode_item *inode_item;
1492 struct btrfs_timespec *tspec;
1493 struct btrfs_root *root = BTRFS_I(inode)->root;
1494 struct btrfs_key location;
1495 u64 alloc_group_block;
1499 path = btrfs_alloc_path();
1501 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
1503 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
1507 leaf = path->nodes[0];
1508 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1509 struct btrfs_inode_item);
1511 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
1512 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
1513 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
1514 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
1515 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
1517 tspec = btrfs_inode_atime(inode_item);
1518 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1519 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
1521 tspec = btrfs_inode_mtime(inode_item);
1522 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1523 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
1525 tspec = btrfs_inode_ctime(inode_item);
1526 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
1527 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
1529 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
1530 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
1531 inode->i_generation = BTRFS_I(inode)->generation;
1533 rdev = btrfs_inode_rdev(leaf, inode_item);
1535 BTRFS_I(inode)->index_cnt = (u64)-1;
1537 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
1538 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
1540 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
1541 if (!BTRFS_I(inode)->block_group) {
1542 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
1544 BTRFS_BLOCK_GROUP_METADATA, 0);
1546 btrfs_free_path(path);
1549 switch (inode->i_mode & S_IFMT) {
1551 inode->i_mapping->a_ops = &btrfs_aops;
1552 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1553 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1554 inode->i_fop = &btrfs_file_operations;
1555 inode->i_op = &btrfs_file_inode_operations;
1558 inode->i_fop = &btrfs_dir_file_operations;
1559 if (root == root->fs_info->tree_root)
1560 inode->i_op = &btrfs_dir_ro_inode_operations;
1562 inode->i_op = &btrfs_dir_inode_operations;
1565 inode->i_op = &btrfs_symlink_inode_operations;
1566 inode->i_mapping->a_ops = &btrfs_symlink_aops;
1567 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1570 init_special_inode(inode, inode->i_mode, rdev);
1576 btrfs_free_path(path);
1577 make_bad_inode(inode);
1581 * given a leaf and an inode, copy the inode fields into the leaf
1583 static void fill_inode_item(struct btrfs_trans_handle *trans,
1584 struct extent_buffer *leaf,
1585 struct btrfs_inode_item *item,
1586 struct inode *inode)
1588 btrfs_set_inode_uid(leaf, item, inode->i_uid);
1589 btrfs_set_inode_gid(leaf, item, inode->i_gid);
1590 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
1591 btrfs_set_inode_mode(leaf, item, inode->i_mode);
1592 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
1594 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
1595 inode->i_atime.tv_sec);
1596 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
1597 inode->i_atime.tv_nsec);
1599 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
1600 inode->i_mtime.tv_sec);
1601 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
1602 inode->i_mtime.tv_nsec);
1604 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
1605 inode->i_ctime.tv_sec);
1606 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
1607 inode->i_ctime.tv_nsec);
1609 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
1610 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
1611 btrfs_set_inode_transid(leaf, item, trans->transid);
1612 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
1613 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
1614 btrfs_set_inode_block_group(leaf, item,
1615 BTRFS_I(inode)->block_group->key.objectid);
1619 * copy everything in the in-memory inode into the btree.
1621 int noinline btrfs_update_inode(struct btrfs_trans_handle *trans,
1622 struct btrfs_root *root,
1623 struct inode *inode)
1625 struct btrfs_inode_item *inode_item;
1626 struct btrfs_path *path;
1627 struct extent_buffer *leaf;
1630 path = btrfs_alloc_path();
1632 ret = btrfs_lookup_inode(trans, root, path,
1633 &BTRFS_I(inode)->location, 1);
1640 leaf = path->nodes[0];
1641 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1642 struct btrfs_inode_item);
1644 fill_inode_item(trans, leaf, inode_item, inode);
1645 btrfs_mark_buffer_dirty(leaf);
1646 btrfs_set_inode_last_trans(trans, inode);
1649 btrfs_free_path(path);
1655 * unlink helper that gets used here in inode.c and in the tree logging
1656 * recovery code. It removes a link in a directory with a given name, and
1657 * also drops the back refs in the inode to the directory
1659 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
1660 struct btrfs_root *root,
1661 struct inode *dir, struct inode *inode,
1662 const char *name, int name_len)
1664 struct btrfs_path *path;
1666 struct extent_buffer *leaf;
1667 struct btrfs_dir_item *di;
1668 struct btrfs_key key;
1671 path = btrfs_alloc_path();
1677 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
1678 name, name_len, -1);
1687 leaf = path->nodes[0];
1688 btrfs_dir_item_key_to_cpu(leaf, di, &key);
1689 ret = btrfs_delete_one_dir_name(trans, root, path, di);
1692 btrfs_release_path(root, path);
1694 ret = btrfs_del_inode_ref(trans, root, name, name_len,
1696 dir->i_ino, &index);
1698 printk("failed to delete reference to %.*s, "
1699 "inode %lu parent %lu\n", name_len, name,
1700 inode->i_ino, dir->i_ino);
1704 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
1705 index, name, name_len, -1);
1714 ret = btrfs_delete_one_dir_name(trans, root, path, di);
1715 btrfs_release_path(root, path);
1717 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
1719 BUG_ON(ret != 0 && ret != -ENOENT);
1721 BTRFS_I(dir)->log_dirty_trans = trans->transid;
1723 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
1727 btrfs_free_path(path);
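/*
 * each name is accounted twice in the directory i_size (once for the
 * dir item and once for the dir index item), hence name_len * 2 below
 */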
1731 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
1732 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
1733 btrfs_update_inode(trans, root, dir);
1734 btrfs_drop_nlink(inode);
1735 ret = btrfs_update_inode(trans, root, inode);
1736 dir->i_sb->s_dirt = 1;
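/*
 * vfs unlink hook: start a transaction, drop the directory entry and
 * back refs with btrfs_unlink_inode, and add an orphan item once the
 * link count reaches zero so a crash can't leak the unlinked inode
 */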
1741 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
1743 struct btrfs_root *root;
1744 struct btrfs_trans_handle *trans;
1745 struct inode *inode = dentry->d_inode;
1747 unsigned long nr = 0;
1749 root = BTRFS_I(dir)->root;
1751 ret = btrfs_check_free_space(root, 1, 1);
1755 trans = btrfs_start_transaction(root, 1);
1757 btrfs_set_trans_block_group(trans, dir);
1758 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
1759 dentry->d_name.name, dentry->d_name.len);
1761 if (inode->i_nlink == 0)
1762 ret = btrfs_orphan_add(trans, inode);
1764 nr = trans->blocks_used;
1766 btrfs_end_transaction_throttle(trans, root);
1768 btrfs_btree_balance_dirty(root, nr);
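/*
 * vfs rmdir hook: non-empty directories (i_size above the empty dir
 * size) are refused, otherwise the directory gets an orphan item, has
 * its entry unlinked from the parent and its size reset to zero
 */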
1772 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
1774 struct inode *inode = dentry->d_inode;
1777 struct btrfs_root *root = BTRFS_I(dir)->root;
1778 struct btrfs_trans_handle *trans;
1779 unsigned long nr = 0;
1781 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
1785 ret = btrfs_check_free_space(root, 1, 1);
1789 trans = btrfs_start_transaction(root, 1);
1790 btrfs_set_trans_block_group(trans, dir);
1792 err = btrfs_orphan_add(trans, inode);
1796 /* now the directory is empty */
1797 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
1798 dentry->d_name.name, dentry->d_name.len);
1800 btrfs_i_size_write(inode, 0);
1804 nr = trans->blocks_used;
1805 ret = btrfs_end_transaction_throttle(trans, root);
1807 btrfs_btree_balance_dirty(root, nr);
1815 * when truncating bytes in a file, it is possible to avoid reading
1816 * the leaves that contain only checksum items. This can be the
1817 * majority of the IO required to delete a large file, but it must
1818 * be done carefully.
1820 * The keys in the level just above the leaves are checked to make sure
1821 * the lowest key in a given leaf is a csum key, and starts at an offset
1822 * after the new size.
1824 * Then the key for the next leaf is checked to make sure it also has
1825 * a checksum item for the same file. If it does, we know our target leaf
1826 * contains only checksum items, and it can be safely freed without reading
1829 * This is just an optimization targeted at large files. It may do
1830 * nothing. It will return 0 unless things went badly.
1832 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
1833 struct btrfs_root *root,
1834 struct btrfs_path *path,
1835 struct inode *inode, u64 new_size)
1837 struct btrfs_key key;
1840 struct btrfs_key found_key;
1841 struct btrfs_key other_key;
1842 struct btrfs_leaf_ref *ref;
1846 path->lowest_level = 1;
1847 key.objectid = inode->i_ino;
1848 key.type = BTRFS_CSUM_ITEM_KEY;
1849 key.offset = new_size;
1851 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1855 if (path->nodes[1] == NULL) {
1860 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
1861 nritems = btrfs_header_nritems(path->nodes[1]);
1866 if (path->slots[1] >= nritems)
1869 /* did we find a key greater than anything we want to delete? */
1870 if (found_key.objectid > inode->i_ino ||
1871 (found_key.objectid == inode->i_ino && found_key.type > key.type))
1874 /* we check the next key in the node to make sure the leaf contains
1875 * only checksum items. This comparison doesn't work if our
1876 * leaf is the last one in the node
1878 if (path->slots[1] + 1 >= nritems) {
1880 /* search forward from the last key in the node, this
1881 * will bring us into the next node in the tree
1883 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
1885 /* unlikely, but we inc below, so check to be safe */
1886 if (found_key.offset == (u64)-1)
1889 /* search_forward needs a path with locks held, do the
1890 * search again for the original key. It is possible
1891 * this will race with a balance and return a path that
1892 * we could modify, but this drop is just an optimization
1893 * and is allowed to miss some leaves.
1895 btrfs_release_path(root, path);
1898 /* setup a max key for search_forward */
1899 other_key.offset = (u64)-1;
1900 other_key.type = key.type;
1901 other_key.objectid = key.objectid;
1903 path->keep_locks = 1;
1904 ret = btrfs_search_forward(root, &found_key, &other_key,
1906 path->keep_locks = 0;
1907 if (ret || found_key.objectid != key.objectid ||
1908 found_key.type != key.type) {
1913 key.offset = found_key.offset;
1914 btrfs_release_path(root, path);
1919 /* we know there's one more slot after us in the tree,
1920 * read that key so we can verify it is also a checksum item
1922 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
1924 if (found_key.objectid < inode->i_ino)
1927 if (found_key.type != key.type || found_key.offset < new_size)
1931 * if the key for the next leaf isn't a csum key from this objectid,
1932 * we can't be sure there aren't good items inside this leaf.
1935 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
1938 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
1939 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
1941 * it is safe to delete this leaf, it contains only
1942 * csum items from this inode at an offset >= new_size
1944 ret = btrfs_del_leaf(trans, root, path, leaf_start);
1947 if (root->ref_cows && leaf_gen < trans->transid) {
1948 ref = btrfs_alloc_leaf_ref(root, 0);
1950 ref->root_gen = root->root_key.offset;
1951 ref->bytenr = leaf_start;
1953 ref->generation = leaf_gen;
1956 ret = btrfs_add_leaf_ref(root, ref, 0);
1958 btrfs_free_leaf_ref(root, ref);
1964 btrfs_release_path(root, path);
1966 if (other_key.objectid == inode->i_ino &&
1967 other_key.type == key.type && other_key.offset > key.offset) {
1968 key.offset = other_key.offset;
1974 /* fixup any changes we've made to the path */
1975 path->lowest_level = 0;
1976 path->keep_locks = 0;
1977 btrfs_release_path(root, path);
1982 * this can truncate away extent items, csum items and directory items.
1983 * It starts at a high offset and removes keys until it can't find
1984 * any higher than new_size
1986 * csum items that cross the new i_size are truncated to the new size
1989 * min_type is the minimum key type to truncate down to. If set to 0, this
1990 * will kill all the items on this inode, including the INODE_ITEM_KEY.
1992 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
1993 struct btrfs_root *root,
1994 struct inode *inode,
1995 u64 new_size, u32 min_type)
1998 struct btrfs_path *path;
1999 struct btrfs_key key;
2000 struct btrfs_key found_key;
2002 struct extent_buffer *leaf;
2003 struct btrfs_file_extent_item *fi;
2004 u64 extent_start = 0;
2005 u64 extent_num_bytes = 0;
2011 int pending_del_nr = 0;
2012 int pending_del_slot = 0;
2013 int extent_type = -1;
2014 u64 mask = root->sectorsize - 1;
2017 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2018 path = btrfs_alloc_path();
2022 /* FIXME, add redo link to tree so we don't leak on crash */
2023 key.objectid = inode->i_ino;
2024 key.offset = (u64)-1;
2027 btrfs_init_path(path);
2029 ret = drop_csum_leaves(trans, root, path, inode, new_size);
2033 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2038 /* there are no items in the tree for us to truncate, we're
2041 if (path->slots[0] == 0) {
2050 leaf = path->nodes[0];
2051 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2052 found_type = btrfs_key_type(&found_key);
2054 if (found_key.objectid != inode->i_ino)
2057 if (found_type < min_type)
2060 item_end = found_key.offset;
2061 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2062 fi = btrfs_item_ptr(leaf, path->slots[0],
2063 struct btrfs_file_extent_item);
2064 extent_type = btrfs_file_extent_type(leaf, fi);
2065 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2067 btrfs_file_extent_num_bytes(leaf, fi);
2068 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2069 item_end += btrfs_file_extent_inline_len(leaf,
2074 if (found_type == BTRFS_CSUM_ITEM_KEY) {
2075 ret = btrfs_csum_truncate(trans, root, path,
2079 if (item_end < new_size) {
2080 if (found_type == BTRFS_DIR_ITEM_KEY) {
2081 found_type = BTRFS_INODE_ITEM_KEY;
2082 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
2083 found_type = BTRFS_CSUM_ITEM_KEY;
2084 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
2085 found_type = BTRFS_XATTR_ITEM_KEY;
2086 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
2087 found_type = BTRFS_INODE_REF_KEY;
2088 } else if (found_type) {
2093 btrfs_set_key_type(&key, found_type);
2096 if (found_key.offset >= new_size)
2102 /* FIXME, shrink the extent if the ref count is only 1 */
2103 if (found_type != BTRFS_EXTENT_DATA_KEY)
2106 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2108 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2110 u64 orig_num_bytes =
2111 btrfs_file_extent_num_bytes(leaf, fi);
2112 extent_num_bytes = new_size -
2113 found_key.offset + root->sectorsize - 1;
2114 extent_num_bytes = extent_num_bytes &
2115 ~((u64)root->sectorsize - 1);
2116 btrfs_set_file_extent_num_bytes(leaf, fi,
2118 num_dec = (orig_num_bytes -
2120 if (root->ref_cows && extent_start != 0)
2121 inode_sub_bytes(inode, num_dec);
2122 btrfs_mark_buffer_dirty(leaf);
2125 btrfs_file_extent_disk_num_bytes(leaf,
2127 /* FIXME blocksize != 4096 */
2128 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2129 if (extent_start != 0) {
2132 inode_sub_bytes(inode, num_dec);
2134 root_gen = btrfs_header_generation(leaf);
2135 root_owner = btrfs_header_owner(leaf);
2137 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2139 * we can't truncate inline items that have had
2143 btrfs_file_extent_compression(leaf, fi) == 0 &&
2144 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2145 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2146 u32 size = new_size - found_key.offset;
2148 if (root->ref_cows) {
2149 inode_sub_bytes(inode, item_end + 1 -
2153 btrfs_file_extent_calc_inline_size(size);
2154 ret = btrfs_truncate_item(trans, root, path,
2157 } else if (root->ref_cows) {
2158 inode_sub_bytes(inode, item_end + 1 -
2164 if (!pending_del_nr) {
2165 /* no pending yet, add ourselves */
2166 pending_del_slot = path->slots[0];
2168 } else if (pending_del_nr &&
2169 path->slots[0] + 1 == pending_del_slot) {
2170 /* hop on the pending chunk */
2172 pending_del_slot = path->slots[0];
2174 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
2180 ret = btrfs_free_extent(trans, root, extent_start,
2182 leaf->start, root_owner,
2183 root_gen, inode->i_ino, 0);
2187 if (path->slots[0] == 0) {
2190 btrfs_release_path(root, path);
2195 if (pending_del_nr &&
2196 path->slots[0] + 1 != pending_del_slot) {
2197 struct btrfs_key debug;
2199 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2201 ret = btrfs_del_items(trans, root, path,
2206 btrfs_release_path(root, path);
2212 if (pending_del_nr) {
2213 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2216 btrfs_free_path(path);
2217 inode->i_sb->s_dirt = 1;
2222 * taken from block_truncate_page, but does cow as it zeros out
2223 * any bytes left in the last page in the file.
2225 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2227 struct inode *inode = mapping->host;
2228 struct btrfs_root *root = BTRFS_I(inode)->root;
2229 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2230 struct btrfs_ordered_extent *ordered;
2232 u32 blocksize = root->sectorsize;
2233 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2234 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2240 if ((offset & (blocksize - 1)) == 0)
2245 page = grab_cache_page(mapping, index);
2249 page_start = page_offset(page);
2250 page_end = page_start + PAGE_CACHE_SIZE - 1;
2252 if (!PageUptodate(page)) {
2253 ret = btrfs_readpage(NULL, page);
2255 if (page->mapping != mapping) {
2257 page_cache_release(page);
2260 if (!PageUptodate(page)) {
2265 wait_on_page_writeback(page);
2267 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2268 set_page_extent_mapped(page);
2270 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2272 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2274 page_cache_release(page);
2275 btrfs_start_ordered_extent(inode, ordered, 1);
2276 btrfs_put_ordered_extent(ordered);
2280 btrfs_set_extent_delalloc(inode, page_start, page_end);
2282 if (offset != PAGE_CACHE_SIZE) {
2284 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2285 flush_dcache_page(page);
2288 ClearPageChecked(page);
2289 set_page_dirty(page);
2290 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2294 page_cache_release(page);
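/*
 * setattr: for a size-extending truncate this zeroes the tail of the
 * old last page and fills the gap between the old and new EOF with a
 * hole extent before handing the rest off to inode_setattr
 */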
2299 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2301 struct inode *inode = dentry->d_inode;
2304 err = inode_change_ok(inode, attr);
2308 if (S_ISREG(inode->i_mode) &&
2309 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2310 struct btrfs_trans_handle *trans;
2311 struct btrfs_root *root = BTRFS_I(inode)->root;
2312 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2314 u64 mask = root->sectorsize - 1;
2315 u64 hole_start = (inode->i_size + mask) & ~mask;
2316 u64 block_end = (attr->ia_size + mask) & ~mask;
2320 if (attr->ia_size <= hole_start)
2323 err = btrfs_check_free_space(root, 1, 0);
2327 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2329 hole_size = block_end - hole_start;
2331 struct btrfs_ordered_extent *ordered;
2332 btrfs_wait_ordered_range(inode, hole_start, hole_size);
2334 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2335 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2337 unlock_extent(io_tree, hole_start,
2338 block_end - 1, GFP_NOFS);
2339 btrfs_put_ordered_extent(ordered);
2345 trans = btrfs_start_transaction(root, 1);
2346 btrfs_set_trans_block_group(trans, inode);
2347 mutex_lock(&BTRFS_I(inode)->extent_mutex);
2348 err = btrfs_drop_extents(trans, root, inode,
2349 hole_start, block_end, hole_start,
2352 if (alloc_hint != EXTENT_MAP_INLINE) {
2353 err = btrfs_insert_file_extent(trans, root,
2356 hole_size, 0, hole_size,
2358 btrfs_drop_extent_cache(inode, hole_start,
2360 btrfs_check_file(root, inode);
2362 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
2363 btrfs_end_transaction(trans, root);
2364 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2369 err = inode_setattr(inode, attr);
2371 if (!err && ((attr->ia_valid & ATTR_MODE)))
2372 err = btrfs_acl_chmod(inode);
2377 void btrfs_delete_inode(struct inode *inode)
2379 struct btrfs_trans_handle *trans;
2380 struct btrfs_root *root = BTRFS_I(inode)->root;
2384 truncate_inode_pages(&inode->i_data, 0);
2385 if (is_bad_inode(inode)) {
2386 btrfs_orphan_del(NULL, inode);
2389 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2391 btrfs_i_size_write(inode, 0);
2392 trans = btrfs_start_transaction(root, 1);
2394 btrfs_set_trans_block_group(trans, inode);
2395 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2397 btrfs_orphan_del(NULL, inode);
2398 goto no_delete_lock;
2401 btrfs_orphan_del(trans, inode);
2403 nr = trans->blocks_used;
2406 btrfs_end_transaction(trans, root);
2407 btrfs_btree_balance_dirty(root, nr);
2411 nr = trans->blocks_used;
2412 btrfs_end_transaction(trans, root);
2413 btrfs_btree_balance_dirty(root, nr);
2419 * this returns the key found in the dir entry in the location pointer.
2420 * If no dir entries were found, location->objectid is 0.
2422 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2423 struct btrfs_key *location)
2425 const char *name = dentry->d_name.name;
2426 int namelen = dentry->d_name.len;
2427 struct btrfs_dir_item *di;
2428 struct btrfs_path *path;
2429 struct btrfs_root *root = BTRFS_I(dir)->root;
2432 path = btrfs_alloc_path();
2435 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2439 if (!di || IS_ERR(di)) {
2442 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2444 btrfs_free_path(path);
2447 location->objectid = 0;
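/*
 * (illustrative sketch, not in the original source) btrfs_lookup() below is
 * the main caller; stripped of error handling the pattern is:
 *
 *	ret = btrfs_inode_by_name(dir, dentry, &location);
 *	if (!ret && location.objectid)
 *		inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
 *
 * where sub_root comes from fixup_tree_root_location() below.
 */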
2452 * when we hit a tree root in a directory, the btrfs part of the inode
2453 * needs to be changed to reflect the root directory of the tree root. This
2454 * is kind of like crossing a mount point.
2456 static int fixup_tree_root_location(struct btrfs_root *root,
2457 struct btrfs_key *location,
2458 struct btrfs_root **sub_root,
2459 struct dentry *dentry)
2461 struct btrfs_root_item *ri;
2463 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2465 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2468 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2469 dentry->d_name.name,
2470 dentry->d_name.len);
2471 if (IS_ERR(*sub_root))
2472 return PTR_ERR(*sub_root);
2474 ri = &(*sub_root)->root_item;
2475 location->objectid = btrfs_root_dirid(ri);
2476 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2477 location->offset = 0;
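/*
 * (added note, not in the original source) a dir entry that points at a
 * subvolume stores a BTRFS_ROOT_ITEM_KEY; this helper swaps it for the key
 * of the subvolume's own root directory so the normal inode lookup can
 * continue inside *sub_root, roughly:
 *
 *	(tree objectid, ROOT_ITEM, offset)  ->  (root_dirid, INODE_ITEM, 0)
 */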
2482 static noinline void init_btrfs_i(struct inode *inode)
2484 struct btrfs_inode *bi = BTRFS_I(inode);
2487 bi->i_default_acl = NULL;
2491 bi->logged_trans = 0;
2492 bi->delalloc_bytes = 0;
2493 bi->disk_i_size = 0;
2495 bi->index_cnt = (u64)-1;
2496 bi->log_dirty_trans = 0;
2497 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2498 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2499 inode->i_mapping, GFP_NOFS);
2500 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2501 inode->i_mapping, GFP_NOFS);
2502 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
2503 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
2504 mutex_init(&BTRFS_I(inode)->csum_mutex);
2505 mutex_init(&BTRFS_I(inode)->extent_mutex);
2506 mutex_init(&BTRFS_I(inode)->log_mutex);
2509 static int btrfs_init_locked_inode(struct inode *inode, void *p)
2511 struct btrfs_iget_args *args = p;
2512 inode->i_ino = args->ino;
2513 init_btrfs_i(inode);
2514 BTRFS_I(inode)->root = args->root;
2518 static int btrfs_find_actor(struct inode *inode, void *opaque)
2520 struct btrfs_iget_args *args = opaque;
2521 return (args->ino == inode->i_ino &&
2522 args->root == BTRFS_I(inode)->root);
2525 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
2526 struct btrfs_root *root, int wait)
2528 struct inode *inode;
2529 struct btrfs_iget_args args;
2530 args.ino = objectid;
2534 inode = ilookup5(s, objectid, btrfs_find_actor,
2537 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
2543 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
2544 struct btrfs_root *root)
2546 struct inode *inode;
2547 struct btrfs_iget_args args;
2548 args.ino = objectid;
2551 inode = iget5_locked(s, objectid, btrfs_find_actor,
2552 btrfs_init_locked_inode,
2557 /* Get an inode object given its location and corresponding root.
2558 * Returns in *is_new if the inode was read from disk
2560 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
2561 struct btrfs_root *root, int *is_new)
2563 struct inode *inode;
2565 inode = btrfs_iget_locked(s, location->objectid, root);
2567 return ERR_PTR(-EACCES);
2569 if (inode->i_state & I_NEW) {
2570 BTRFS_I(inode)->root = root;
2571 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
2572 btrfs_read_locked_inode(inode);
2573 unlock_new_inode(inode);
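/*
 * (added note, not in the original source) the inode cache is keyed on both
 * the objectid and the owning root (see btrfs_find_actor() and
 * btrfs_init_locked_inode() above), so the same objectid in two different
 * subvolumes resolves to two distinct in-core inodes.
 */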
2584 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
2585 struct nameidata *nd)
2587 struct inode * inode;
2588 struct btrfs_inode *bi = BTRFS_I(dir);
2589 struct btrfs_root *root = bi->root;
2590 struct btrfs_root *sub_root = root;
2591 struct btrfs_key location;
2592 int ret, new, do_orphan = 0;
2594 if (dentry->d_name.len > BTRFS_NAME_LEN)
2595 return ERR_PTR(-ENAMETOOLONG);
2597 ret = btrfs_inode_by_name(dir, dentry, &location);
2600 return ERR_PTR(ret);
2603 if (location.objectid) {
2604 ret = fixup_tree_root_location(root, &location, &sub_root,
2607 return ERR_PTR(ret);
2609 return ERR_PTR(-ENOENT);
2610 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
2612 return ERR_CAST(inode);
2614 /* the inode and parent dir are in two different roots */
2615 if (new && root != sub_root) {
2617 sub_root->inode = inode;
2622 if (unlikely(do_orphan))
2623 btrfs_orphan_cleanup(sub_root);
2625 return d_splice_alias(inode, dentry);
2628 static unsigned char btrfs_filetype_table[] = {
2629 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
2632 static int btrfs_real_readdir(struct file *filp, void *dirent,
2635 struct inode *inode = filp->f_dentry->d_inode;
2636 struct btrfs_root *root = BTRFS_I(inode)->root;
2637 struct btrfs_item *item;
2638 struct btrfs_dir_item *di;
2639 struct btrfs_key key;
2640 struct btrfs_key found_key;
2641 struct btrfs_path *path;
2644 struct extent_buffer *leaf;
2647 unsigned char d_type;
2652 int key_type = BTRFS_DIR_INDEX_KEY;
2657 /* FIXME, use a real flag for deciding about the key type */
2658 if (root->fs_info->tree_root == root)
2659 key_type = BTRFS_DIR_ITEM_KEY;
2661 /* special case for "." */
2662 if (filp->f_pos == 0) {
2663 over = filldir(dirent, ".", 1,
2670 /* special case for .., just use the back ref */
2671 if (filp->f_pos == 1) {
2672 u64 pino = parent_ino(filp->f_path.dentry);
2673 over = filldir(dirent, "..", 2,
2680 path = btrfs_alloc_path();
2683 btrfs_set_key_type(&key, key_type);
2684 key.offset = filp->f_pos;
2685 key.objectid = inode->i_ino;
2687 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2693 leaf = path->nodes[0];
2694 nritems = btrfs_header_nritems(leaf);
2695 slot = path->slots[0];
2696 if (advance || slot >= nritems) {
2697 if (slot >= nritems - 1) {
2698 ret = btrfs_next_leaf(root, path);
2701 leaf = path->nodes[0];
2702 nritems = btrfs_header_nritems(leaf);
2703 slot = path->slots[0];
2710 item = btrfs_item_nr(leaf, slot);
2711 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2713 if (found_key.objectid != key.objectid)
2715 if (btrfs_key_type(&found_key) != key_type)
2717 if (found_key.offset < filp->f_pos)
2720 filp->f_pos = found_key.offset;
2722 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
2724 di_total = btrfs_item_size(leaf, item);
2726 while (di_cur < di_total) {
2727 struct btrfs_key location;
2729 name_len = btrfs_dir_name_len(leaf, di);
2730 if (name_len <= sizeof(tmp_name)) {
2731 name_ptr = tmp_name;
2733 name_ptr = kmalloc(name_len, GFP_NOFS);
2739 read_extent_buffer(leaf, name_ptr,
2740 (unsigned long)(di + 1), name_len);
2742 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
2743 btrfs_dir_item_key_to_cpu(leaf, di, &location);
2744 over = filldir(dirent, name_ptr, name_len,
2745 found_key.offset, location.objectid,
2748 if (name_ptr != tmp_name)
2754 di_len = btrfs_dir_name_len(leaf, di) +
2755 btrfs_dir_data_len(leaf, di) + sizeof(*di);
2757 di = (struct btrfs_dir_item *)((char *)di + di_len);
2761 /* Reached end of directory/root. Bump pos past the last item. */
2762 if (key_type == BTRFS_DIR_INDEX_KEY)
2763 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
2769 btrfs_free_path(path);
2773 int btrfs_write_inode(struct inode *inode, int wait)
2775 struct btrfs_root *root = BTRFS_I(inode)->root;
2776 struct btrfs_trans_handle *trans;
2779 if (root->fs_info->closing > 1)
2783 trans = btrfs_join_transaction(root, 1);
2784 btrfs_set_trans_block_group(trans, inode);
2785 ret = btrfs_commit_transaction(trans, root);
2791 * This is somewhat expensive, updating the tree every time the
2792 * inode changes. But it is most likely to find the inode in cache.
2793 * FIXME, needs more benchmarking... there are no reasons other than performance
2794 * to keep or drop this code.
2796 void btrfs_dirty_inode(struct inode *inode)
2798 struct btrfs_root *root = BTRFS_I(inode)->root;
2799 struct btrfs_trans_handle *trans;
2801 trans = btrfs_join_transaction(root, 1);
2802 btrfs_set_trans_block_group(trans, inode);
2803 btrfs_update_inode(trans, root, inode);
2804 btrfs_end_transaction(trans, root);
2808 * find the highest existing sequence number in a directory
2809 * and then set the in-memory index_cnt variable to reflect
2810 * free sequence numbers
2812 static int btrfs_set_inode_index_count(struct inode *inode)
2814 struct btrfs_root *root = BTRFS_I(inode)->root;
2815 struct btrfs_key key, found_key;
2816 struct btrfs_path *path;
2817 struct extent_buffer *leaf;
2820 key.objectid = inode->i_ino;
2821 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
2822 key.offset = (u64)-1;
2824 path = btrfs_alloc_path();
2828 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2831 /* FIXME: we should be able to handle this */
2837 * MAGIC NUMBER EXPLANATION:
2838 * since we search a directory based on f_pos, and '.' and '..' use
2839 * f_pos values of 0 and 1 respectively, everything else has to
2840 * start at index 2
2842 if (path->slots[0] == 0) {
2843 BTRFS_I(inode)->index_cnt = 2;
2849 leaf = path->nodes[0];
2850 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2852 if (found_key.objectid != inode->i_ino ||
2853 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
2854 BTRFS_I(inode)->index_cnt = 2;
2858 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
2860 btrfs_free_path(path);
2865 * helper to find a free sequence number in a given directory. This current
2866 * code is very simple; later versions will do smarter things in the btree
2868 static int btrfs_set_inode_index(struct inode *dir, struct inode *inode,
2873 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
2874 ret = btrfs_set_inode_index_count(dir);
2880 *index = BTRFS_I(dir)->index_cnt;
2881 BTRFS_I(dir)->index_cnt++;
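/*
 * (illustrative sketch, not in the original source) directory sequence
 * numbers start at 2 because f_pos 0 and 1 are reserved for '.' and '..'
 * in readdir; a freshly loaded directory finds its highest DIR_INDEX key
 * and continues from there, so a typical create path does:
 *
 *	ret = btrfs_set_inode_index(dir, inode, &index);
 *	... the index is then recorded in the new dir item and inode ref ...
 */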
2886 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
2887 struct btrfs_root *root,
2889 const char *name, int name_len,
2892 struct btrfs_block_group_cache *group,
2893 int mode, u64 *index)
2895 struct inode *inode;
2896 struct btrfs_inode_item *inode_item;
2897 struct btrfs_block_group_cache *new_inode_group;
2898 struct btrfs_key *location;
2899 struct btrfs_path *path;
2900 struct btrfs_inode_ref *ref;
2901 struct btrfs_key key[2];
2907 path = btrfs_alloc_path();
2910 inode = new_inode(root->fs_info->sb);
2912 return ERR_PTR(-ENOMEM);
2915 ret = btrfs_set_inode_index(dir, inode, index);
2917 return ERR_PTR(ret);
2920 * index_cnt is ignored for everything but a dir,
2921 * btrfs_set_inode_index_count has an explanation for the magic
2924 init_btrfs_i(inode);
2925 BTRFS_I(inode)->index_cnt = 2;
2926 BTRFS_I(inode)->root = root;
2927 BTRFS_I(inode)->generation = trans->transid;
2933 new_inode_group = btrfs_find_block_group(root, group, 0,
2934 BTRFS_BLOCK_GROUP_METADATA, owner);
2935 if (!new_inode_group) {
2936 printk("find_block group failed\n");
2937 new_inode_group = group;
2939 BTRFS_I(inode)->block_group = new_inode_group;
2941 key[0].objectid = objectid;
2942 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
2945 key[1].objectid = objectid;
2946 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
2947 key[1].offset = ref_objectid;
2949 sizes[0] = sizeof(struct btrfs_inode_item);
2950 sizes[1] = name_len + sizeof(*ref);
2952 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
2956 if (objectid > root->highest_inode)
2957 root->highest_inode = objectid;
2959 inode->i_uid = current->fsuid;
2960 inode->i_gid = current->fsgid;
2961 inode->i_mode = mode;
2962 inode->i_ino = objectid;
2963 inode_set_bytes(inode, 0);
2964 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
2965 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2966 struct btrfs_inode_item);
2967 fill_inode_item(trans, path->nodes[0], inode_item, inode);
2969 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2970 struct btrfs_inode_ref);
2971 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
2972 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
2973 ptr = (unsigned long)(ref + 1);
2974 write_extent_buffer(path->nodes[0], name, ptr, name_len);
2976 btrfs_mark_buffer_dirty(path->nodes[0]);
2977 btrfs_free_path(path);
2979 location = &BTRFS_I(inode)->location;
2980 location->objectid = objectid;
2981 location->offset = 0;
2982 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2984 insert_inode_hash(inode);
2988 BTRFS_I(dir)->index_cnt--;
2989 btrfs_free_path(path);
2990 return ERR_PTR(ret);
2993 static inline u8 btrfs_inode_type(struct inode *inode)
2995 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
2999 * utility function to add 'inode' into 'parent_inode' with
3000 * a given name and a given sequence number.
3001 * if 'add_backref' is true, also insert a backref from the
3002 * inode to the parent directory.
3004 int btrfs_add_link(struct btrfs_trans_handle *trans,
3005 struct inode *parent_inode, struct inode *inode,
3006 const char *name, int name_len, int add_backref, u64 index)
3009 struct btrfs_key key;
3010 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3012 key.objectid = inode->i_ino;
3013 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3016 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3017 parent_inode->i_ino,
3018 &key, btrfs_inode_type(inode),
3022 ret = btrfs_insert_inode_ref(trans, root,
3025 parent_inode->i_ino,
3028 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3030 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3031 ret = btrfs_update_inode(trans, root, parent_inode);
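/*
 * (added note, not in the original source) one link is recorded as a pair of
 * tree items: a dir item keyed on the parent's inode number that maps the
 * name to the child's key, and (when add_backref is set) an inode ref keyed
 * on the child's inode number that records the name, the parent and the
 * directory index.
 */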
3036 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3037 struct dentry *dentry, struct inode *inode,
3038 int backref, u64 index)
3040 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3041 inode, dentry->d_name.name,
3042 dentry->d_name.len, backref, index);
3044 d_instantiate(dentry, inode);
3052 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3053 int mode, dev_t rdev)
3055 struct btrfs_trans_handle *trans;
3056 struct btrfs_root *root = BTRFS_I(dir)->root;
3057 struct inode *inode = NULL;
3061 unsigned long nr = 0;
3064 if (!new_valid_dev(rdev))
3067 err = btrfs_check_free_space(root, 1, 0);
3071 trans = btrfs_start_transaction(root, 1);
3072 btrfs_set_trans_block_group(trans, dir);
3074 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3080 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3082 dentry->d_parent->d_inode->i_ino, objectid,
3083 BTRFS_I(dir)->block_group, mode, &index);
3084 err = PTR_ERR(inode);
3088 err = btrfs_init_acl(inode, dir);
3094 btrfs_set_trans_block_group(trans, inode);
3095 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3099 inode->i_op = &btrfs_special_inode_operations;
3100 init_special_inode(inode, inode->i_mode, rdev);
3101 btrfs_update_inode(trans, root, inode);
3103 dir->i_sb->s_dirt = 1;
3104 btrfs_update_inode_block_group(trans, inode);
3105 btrfs_update_inode_block_group(trans, dir);
3107 nr = trans->blocks_used;
3108 btrfs_end_transaction_throttle(trans, root);
3111 inode_dec_link_count(inode);
3114 btrfs_btree_balance_dirty(root, nr);
3118 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3119 int mode, struct nameidata *nd)
3121 struct btrfs_trans_handle *trans;
3122 struct btrfs_root *root = BTRFS_I(dir)->root;
3123 struct inode *inode = NULL;
3126 unsigned long nr = 0;
3130 err = btrfs_check_free_space(root, 1, 0);
3133 trans = btrfs_start_transaction(root, 1);
3134 btrfs_set_trans_block_group(trans, dir);
3136 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3142 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3144 dentry->d_parent->d_inode->i_ino,
3145 objectid, BTRFS_I(dir)->block_group, mode,
3147 err = PTR_ERR(inode);
3151 err = btrfs_init_acl(inode, dir);
3157 btrfs_set_trans_block_group(trans, inode);
3158 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3162 inode->i_mapping->a_ops = &btrfs_aops;
3163 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3164 inode->i_fop = &btrfs_file_operations;
3165 inode->i_op = &btrfs_file_inode_operations;
3166 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3168 dir->i_sb->s_dirt = 1;
3169 btrfs_update_inode_block_group(trans, inode);
3170 btrfs_update_inode_block_group(trans, dir);
3172 nr = trans->blocks_used;
3173 btrfs_end_transaction_throttle(trans, root);
3176 inode_dec_link_count(inode);
3179 btrfs_btree_balance_dirty(root, nr);
3183 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3184 struct dentry *dentry)
3186 struct btrfs_trans_handle *trans;
3187 struct btrfs_root *root = BTRFS_I(dir)->root;
3188 struct inode *inode = old_dentry->d_inode;
3190 unsigned long nr = 0;
3194 if (inode->i_nlink == 0)
3197 btrfs_inc_nlink(inode);
3198 err = btrfs_check_free_space(root, 1, 0);
3201 err = btrfs_set_inode_index(dir, inode, &index);
3205 trans = btrfs_start_transaction(root, 1);
3207 btrfs_set_trans_block_group(trans, dir);
3208 atomic_inc(&inode->i_count);
3210 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3215 dir->i_sb->s_dirt = 1;
3216 btrfs_update_inode_block_group(trans, dir);
3217 err = btrfs_update_inode(trans, root, inode);
3222 nr = trans->blocks_used;
3223 btrfs_end_transaction_throttle(trans, root);
3226 inode_dec_link_count(inode);
3229 btrfs_btree_balance_dirty(root, nr);
3233 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3235 struct inode *inode = NULL;
3236 struct btrfs_trans_handle *trans;
3237 struct btrfs_root *root = BTRFS_I(dir)->root;
3239 int drop_on_err = 0;
3242 unsigned long nr = 1;
3244 err = btrfs_check_free_space(root, 1, 0);
3248 trans = btrfs_start_transaction(root, 1);
3249 btrfs_set_trans_block_group(trans, dir);
3251 if (IS_ERR(trans)) {
3252 err = PTR_ERR(trans);
3256 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3262 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3264 dentry->d_parent->d_inode->i_ino, objectid,
3265 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3267 if (IS_ERR(inode)) {
3268 err = PTR_ERR(inode);
3274 err = btrfs_init_acl(inode, dir);
3278 inode->i_op = &btrfs_dir_inode_operations;
3279 inode->i_fop = &btrfs_dir_file_operations;
3280 btrfs_set_trans_block_group(trans, inode);
3282 btrfs_i_size_write(inode, 0);
3283 err = btrfs_update_inode(trans, root, inode);
3287 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3288 inode, dentry->d_name.name,
3289 dentry->d_name.len, 0, index);
3293 d_instantiate(dentry, inode);
3295 dir->i_sb->s_dirt = 1;
3296 btrfs_update_inode_block_group(trans, inode);
3297 btrfs_update_inode_block_group(trans, dir);
3300 nr = trans->blocks_used;
3301 btrfs_end_transaction_throttle(trans, root);
3306 btrfs_btree_balance_dirty(root, nr);
3310 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3311 * and an extent that you want to insert, deal with overlap and insert
3312 * the new extent into the tree.
3314 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3315 struct extent_map *existing,
3316 struct extent_map *em,
3317 u64 map_start, u64 map_len)
3321 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3322 start_diff = map_start - em->start;
3323 em->start = map_start;
3325 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3326 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3327 em->block_start += start_diff;
3328 em->block_len -= start_diff;
3330 return add_extent_mapping(em_tree, em);
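/*
 * (worked example, not in the original source) if the tree already covers
 * the range up to 8k and the new em spans [4k, 16k), the caller passes
 * map_start == 8k: start_diff is 4k, em->start moves up to 8k, and for an
 * uncompressed extent block_start/block_len shift by the same 4k so only
 * the non-overlapping tail is inserted.
 */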
3333 static noinline int uncompress_inline(struct btrfs_path *path,
3334 struct inode *inode, struct page *page,
3335 size_t pg_offset, u64 extent_offset,
3336 struct btrfs_file_extent_item *item)
3339 struct extent_buffer *leaf = path->nodes[0];
3342 unsigned long inline_size;
3345 WARN_ON(pg_offset != 0);
3346 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3347 inline_size = btrfs_file_extent_inline_item_len(leaf,
3348 btrfs_item_nr(leaf, path->slots[0]));
3349 tmp = kmalloc(inline_size, GFP_NOFS);
3350 ptr = btrfs_file_extent_inline_start(item);
3352 read_extent_buffer(leaf, tmp, ptr, inline_size);
3354 max_size = min(PAGE_CACHE_SIZE, max_size);
3355 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3356 inline_size, max_size);
3358 char *kaddr = kmap_atomic(page, KM_USER0);
3359 unsigned long copy_size = min_t(u64,
3360 PAGE_CACHE_SIZE - pg_offset,
3361 max_size - extent_offset);
3362 memset(kaddr + pg_offset, 0, copy_size);
3363 kunmap_atomic(kaddr, KM_USER0);
3370 * a bit scary, this does extent mapping from logical file offset to the disk.
3371 * the ugly parts come from merging extents from the disk with the
3372 * in-ram representation. This gets more complex because of the data=ordered code,
3373 * where the in-ram extents might be locked pending data=ordered completion.
3375 * This also copies inline extents directly into the page.
3377 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3378 size_t pg_offset, u64 start, u64 len,
3384 u64 extent_start = 0;
3386 u64 objectid = inode->i_ino;
3388 struct btrfs_path *path = NULL;
3389 struct btrfs_root *root = BTRFS_I(inode)->root;
3390 struct btrfs_file_extent_item *item;
3391 struct extent_buffer *leaf;
3392 struct btrfs_key found_key;
3393 struct extent_map *em = NULL;
3394 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3395 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3396 struct btrfs_trans_handle *trans = NULL;
3400 spin_lock(&em_tree->lock);
3401 em = lookup_extent_mapping(em_tree, start, len);
3403 em->bdev = root->fs_info->fs_devices->latest_bdev;
3404 spin_unlock(&em_tree->lock);
3407 if (em->start > start || em->start + em->len <= start)
3408 free_extent_map(em);
3409 else if (em->block_start == EXTENT_MAP_INLINE && page)
3410 free_extent_map(em);
3414 em = alloc_extent_map(GFP_NOFS);
3419 em->bdev = root->fs_info->fs_devices->latest_bdev;
3420 em->start = EXTENT_MAP_HOLE;
3422 em->block_len = (u64)-1;
3425 path = btrfs_alloc_path();
3429 ret = btrfs_lookup_file_extent(trans, root, path,
3430 objectid, start, trans != NULL);
3437 if (path->slots[0] == 0)
3442 leaf = path->nodes[0];
3443 item = btrfs_item_ptr(leaf, path->slots[0],
3444 struct btrfs_file_extent_item);
3445 /* are we inside the extent that was found? */
3446 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3447 found_type = btrfs_key_type(&found_key);
3448 if (found_key.objectid != objectid ||
3449 found_type != BTRFS_EXTENT_DATA_KEY) {
3453 found_type = btrfs_file_extent_type(leaf, item);
3454 extent_start = found_key.offset;
3455 compressed = btrfs_file_extent_compression(leaf, item);
3456 if (found_type == BTRFS_FILE_EXTENT_REG) {
3457 extent_end = extent_start +
3458 btrfs_file_extent_num_bytes(leaf, item);
3460 if (start < extent_start || start >= extent_end) {
3462 if (start < extent_start) {
3463 if (start + len <= extent_start)
3465 em->len = extent_end - extent_start;
3471 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
3473 em->start = extent_start;
3474 em->len = extent_end - extent_start;
3475 em->block_start = EXTENT_MAP_HOLE;
3478 em->start = extent_start;
3479 em->len = extent_end - extent_start;
3481 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3482 em->block_start = bytenr;
3483 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
3486 bytenr += btrfs_file_extent_offset(leaf, item);
3487 em->block_start = bytenr;
3488 em->block_len = em->len;
3491 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3496 size_t extent_offset;
3499 size = btrfs_file_extent_inline_len(leaf, item);
3500 extent_end = (extent_start + size + root->sectorsize - 1) &
3501 ~((u64)root->sectorsize - 1);
3502 if (start < extent_start || start >= extent_end) {
3504 if (start < extent_start) {
3505 if (start + len <= extent_start)
3507 em->len = extent_end - extent_start;
3513 em->block_start = EXTENT_MAP_INLINE;
3515 if (!page || create) {
3516 em->start = extent_start;
3517 em->len = (size + root->sectorsize - 1) &
3518 ~((u64)root->sectorsize - 1);
3522 page_start = page_offset(page) + pg_offset;
3523 extent_offset = page_start - extent_start;
3524 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
3525 size - extent_offset);
3526 em->start = extent_start + extent_offset;
3527 em->len = (copy_size + root->sectorsize - 1) &
3528 ~((u64)root->sectorsize - 1);
3530 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3531 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
3532 if (create == 0 && !PageUptodate(page)) {
3533 if (btrfs_file_extent_compression(leaf, item) ==
3534 BTRFS_COMPRESS_ZLIB) {
3535 ret = uncompress_inline(path, inode, page,
3537 extent_offset, item);
3541 read_extent_buffer(leaf, map + pg_offset, ptr,
3545 flush_dcache_page(page);
3546 } else if (create && PageUptodate(page)) {
3549 free_extent_map(em);
3551 btrfs_release_path(root, path);
3552 trans = btrfs_join_transaction(root, 1);
3556 write_extent_buffer(leaf, map + pg_offset, ptr,
3559 btrfs_mark_buffer_dirty(leaf);
3561 set_extent_uptodate(io_tree, em->start,
3562 extent_map_end(em) - 1, GFP_NOFS);
3565 printk("unkknown found_type %d\n", found_type);
3572 em->block_start = EXTENT_MAP_HOLE;
3574 btrfs_release_path(root, path);
3575 if (em->start > start || extent_map_end(em) <= start) {
3576 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
3582 spin_lock(&em_tree->lock);
3583 ret = add_extent_mapping(em_tree, em);
3584 /* it is possible that someone inserted the extent into the tree
3585 * while we had the lock dropped. It is also possible that
3586 * an overlapping map exists in the tree
3588 if (ret == -EEXIST) {
3589 struct extent_map *existing;
3593 existing = lookup_extent_mapping(em_tree, start, len);
3594 if (existing && (existing->start > start ||
3595 existing->start + existing->len <= start)) {
3596 free_extent_map(existing);
3600 existing = lookup_extent_mapping(em_tree, em->start,
3603 err = merge_extent_mapping(em_tree, existing,
3606 free_extent_map(existing);
3608 free_extent_map(em);
3613 printk("failing to insert %Lu %Lu\n",
3615 free_extent_map(em);
3619 free_extent_map(em);
3624 spin_unlock(&em_tree->lock);
3627 btrfs_free_path(path);
3629 ret = btrfs_end_transaction(trans, root);
3635 free_extent_map(em);
3637 return ERR_PTR(err);
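/*
 * (illustrative sketch, not in the original source) the read paths below
 * pass btrfs_get_extent as the get_extent callback; a direct caller that
 * only wants the mapping for a range looks roughly like:
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 *	if (!IS_ERR(em)) {
 *		if (em->block_start == EXTENT_MAP_HOLE)
 *			... nothing is on disk for this range ...
 *		free_extent_map(em);
 *	}
 */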
3642 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
3643 const struct iovec *iov, loff_t offset,
3644 unsigned long nr_segs)
3649 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
3651 return extent_bmap(mapping, iblock, btrfs_get_extent);
3654 int btrfs_readpage(struct file *file, struct page *page)
3656 struct extent_io_tree *tree;
3657 tree = &BTRFS_I(page->mapping->host)->io_tree;
3658 return extent_read_full_page(tree, page, btrfs_get_extent);
3661 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
3663 struct extent_io_tree *tree;
3666 if (current->flags & PF_MEMALLOC) {
3667 redirty_page_for_writepage(wbc, page);
3671 tree = &BTRFS_I(page->mapping->host)->io_tree;
3672 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
3675 int btrfs_writepages(struct address_space *mapping,
3676 struct writeback_control *wbc)
3678 struct extent_io_tree *tree;
3679 tree = &BTRFS_I(mapping->host)->io_tree;
3680 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
3684 btrfs_readpages(struct file *file, struct address_space *mapping,
3685 struct list_head *pages, unsigned nr_pages)
3687 struct extent_io_tree *tree;
3688 tree = &BTRFS_I(mapping->host)->io_tree;
3689 return extent_readpages(tree, mapping, pages, nr_pages,
3692 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3694 struct extent_io_tree *tree;
3695 struct extent_map_tree *map;
3698 tree = &BTRFS_I(page->mapping->host)->io_tree;
3699 map = &BTRFS_I(page->mapping->host)->extent_tree;
3700 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
3702 ClearPagePrivate(page);
3703 set_page_private(page, 0);
3704 page_cache_release(page);
3709 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3711 if (PageWriteback(page) || PageDirty(page))
3713 return __btrfs_releasepage(page, gfp_flags);
3716 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
3718 struct extent_io_tree *tree;
3719 struct btrfs_ordered_extent *ordered;
3720 u64 page_start = page_offset(page);
3721 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
3723 wait_on_page_writeback(page);
3724 tree = &BTRFS_I(page->mapping->host)->io_tree;
3726 btrfs_releasepage(page, GFP_NOFS);
3730 lock_extent(tree, page_start, page_end, GFP_NOFS);
3731 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
3735 * IO on this page will never be started, so we need
3736 * to account for any ordered extents now
3738 clear_extent_bit(tree, page_start, page_end,
3739 EXTENT_DIRTY | EXTENT_DELALLOC |
3740 EXTENT_LOCKED, 1, 0, GFP_NOFS);
3741 btrfs_finish_ordered_io(page->mapping->host,
3742 page_start, page_end);
3743 btrfs_put_ordered_extent(ordered);
3744 lock_extent(tree, page_start, page_end, GFP_NOFS);
3746 clear_extent_bit(tree, page_start, page_end,
3747 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3750 __btrfs_releasepage(page, GFP_NOFS);
3752 ClearPageChecked(page);
3753 if (PagePrivate(page)) {
3754 ClearPagePrivate(page);
3755 set_page_private(page, 0);
3756 page_cache_release(page);
3761 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
3762 * called from a page fault handler when a page is first dirtied. Hence we must
3763 * be careful to check for EOF conditions here. We set the page up correctly
3764 * for a written page which means we get ENOSPC checking when writing into
3765 * holes and correct delalloc and unwritten extent mapping on filesystems that
3766 * support these features.
3768 * We are not allowed to take the i_mutex here so we have to play games to
3769 * protect against truncate races as the page could now be beyond EOF. Because
3770 * vmtruncate() writes the inode size before removing pages, once we have the
3771 * page lock we can determine safely if the page is beyond EOF. If it is not
3772 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
3775 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
3777 struct inode *inode = fdentry(vma->vm_file)->d_inode;
3778 struct btrfs_root *root = BTRFS_I(inode)->root;
3779 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3780 struct btrfs_ordered_extent *ordered;
3782 unsigned long zero_start;
3788 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
3795 size = i_size_read(inode);
3796 page_start = page_offset(page);
3797 page_end = page_start + PAGE_CACHE_SIZE - 1;
3799 if ((page->mapping != inode->i_mapping) ||
3800 (page_start >= size)) {
3801 /* page got truncated out from underneath us */
3804 wait_on_page_writeback(page);
3806 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3807 set_page_extent_mapped(page);
3810 * we can't set the delalloc bits if there are pending ordered
3811 * extents. Drop our locks and wait for them to finish
3813 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3815 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3817 btrfs_start_ordered_extent(inode, ordered, 1);
3818 btrfs_put_ordered_extent(ordered);
3822 btrfs_set_extent_delalloc(inode, page_start, page_end);
3825 /* page is wholly or partially inside EOF */
3826 if (page_start + PAGE_CACHE_SIZE > size)
3827 zero_start = size & ~PAGE_CACHE_MASK;
3829 zero_start = PAGE_CACHE_SIZE;
3831 if (zero_start != PAGE_CACHE_SIZE) {
3833 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
3834 flush_dcache_page(page);
3837 ClearPageChecked(page);
3838 set_page_dirty(page);
3839 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
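/*
 * (added note, not in the original source) btrfs_page_mkwrite is non-static
 * because it is installed as the ->page_mkwrite handler for btrfs mmaps
 * (in file.c, not shown here), so it runs on the first write fault to a
 * writable shared mapping.
 */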
3847 static void btrfs_truncate(struct inode *inode)
3849 struct btrfs_root *root = BTRFS_I(inode)->root;
3851 struct btrfs_trans_handle *trans;
3853 u64 mask = root->sectorsize - 1;
3855 if (!S_ISREG(inode->i_mode))
3857 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3860 btrfs_truncate_page(inode->i_mapping, inode->i_size);
3861 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
3863 trans = btrfs_start_transaction(root, 1);
3864 btrfs_set_trans_block_group(trans, inode);
3865 btrfs_i_size_write(inode, inode->i_size);
3867 ret = btrfs_orphan_add(trans, inode);
3870 /* FIXME, add redo link to tree so we don't leak on crash */
3871 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
3872 BTRFS_EXTENT_DATA_KEY);
3873 btrfs_update_inode(trans, root, inode);
3875 ret = btrfs_orphan_del(trans, inode);
3879 nr = trans->blocks_used;
3880 ret = btrfs_end_transaction_throttle(trans, root);
3882 btrfs_btree_balance_dirty(root, nr);
3886 * Invalidate a single dcache entry at the root of the filesystem.
3887 * Needed after creation of snapshot or subvolume.
3889 void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
3892 struct dentry *alias, *entry;
3895 alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
3899 /* change me if btrfs ever gets a d_hash operation */
3900 qstr.hash = full_name_hash(qstr.name, qstr.len);
3901 entry = d_lookup(alias, &qstr);
3904 d_invalidate(entry);
3911 * create a new subvolume directory/inode (helper for the ioctl).
3913 int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry,
3914 struct btrfs_trans_handle *trans, u64 new_dirid,
3915 struct btrfs_block_group_cache *block_group)
3917 struct inode *inode;
3921 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
3922 new_dirid, block_group, S_IFDIR | 0700, &index);
3924 return PTR_ERR(inode);
3925 inode->i_op = &btrfs_dir_inode_operations;
3926 inode->i_fop = &btrfs_dir_file_operations;
3927 new_root->inode = inode;
3930 btrfs_i_size_write(inode, 0);
3932 error = btrfs_update_inode(trans, new_root, inode);
3936 d_instantiate(dentry, inode);
3940 /* helper function for file defrag and space balancing. This
3941 * forces readahead on a given range of bytes in an inode
3943 unsigned long btrfs_force_ra(struct address_space *mapping,
3944 struct file_ra_state *ra, struct file *file,
3945 pgoff_t offset, pgoff_t last_index)
3947 pgoff_t req_size = last_index - offset + 1;
3949 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
3950 return offset + req_size;
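/*
 * (illustrative sketch, not in the original source) a caller warming up a
 * file range before processing it would typically do:
 *
 *	next = btrfs_force_ra(mapping, &file->f_ra, file, index, last_index);
 *	... pages [index, last_index] are now queued for readahead, and
 *	    next == last_index + 1 can seed the next chunk ...
 */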
3953 struct inode *btrfs_alloc_inode(struct super_block *sb)
3955 struct btrfs_inode *ei;
3957 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
3961 ei->logged_trans = 0;
3962 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
3963 ei->i_acl = BTRFS_ACL_NOT_CACHED;
3964 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
3965 INIT_LIST_HEAD(&ei->i_orphan);
3966 return &ei->vfs_inode;
3969 void btrfs_destroy_inode(struct inode *inode)
3971 struct btrfs_ordered_extent *ordered;
3972 WARN_ON(!list_empty(&inode->i_dentry));
3973 WARN_ON(inode->i_data.nrpages);
3975 if (BTRFS_I(inode)->i_acl &&
3976 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
3977 posix_acl_release(BTRFS_I(inode)->i_acl);
3978 if (BTRFS_I(inode)->i_default_acl &&
3979 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
3980 posix_acl_release(BTRFS_I(inode)->i_default_acl);
3982 spin_lock(&BTRFS_I(inode)->root->list_lock);
3983 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
3984 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
3985 " list\n", inode->i_ino);
3988 spin_unlock(&BTRFS_I(inode)->root->list_lock);
3991 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
3995 printk("found ordered extent %Lu %Lu\n",
3996 ordered->file_offset, ordered->len);
3997 btrfs_remove_ordered_extent(inode, ordered);
3998 btrfs_put_ordered_extent(ordered);
3999 btrfs_put_ordered_extent(ordered);
4002 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4003 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4006 static void init_once(void *foo)
4008 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4010 inode_init_once(&ei->vfs_inode);
4013 void btrfs_destroy_cachep(void)
4015 if (btrfs_inode_cachep)
4016 kmem_cache_destroy(btrfs_inode_cachep);
4017 if (btrfs_trans_handle_cachep)
4018 kmem_cache_destroy(btrfs_trans_handle_cachep);
4019 if (btrfs_transaction_cachep)
4020 kmem_cache_destroy(btrfs_transaction_cachep);
4021 if (btrfs_bit_radix_cachep)
4022 kmem_cache_destroy(btrfs_bit_radix_cachep);
4023 if (btrfs_path_cachep)
4024 kmem_cache_destroy(btrfs_path_cachep);
4027 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4028 unsigned long extra_flags,
4029 void (*ctor)(void *))
4031 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4032 SLAB_MEM_SPREAD | extra_flags), ctor);
4035 int btrfs_init_cachep(void)
4037 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4038 sizeof(struct btrfs_inode),
4040 if (!btrfs_inode_cachep)
4042 btrfs_trans_handle_cachep =
4043 btrfs_cache_create("btrfs_trans_handle_cache",
4044 sizeof(struct btrfs_trans_handle),
4046 if (!btrfs_trans_handle_cachep)
4048 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4049 sizeof(struct btrfs_transaction),
4051 if (!btrfs_transaction_cachep)
4053 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4054 sizeof(struct btrfs_path),
4056 if (!btrfs_path_cachep)
4058 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4059 SLAB_DESTROY_BY_RCU, NULL);
4060 if (!btrfs_bit_radix_cachep)
4064 btrfs_destroy_cachep();
4068 static int btrfs_getattr(struct vfsmount *mnt,
4069 struct dentry *dentry, struct kstat *stat)
4071 struct inode *inode = dentry->d_inode;
4072 generic_fillattr(inode, stat);
4073 stat->blksize = PAGE_CACHE_SIZE;
4074 stat->blocks = (inode_get_bytes(inode) +
4075 BTRFS_I(inode)->delalloc_bytes) >> 9;
4079 static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
4080 struct inode * new_dir,struct dentry *new_dentry)
4082 struct btrfs_trans_handle *trans;
4083 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4084 struct inode *new_inode = new_dentry->d_inode;
4085 struct inode *old_inode = old_dentry->d_inode;
4086 struct timespec ctime = CURRENT_TIME;
4090 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4091 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4095 ret = btrfs_check_free_space(root, 1, 0);
4099 trans = btrfs_start_transaction(root, 1);
4101 btrfs_set_trans_block_group(trans, new_dir);
4103 btrfs_inc_nlink(old_dentry->d_inode);
4104 old_dir->i_ctime = old_dir->i_mtime = ctime;
4105 new_dir->i_ctime = new_dir->i_mtime = ctime;
4106 old_inode->i_ctime = ctime;
4108 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4109 old_dentry->d_name.name,
4110 old_dentry->d_name.len);
4115 new_inode->i_ctime = CURRENT_TIME;
4116 ret = btrfs_unlink_inode(trans, root, new_dir,
4117 new_dentry->d_inode,
4118 new_dentry->d_name.name,
4119 new_dentry->d_name.len);
4122 if (new_inode->i_nlink == 0) {
4123 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4129 ret = btrfs_set_inode_index(new_dir, old_inode, &index);
4133 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4134 old_inode, new_dentry->d_name.name,
4135 new_dentry->d_name.len, 1, index);
4140 btrfs_end_transaction_throttle(trans, root);
4146 * some fairly slow code that needs optimization. This walks the list
4147 * of all the inodes with pending delalloc and forces them to disk.
4149 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4151 struct list_head *head = &root->fs_info->delalloc_inodes;
4152 struct btrfs_inode *binode;
4153 struct inode *inode;
4154 unsigned long flags;
4156 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4157 while(!list_empty(head)) {
4158 binode = list_entry(head->next, struct btrfs_inode,
4160 inode = igrab(&binode->vfs_inode);
4162 list_del_init(&binode->delalloc_inodes);
4163 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4165 filemap_flush(inode->i_mapping);
4169 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4171 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4173 /* the filemap_flush will queue IO into the worker threads, but
4174 * we have to make sure the IO is actually started and that
4175 * ordered extents get created before we return
4177 atomic_inc(&root->fs_info->async_submit_draining);
4178 while(atomic_read(&root->fs_info->nr_async_submits)) {
4179 wait_event(root->fs_info->async_submit_wait,
4180 (atomic_read(&root->fs_info->nr_async_submits) == 0));
4182 atomic_dec(&root->fs_info->async_submit_draining);
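/*
 * (added note, not in the original source) the pattern above is
 * flush-then-drain: filemap_flush() only queues async work for each delalloc
 * inode, so async_submit_draining is raised and the function waits for
 * nr_async_submits to hit zero, guaranteeing the ordered extents exist
 * before it returns.
 */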
4186 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4187 const char *symname)
4189 struct btrfs_trans_handle *trans;
4190 struct btrfs_root *root = BTRFS_I(dir)->root;
4191 struct btrfs_path *path;
4192 struct btrfs_key key;
4193 struct inode *inode = NULL;
4201 struct btrfs_file_extent_item *ei;
4202 struct extent_buffer *leaf;
4203 unsigned long nr = 0;
4205 name_len = strlen(symname) + 1;
4206 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4207 return -ENAMETOOLONG;
4209 err = btrfs_check_free_space(root, 1, 0);
4213 trans = btrfs_start_transaction(root, 1);
4214 btrfs_set_trans_block_group(trans, dir);
4216 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4222 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4224 dentry->d_parent->d_inode->i_ino, objectid,
4225 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4227 err = PTR_ERR(inode);
4231 err = btrfs_init_acl(inode, dir);
4237 btrfs_set_trans_block_group(trans, inode);
4238 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4242 inode->i_mapping->a_ops = &btrfs_aops;
4243 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4244 inode->i_fop = &btrfs_file_operations;
4245 inode->i_op = &btrfs_file_inode_operations;
4246 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4248 dir->i_sb->s_dirt = 1;
4249 btrfs_update_inode_block_group(trans, inode);
4250 btrfs_update_inode_block_group(trans, dir);
4254 path = btrfs_alloc_path();
4256 key.objectid = inode->i_ino;
4258 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4259 datasize = btrfs_file_extent_calc_inline_size(name_len);
4260 err = btrfs_insert_empty_item(trans, root, path, &key,
4266 leaf = path->nodes[0];
4267 ei = btrfs_item_ptr(leaf, path->slots[0],
4268 struct btrfs_file_extent_item);
4269 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4270 btrfs_set_file_extent_type(leaf, ei,
4271 BTRFS_FILE_EXTENT_INLINE);
4272 btrfs_set_file_extent_encryption(leaf, ei, 0);
4273 btrfs_set_file_extent_compression(leaf, ei, 0);
4274 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
4275 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
4277 ptr = btrfs_file_extent_inline_start(ei);
4278 write_extent_buffer(leaf, symname, ptr, name_len);
4279 btrfs_mark_buffer_dirty(leaf);
4280 btrfs_free_path(path);
4282 inode->i_op = &btrfs_symlink_inode_operations;
4283 inode->i_mapping->a_ops = &btrfs_symlink_aops;
4284 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4285 btrfs_i_size_write(inode, name_len - 1);
4286 err = btrfs_update_inode(trans, root, inode);
4291 nr = trans->blocks_used;
4292 btrfs_end_transaction_throttle(trans, root);
4295 inode_dec_link_count(inode);
4298 btrfs_btree_balance_dirty(root, nr);
4302 static int btrfs_set_page_dirty(struct page *page)
4304 return __set_page_dirty_nobuffers(page);
4307 static int btrfs_permission(struct inode *inode, int mask)
4309 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
4311 return generic_permission(inode, mask, btrfs_check_acl);
4314 static struct inode_operations btrfs_dir_inode_operations = {
4315 .lookup = btrfs_lookup,
4316 .create = btrfs_create,
4317 .unlink = btrfs_unlink,
4319 .mkdir = btrfs_mkdir,
4320 .rmdir = btrfs_rmdir,
4321 .rename = btrfs_rename,
4322 .symlink = btrfs_symlink,
4323 .setattr = btrfs_setattr,
4324 .mknod = btrfs_mknod,
4325 .setxattr = btrfs_setxattr,
4326 .getxattr = btrfs_getxattr,
4327 .listxattr = btrfs_listxattr,
4328 .removexattr = btrfs_removexattr,
4329 .permission = btrfs_permission,
4331 static struct inode_operations btrfs_dir_ro_inode_operations = {
4332 .lookup = btrfs_lookup,
4333 .permission = btrfs_permission,
4335 static struct file_operations btrfs_dir_file_operations = {
4336 .llseek = generic_file_llseek,
4337 .read = generic_read_dir,
4338 .readdir = btrfs_real_readdir,
4339 .unlocked_ioctl = btrfs_ioctl,
4340 #ifdef CONFIG_COMPAT
4341 .compat_ioctl = btrfs_ioctl,
4343 .release = btrfs_release_file,
4344 .fsync = btrfs_sync_file,
4347 static struct extent_io_ops btrfs_extent_io_ops = {
4348 .fill_delalloc = run_delalloc_range,
4349 .submit_bio_hook = btrfs_submit_bio_hook,
4350 .merge_bio_hook = btrfs_merge_bio_hook,
4351 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
4352 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
4353 .writepage_start_hook = btrfs_writepage_start_hook,
4354 .readpage_io_failed_hook = btrfs_io_failed_hook,
4355 .set_bit_hook = btrfs_set_bit_hook,
4356 .clear_bit_hook = btrfs_clear_bit_hook,
4359 static struct address_space_operations btrfs_aops = {
4360 .readpage = btrfs_readpage,
4361 .writepage = btrfs_writepage,
4362 .writepages = btrfs_writepages,
4363 .readpages = btrfs_readpages,
4364 .sync_page = block_sync_page,
4366 .direct_IO = btrfs_direct_IO,
4367 .invalidatepage = btrfs_invalidatepage,
4368 .releasepage = btrfs_releasepage,
4369 .set_page_dirty = btrfs_set_page_dirty,
4372 static struct address_space_operations btrfs_symlink_aops = {
4373 .readpage = btrfs_readpage,
4374 .writepage = btrfs_writepage,
4375 .invalidatepage = btrfs_invalidatepage,
4376 .releasepage = btrfs_releasepage,
4379 static struct inode_operations btrfs_file_inode_operations = {
4380 .truncate = btrfs_truncate,
4381 .getattr = btrfs_getattr,
4382 .setattr = btrfs_setattr,
4383 .setxattr = btrfs_setxattr,
4384 .getxattr = btrfs_getxattr,
4385 .listxattr = btrfs_listxattr,
4386 .removexattr = btrfs_removexattr,
4387 .permission = btrfs_permission,
4389 static struct inode_operations btrfs_special_inode_operations = {
4390 .getattr = btrfs_getattr,
4391 .setattr = btrfs_setattr,
4392 .permission = btrfs_permission,
4393 .setxattr = btrfs_setxattr,
4394 .getxattr = btrfs_getxattr,
4395 .listxattr = btrfs_listxattr,
4396 .removexattr = btrfs_removexattr,
4398 static struct inode_operations btrfs_symlink_inode_operations = {
4399 .readlink = generic_readlink,
4400 .follow_link = page_follow_link_light,
4401 .put_link = page_put_link,
4402 .permission = btrfs_permission,