Btrfs: Drop some verbose printks
fs/btrfs/transaction.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

#define BTRFS_ROOT_TRANS_TAG 0

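/*
 * Drop one reference on a transaction and free it when the last
 * reference goes away.  Callers hold fs_info->trans_mutex.
 */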
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                WARN_ON(total_trans == 0);
                total_trans--;
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

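/*
 * Join the currently running transaction, or allocate and initialize a
 * new one if nothing is running.  Called with trans_mutex held.
 */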
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                total_trans++;
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                root->fs_info->last_alloc = 0;
                root->fs_info->last_data_alloc = 0;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                btrfs_ordered_inode_tree_init(&cur_trans->ordered_inode_tree);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                     root->fs_info->btree_inode->i_mapping,
                                     GFP_NOFS);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}

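/*
 * Tag a copy-on-write root in the fs_roots radix tree the first time it
 * is used in the running transaction, and remember the current node as
 * the commit root so the root item can be updated at commit time.
 */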
static noinline int record_root_in_trans(struct btrfs_root *root)
{
        u64 running_trans_id = root->fs_info->running_transaction->transid;
        if (root->ref_cows && root->last_trans < running_trans_id) {
                WARN_ON(root == root->fs_info->extent_root);
                if (root->root_item.refs != 0) {
                        radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                        root->commit_root = btrfs_root_node(root);
                } else {
                        WARN_ON(1);
                }
                root->last_trans = running_trans_id;
        }
        return 0;
}

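/*
 * Allocate a transaction handle, join (or start) the running
 * transaction and note how many blocks the caller expects to use.
 */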
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        ret = join_transaction(root);
        BUG_ON(ret);

        record_root_in_trans(root);
        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = NULL;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        root->fs_info->running_transaction->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}

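/*
 * Sleep until the given transaction has fully committed.
 */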
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while(!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

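/*
 * Drop this handle's writer reference on the running transaction.  If
 * throttle is set and a commit is already underway, wait for that
 * commit to finish before returning.
 */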
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans;

        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = root->fs_info->running_transaction;
        WARN_ON(cur_trans != trans->transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;

        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);

        if (cur_trans->in_commit && throttle) {
                int ret;
                mutex_unlock(&root->fs_info->trans_mutex);
                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);
                mutex_lock(&root->fs_info->trans_mutex);
        }

        put_transaction(cur_trans);
        mutex_unlock(&root->fs_info->trans_mutex);
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

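/*
 * Write out and wait on every btree page dirtied by this transaction.
 * Without a transaction, fall back to flushing the whole btree inode.
 */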
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        int ret;
        int err;
        int werr = 0;
        struct extent_io_tree *dirty_pages;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start;
        u64 end;
        unsigned long index;

        if (!trans || !trans->transaction) {
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        dirty_pages = &trans->transaction->dirty_pages;
        while(1) {
                ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
                while(start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_lock_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        err = filemap_fdatawait(btree_inode->i_mapping);
        if (err)
                werr = err;
        return werr;
}

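/*
 * Point a cow-only tree's root item at its current root node.  Writing
 * the dirty block groups can cow the root again, so loop until the
 * root item and the in-memory root agree.
 */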
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        btrfs_write_dirty_block_groups(trans, root);
        while(1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start)
                        break;
                btrfs_set_root_bytenr(&root->root_item,
                                       root->node->start);
                btrfs_set_root_level(&root->root_item,
                                     btrfs_header_level(root->node));
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);
                btrfs_write_dirty_block_groups(trans, root);
        }
        return 0;
}

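/*
 * Update the root items for everything on the dirty_cowonly_roots list.
 */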
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;

        while(!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                update_cowonly_root(trans, root);
        }
        return 0;
}

struct dirty_root {
        struct list_head list;
        struct btrfs_root *root;
        struct btrfs_root *latest_root;
};

int btrfs_add_dead_root(struct btrfs_root *root,
                        struct btrfs_root *latest,
                        struct list_head *dead_list)
{
        struct dirty_root *dirty;

        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
        if (!dirty)
                return -ENOMEM;
        dirty->root = root;
        dirty->latest_root = latest;
        list_add(&dirty->list, dead_list);
        return 0;
}

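/*
 * Walk every fs root tagged in this transaction.  Unchanged roots just
 * get their root item rewritten; changed roots get a new root item
 * inserted for the current node, and the old copy is queued on the
 * dead list once its reference count reaches zero so its blocks can be
 * dropped later.
 */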
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
                                    struct radix_tree_root *radix,
                                    struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_root *gang[8];
        struct btrfs_root *root;
        int i;
        int ret;
        int err = 0;
        u32 refs;

        while(1) {
                ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(radix,
                                     (unsigned long)root->root_key.objectid,
                                     BTRFS_ROOT_TRANS_TAG);
                        if (root->commit_root == root->node) {
                                WARN_ON(root->node->start !=
                                        btrfs_root_bytenr(&root->root_item));
                                free_extent_buffer(root->commit_root);
                                root->commit_root = NULL;

                                /* make sure to update the root on disk
                                 * so we get any updates to the block used
                                 * counts
                                 */
                                err = btrfs_update_root(trans,
                                                root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                                continue;
                        }
                        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
                        BUG_ON(!dirty);
                        dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
                        BUG_ON(!dirty->root);

                        memset(&root->root_item.drop_progress, 0,
                               sizeof(struct btrfs_disk_key));
                        root->root_item.drop_level = 0;

                        memcpy(dirty->root, root, sizeof(*root));
                        dirty->root->node = root->commit_root;
                        dirty->latest_root = root;
                        root->commit_root = NULL;

                        root->root_key.offset = root->fs_info->generation;
                        btrfs_set_root_bytenr(&root->root_item,
                                              root->node->start);
                        btrfs_set_root_level(&root->root_item,
                                             btrfs_header_level(root->node));
                        err = btrfs_insert_root(trans, root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;

                        refs = btrfs_root_refs(&dirty->root->root_item);
                        btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
                        err = btrfs_update_root(trans, root->fs_info->tree_root,
                                                &dirty->root->root_key,
                                                &dirty->root->root_item);

                        BUG_ON(err);
                        if (refs == 1) {
                                list_add(&dirty->list, list);
                        } else {
                                WARN_ON(1);
                                kfree(dirty->root);
                                kfree(dirty);
                        }
                }
        }
        return err;
}

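/*
 * Defragment the leaves of a tree a little at a time, using a fresh
 * transaction for each pass so other writers are not starved.
 */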
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        int ret;
        struct btrfs_trans_handle *trans;
        unsigned long nr;

        smp_mb();
        if (root->defrag_running)
                return 0;
        trans = btrfs_start_transaction(root, 1);
        while (1) {
                root->defrag_running = 1;
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                trans = btrfs_start_transaction(root, 1);
                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        smp_mb();
        btrfs_end_transaction(trans, root);
        return 0;
}

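/*
 * Reclaim the space used by each dead root on the list.  The snapshot
 * blocks are dropped a chunk at a time inside short transactions, the
 * space accounting is pushed back into the latest root, and the old
 * root item is finally deleted from the tree of tree roots.
 */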
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                                     struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_trans_handle *trans;
        unsigned long nr;
        u64 num_bytes;
        u64 bytes_used;
        int ret = 0;
        int err;

        while(!list_empty(list)) {
                struct btrfs_root *root;

                dirty = list_entry(list->next, struct dirty_root, list);
                list_del_init(&dirty->list);

                num_bytes = btrfs_root_used(&dirty->root->root_item);
                root = dirty->latest_root;
                atomic_inc(&root->fs_info->throttles);

                mutex_lock(&root->fs_info->drop_mutex);
                while(1) {
                        trans = btrfs_start_transaction(tree_root, 1);
                        ret = btrfs_drop_snapshot(trans, dirty->root);
                        if (ret != -EAGAIN) {
                                break;
                        }

                        err = btrfs_update_root(trans,
                                        tree_root,
                                        &dirty->root->root_key,
                                        &dirty->root->root_item);
                        if (err)
                                ret = err;
                        nr = trans->blocks_used;
                        ret = btrfs_end_transaction_throttle(trans, tree_root);
                        BUG_ON(ret);

                        mutex_unlock(&root->fs_info->drop_mutex);
                        btrfs_btree_balance_dirty(tree_root, nr);
                        cond_resched();
                        mutex_lock(&root->fs_info->drop_mutex);
                }
                BUG_ON(ret);
                atomic_dec(&root->fs_info->throttles);

                mutex_lock(&root->fs_info->alloc_mutex);
                num_bytes -= btrfs_root_used(&dirty->root->root_item);
                bytes_used = btrfs_root_used(&root->root_item);
                if (num_bytes) {
                        record_root_in_trans(root);
                        btrfs_set_root_used(&root->root_item,
                                            bytes_used - num_bytes);
                }
                mutex_unlock(&root->fs_info->alloc_mutex);

                ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
                if (ret) {
                        BUG();
                        break;
                }
                mutex_unlock(&root->fs_info->drop_mutex);

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                free_extent_buffer(dirty->root->node);
                kfree(dirty->root);
                kfree(dirty);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        return ret;
}

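/*
 * Flush the data of every inode recorded in the transaction's ordered
 * inode tree.  trans_mutex is dropped around the actual writeback so
 * other transaction users can keep going.
 */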
int btrfs_write_ordered_inodes(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct inode *inode;
        u64 root_objectid = 0;
        u64 objectid = 0;
        int ret;

        atomic_inc(&root->fs_info->throttles);
        while(1) {
                ret = btrfs_find_first_ordered_inode(
                                &cur_trans->ordered_inode_tree,
                                &root_objectid, &objectid, &inode);
                if (!ret)
                        break;

                mutex_unlock(&root->fs_info->trans_mutex);

                if (S_ISREG(inode->i_mode)) {
                        atomic_inc(&BTRFS_I(inode)->ordered_writeback);
                        filemap_fdatawrite(inode->i_mapping);
                        atomic_dec(&BTRFS_I(inode)->ordered_writeback);
                }
                iput(inode);

                mutex_lock(&root->fs_info->trans_mutex);
        }
        while(1) {
                root_objectid = 0;
                objectid = 0;
                ret = btrfs_find_del_first_ordered_inode(
                                &cur_trans->ordered_inode_tree,
                                &root_objectid, &objectid, &inode);
                if (!ret)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);

                if (S_ISREG(inode->i_mode)) {
                        atomic_inc(&BTRFS_I(inode)->ordered_writeback);
                        filemap_write_and_wait(inode->i_mapping);
                        atomic_dec(&BTRFS_I(inode)->ordered_writeback);
                }
                atomic_dec(&inode->i_count);
                iput(inode);

                mutex_lock(&root->fs_info->trans_mutex);
        }
        atomic_dec(&root->fs_info->throttles);
        return 0;
}

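/*
 * Create a single pending snapshot: copy the source root's node into a
 * new root item and insert the directory entry and inode ref that make
 * the snapshot visible at the root of the filesystem.
 */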
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        int namelen;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = -ENOMEM;
                goto fail;
        }
        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret)
                goto fail;

        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        key.objectid = objectid;
        key.offset = 1;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_bytenr(new_root_item, tmp->start);
        btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
        ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
                                new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto fail;

        /*
         * insert the directory item
         */
        key.offset = (u64)-1;
        namelen = strlen(pending->name);
        ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
                                    pending->name, namelen,
                                    root->fs_info->sb->s_root->d_inode->i_ino,
                                    &key, BTRFS_FT_DIR);

        if (ret)
                goto fail;

        ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
                             pending->name, strlen(pending->name), objectid,
                             root->fs_info->sb->s_root->d_inode->i_ino);

        /* Invalidate existing dcache entry for new snapshot. */
        btrfs_invalidate_dcache_root(root, pending->name, namelen);

fail:
        kfree(new_root_item);
        return ret;
}

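/*
 * Create every snapshot that was requested while this transaction was
 * running.
 */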
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        while(!list_empty(head)) {
                pending = list_entry(head->next,
                                     struct btrfs_pending_snapshot, list);
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
                list_del(&pending->list);
                kfree(pending->name);
                kfree(pending);
        }
        return 0;
}

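/*
 * Commit the running transaction: wait for the other writers to finish
 * (or join a commit that is already in progress), create the pending
 * snapshots, write out the dirty tree roots and btree blocks, and then
 * update the super blocks.
 */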
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct list_head dirty_fs_roots;
        struct extent_io_tree *pinned_copy;
        DEFINE_WAIT(wait);
        int ret;

        INIT_LIST_HEAD(&dirty_fs_roots);

        mutex_lock(&root->fs_info->trans_mutex);
        if (trans->transaction->in_commit) {
                cur_trans = trans->transaction;
                trans->transaction->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
        if (!pinned_copy)
                return -ENOMEM;

        extent_io_tree_init(pinned_copy,
                             root->fs_info->btree_inode->i_mapping, GFP_NOFS);

        trans->transaction->in_commit = 1;
        cur_trans = trans->transaction;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

        do {
                joined = cur_trans->num_joined;
                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
                ret = btrfs_write_ordered_inodes(trans, root);

        } while (cur_trans->num_writers > 1 ||
                 (cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
                              &dirty_fs_roots);
        BUG_ON(ret);

        ret = btrfs_commit_tree_roots(trans, root);
        BUG_ON(ret);

        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);
        btrfs_set_super_generation(&root->fs_info->super_copy,
                                   cur_trans->transid);
        btrfs_set_super_root(&root->fs_info->super_copy,
                             root->fs_info->tree_root->node->start);
        btrfs_set_super_root_level(&root->fs_info->super_copy,
                           btrfs_header_level(root->fs_info->tree_root->node));

        btrfs_set_super_chunk_root(&root->fs_info->super_copy,
                                   chunk_root->node->start);
        btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
                                         btrfs_header_level(chunk_root->node));
        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        btrfs_copy_pinned(root, pinned_copy);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root);

        btrfs_finish_extent_commit(trans, root, pinned_copy);
        mutex_lock(&root->fs_info->trans_mutex);

        kfree(pinned_copy);

        cur_trans->commit_done = 1;
        root->fs_info->last_trans_committed = cur_trans->transid;
        wake_up(&cur_trans->commit_wait);
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        if (root->fs_info->closing)
                list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
        else
                list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);

        mutex_unlock(&root->fs_info->trans_mutex);
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (root->fs_info->closing) {
                drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
        }
        return ret;
}

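/*
 * Drop everything on the dead_roots list, repeating until the list
 * stays empty.
 */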
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        struct list_head dirty_roots;
        INIT_LIST_HEAD(&dirty_roots);
again:
        mutex_lock(&root->fs_info->trans_mutex);
        list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
        mutex_unlock(&root->fs_info->trans_mutex);

        if (!list_empty(&dirty_roots)) {
                drop_dirty_roots(root, &dirty_roots);
                goto again;
        }
        return 0;
}