Btrfs: Take the csum mutex while reading checksums
[safe/jmp/linux-2.6] / fs / btrfs / transaction.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/fs.h>
20 #include <linux/sched.h>
21 #include <linux/writeback.h>
22 #include <linux/pagemap.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "locking.h"
27
28 static int total_trans = 0;
29 extern struct kmem_cache *btrfs_trans_handle_cachep;
30 extern struct kmem_cache *btrfs_transaction_cachep;
31
32 #define BTRFS_ROOT_TRANS_TAG 0
33
34 static noinline void put_transaction(struct btrfs_transaction *transaction)
35 {
36         WARN_ON(transaction->use_count == 0);
37         transaction->use_count--;
38         if (transaction->use_count == 0) {
39                 WARN_ON(total_trans == 0);
40                 total_trans--;
41                 list_del_init(&transaction->list);
42                 memset(transaction, 0, sizeof(*transaction));
43                 kmem_cache_free(btrfs_transaction_cachep, transaction);
44         }
45 }
46
/*
 * Join the currently running transaction as one more writer, or
 * allocate and install a fresh transaction if none is running.
 *
 * Caller must hold fs_info->trans_mutex; that is what makes the
 * check-then-allocate on running_transaction race free.  Always
 * returns 0 (allocation failure is a BUG).
 */
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                total_trans++;
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                root->fs_info->last_alloc = 0;
                root->fs_info->last_data_alloc = 0;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
                /* one reference held by the running_transaction pointer */
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                     root->fs_info->btree_inode->i_mapping,
                                     GFP_NOFS);
                /*
                 * new_trans_lock lets readers peek at running_transaction
                 * without taking trans_mutex; publish under it.
                 */
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}
84
85 static noinline int record_root_in_trans(struct btrfs_root *root)
86 {
87         u64 running_trans_id = root->fs_info->running_transaction->transid;
88         if (root->ref_cows && root->last_trans < running_trans_id) {
89                 WARN_ON(root == root->fs_info->extent_root);
90                 if (root->root_item.refs != 0) {
91                         radix_tree_tag_set(&root->fs_info->fs_roots_radix,
92                                    (unsigned long)root->root_key.objectid,
93                                    BTRFS_ROOT_TRANS_TAG);
94                         root->commit_root = btrfs_root_node(root);
95                 } else {
96                         WARN_ON(1);
97                 }
98                 root->last_trans = running_trans_id;
99         }
100         return 0;
101 }
102
/*
 * Common helper for starting or joining a transaction.
 *
 * With @join == 0 a new writer waits while the current transaction is
 * in its blocked (committing) phase; with @join != 0 it attaches to
 * the running transaction even during a commit.
 *
 * Returns a handle that owns one reference on the running transaction.
 *
 * NOTE(review): the kmem_cache_alloc() result is dereferenced without
 * a NULL check; confirm callers accept the implicit oops on allocation
 * failure.
 */
struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                             int num_blocks, int join)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        struct btrfs_transaction *cur_trans;
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked && !join) {
                DEFINE_WAIT(wait);
                /* hold a ref so the transaction can't vanish while we sleep */
                cur_trans->use_count++;
                while(1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (cur_trans->blocked) {
                                /* drop the mutex so the commit can progress */
                                mutex_unlock(&root->fs_info->trans_mutex);
                                schedule();
                                mutex_lock(&root->fs_info->trans_mutex);
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                        } else {
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                                break;
                        }
                }
                put_transaction(cur_trans);
        }
        ret = join_transaction(root);
        BUG_ON(ret);

        record_root_in_trans(root);
        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = NULL;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        /* reference owned by the handle, dropped in __btrfs_end_transaction */
        root->fs_info->running_transaction->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}
148
/*
 * Start a transaction, waiting out any commit currently in its
 * blocked phase.
 */
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 0);
}
/*
 * Join the running transaction without waiting, even if a commit is
 * underway.
 */
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 1);
}
159
/*
 * Sleep until @commit has commit_done set.  trans_mutex is dropped
 * around the schedule() so the committing task can make progress, and
 * commit_done is rechecked after prepare_to_wait() to close the race
 * with the wake_up in btrfs_commit_transaction.  Always returns 0.
 */
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while(!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}
178
179 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
180                           struct btrfs_root *root, int throttle)
181 {
182         struct btrfs_transaction *cur_trans;
183
184         mutex_lock(&root->fs_info->trans_mutex);
185         cur_trans = root->fs_info->running_transaction;
186         WARN_ON(cur_trans != trans->transaction);
187         WARN_ON(cur_trans->num_writers < 1);
188         cur_trans->num_writers--;
189
190         if (waitqueue_active(&cur_trans->writer_wait))
191                 wake_up(&cur_trans->writer_wait);
192
193         if (0 && cur_trans->in_commit && throttle) {
194                 DEFINE_WAIT(wait);
195                 mutex_unlock(&root->fs_info->trans_mutex);
196                 prepare_to_wait(&root->fs_info->transaction_throttle, &wait,
197                                 TASK_UNINTERRUPTIBLE);
198                 schedule();
199                 finish_wait(&root->fs_info->transaction_throttle, &wait);
200                 mutex_lock(&root->fs_info->trans_mutex);
201         }
202
203         put_transaction(cur_trans);
204         mutex_unlock(&root->fs_info->trans_mutex);
205         memset(trans, 0, sizeof(*trans));
206         kmem_cache_free(btrfs_trans_handle_cachep, trans);
207         return 0;
208 }
209
/* End a transaction handle without commit throttling. */
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}
215
/* End a transaction handle, requesting commit throttling. */
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}
221
222
/*
 * Write every btree page dirtied by @trans to disk and wait for the
 * IO to finish.  Without a transaction (or without a handle at all)
 * fall back to flushing the whole btree inode mapping.
 *
 * Returns 0 on success or the last writeback error seen.
 */
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        int ret;
        int err;
        int werr = 0;
        struct extent_io_tree *dirty_pages;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start;
        u64 end;
        unsigned long index;

        if (!trans || !trans->transaction) {
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        dirty_pages = &trans->transaction->dirty_pages;
        while(1) {
                /* pull the next dirty byte range out of the extent tree */
                ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
                while(start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        /* advance start now so the continues below are safe */
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_lock_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        /*
                                         * already under IO and no longer
                                         * dirty: nothing left for us to do
                                         */
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        /* write_one_page unlocks the page for us */
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        /* wait for all the IO kicked off above */
        err = filemap_fdatawait(btree_inode->i_mapping);
        if (err)
                werr = err;
        return werr;
}
272
273 static int update_cowonly_root(struct btrfs_trans_handle *trans,
274                                struct btrfs_root *root)
275 {
276         int ret;
277         u64 old_root_bytenr;
278         struct btrfs_root *tree_root = root->fs_info->tree_root;
279
280         btrfs_write_dirty_block_groups(trans, root);
281         while(1) {
282                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
283                 if (old_root_bytenr == root->node->start)
284                         break;
285                 btrfs_set_root_bytenr(&root->root_item,
286                                        root->node->start);
287                 btrfs_set_root_level(&root->root_item,
288                                      btrfs_header_level(root->node));
289                 ret = btrfs_update_root(trans, tree_root,
290                                         &root->root_key,
291                                         &root->root_item);
292                 BUG_ON(ret);
293                 btrfs_write_dirty_block_groups(trans, root);
294         }
295         return 0;
296 }
297
298 int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
299                             struct btrfs_root *root)
300 {
301         struct btrfs_fs_info *fs_info = root->fs_info;
302         struct list_head *next;
303
304         while(!list_empty(&fs_info->dirty_cowonly_roots)) {
305                 next = fs_info->dirty_cowonly_roots.next;
306                 list_del_init(next);
307                 root = list_entry(next, struct btrfs_root, dirty_list);
308                 update_cowonly_root(trans, root);
309         }
310         return 0;
311 }
312
/*
 * A dead (deleted or superseded) root waiting to have its blocks
 * dropped.  @root is a private copy of the root as of the commit that
 * orphaned it; @latest_root points at the live root it was taken from.
 */
struct dirty_root {
        struct list_head list;
        struct btrfs_root *root;
        struct btrfs_root *latest_root;
};
318
319 int btrfs_add_dead_root(struct btrfs_root *root,
320                         struct btrfs_root *latest,
321                         struct list_head *dead_list)
322 {
323         struct dirty_root *dirty;
324
325         dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
326         if (!dirty)
327                 return -ENOMEM;
328         dirty->root = root;
329         dirty->latest_root = latest;
330         list_add(&dirty->list, dead_list);
331         return 0;
332 }
333
334 static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
335                                     struct radix_tree_root *radix,
336                                     struct list_head *list)
337 {
338         struct dirty_root *dirty;
339         struct btrfs_root *gang[8];
340         struct btrfs_root *root;
341         int i;
342         int ret;
343         int err = 0;
344         u32 refs;
345
346         while(1) {
347                 ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
348                                                  ARRAY_SIZE(gang),
349                                                  BTRFS_ROOT_TRANS_TAG);
350                 if (ret == 0)
351                         break;
352                 for (i = 0; i < ret; i++) {
353                         root = gang[i];
354                         radix_tree_tag_clear(radix,
355                                      (unsigned long)root->root_key.objectid,
356                                      BTRFS_ROOT_TRANS_TAG);
357                         if (root->commit_root == root->node) {
358                                 WARN_ON(root->node->start !=
359                                         btrfs_root_bytenr(&root->root_item));
360                                 free_extent_buffer(root->commit_root);
361                                 root->commit_root = NULL;
362
363                                 /* make sure to update the root on disk
364                                  * so we get any updates to the block used
365                                  * counts
366                                  */
367                                 err = btrfs_update_root(trans,
368                                                 root->fs_info->tree_root,
369                                                 &root->root_key,
370                                                 &root->root_item);
371                                 continue;
372                         }
373                         dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
374                         BUG_ON(!dirty);
375                         dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
376                         BUG_ON(!dirty->root);
377
378                         memset(&root->root_item.drop_progress, 0,
379                                sizeof(struct btrfs_disk_key));
380                         root->root_item.drop_level = 0;
381
382                         memcpy(dirty->root, root, sizeof(*root));
383                         dirty->root->node = root->commit_root;
384                         dirty->latest_root = root;
385                         spin_lock_init(&dirty->root->node_lock);
386                         mutex_init(&dirty->root->objectid_mutex);
387
388                         root->commit_root = NULL;
389
390                         root->root_key.offset = root->fs_info->generation;
391                         btrfs_set_root_bytenr(&root->root_item,
392                                               root->node->start);
393                         btrfs_set_root_level(&root->root_item,
394                                              btrfs_header_level(root->node));
395                         err = btrfs_insert_root(trans, root->fs_info->tree_root,
396                                                 &root->root_key,
397                                                 &root->root_item);
398                         if (err)
399                                 break;
400
401                         refs = btrfs_root_refs(&dirty->root->root_item);
402                         btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
403                         err = btrfs_update_root(trans, root->fs_info->tree_root,
404                                                 &dirty->root->root_key,
405                                                 &dirty->root->root_item);
406
407                         BUG_ON(err);
408                         if (refs == 1) {
409                                 list_add(&dirty->list, list);
410                         } else {
411                                 WARN_ON(1);
412                                 kfree(dirty->root);
413                                 kfree(dirty);
414                         }
415                 }
416         }
417         return err;
418 }
419
/*
 * Defragment the leaves of @root, one transaction per pass, until
 * btrfs_defrag_leaves() stops returning -EAGAIN or the filesystem is
 * closing.  defrag_running is a non-atomic busy flag paired with
 * smp_mb() to keep concurrent defrags from piling up.  Always
 * returns 0.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        int ret;
        struct btrfs_trans_handle *trans;
        unsigned long nr;

        smp_mb();
        if (root->defrag_running)
                return 0;
        trans = btrfs_start_transaction(root, 1);
        while (1) {
                root->defrag_running = 1;
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
                /* remember blocks_used before the handle is freed */
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                trans = btrfs_start_transaction(root, 1);
                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        smp_mb();
        btrfs_end_transaction(trans, root);
        return 0;
}
448
/*
 * Free the blocks of every dead root on @list, deleting each root
 * item from @tree_root when done.  btrfs_drop_snapshot() works
 * incrementally (-EAGAIN means "more to do"), so each pass runs in
 * its own transaction with drop_mutex dropped in between to let
 * other work through.  Entries and their root copies are freed here.
 *
 * Returns 0 on success; hard failures BUG().
 */
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                                     struct list_head *list)
{
        struct dirty_root *dirty;
        struct btrfs_trans_handle *trans;
        unsigned long nr;
        u64 num_bytes;
        u64 bytes_used;
        int ret = 0;
        int err;

        while(!list_empty(list)) {
                struct btrfs_root *root;

                dirty = list_entry(list->next, struct dirty_root, list);
                list_del_init(&dirty->list);

                /* bytes used before dropping, for the accounting below */
                num_bytes = btrfs_root_used(&dirty->root->root_item);
                root = dirty->latest_root;
                atomic_inc(&root->fs_info->throttles);

                mutex_lock(&root->fs_info->drop_mutex);
                while(1) {
                        trans = btrfs_start_transaction(tree_root, 1);
                        ret = btrfs_drop_snapshot(trans, dirty->root);
                        if (ret != -EAGAIN) {
                                /* done (or hard error): keep trans open
                                 * for the del_root below */
                                break;
                        }

                        /* persist drop_progress so a crash can resume */
                        err = btrfs_update_root(trans,
                                        tree_root,
                                        &dirty->root->root_key,
                                        &dirty->root->root_item);
                        if (err)
                                ret = err;
                        nr = trans->blocks_used;
                        ret = btrfs_end_transaction_throttle(trans, tree_root);
                        BUG_ON(ret);

                        /* breathe: let other drop_mutex users through */
                        mutex_unlock(&root->fs_info->drop_mutex);
                        btrfs_btree_balance_dirty(tree_root, nr);
                        cond_resched();
                        mutex_lock(&root->fs_info->drop_mutex);
                }
                BUG_ON(ret);
                atomic_dec(&root->fs_info->throttles);

                /* credit the freed bytes back to the live root */
                mutex_lock(&root->fs_info->alloc_mutex);
                num_bytes -= btrfs_root_used(&dirty->root->root_item);
                bytes_used = btrfs_root_used(&root->root_item);
                if (num_bytes) {
                        record_root_in_trans(root);
                        btrfs_set_root_used(&root->root_item,
                                            bytes_used - num_bytes);
                }
                mutex_unlock(&root->fs_info->alloc_mutex);

                ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
                if (ret) {
                        BUG();
                        break;
                }
                mutex_unlock(&root->fs_info->drop_mutex);

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                free_extent_buffer(dirty->root->node);
                kfree(dirty->root);
                kfree(dirty);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        return ret;
}
526
/*
 * Materialize one pending snapshot: COW-copy the source root's node,
 * insert a new root item for it, then wire up the directory entry and
 * inode back reference under the filesystem root directory.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        int namelen;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = -ENOMEM;
                goto fail;
        }
        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret)
                goto fail;

        /* the snapshot starts out as a copy of the source root item */
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        key.objectid = objectid;
        key.offset = 1;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

        /* COW the source root node so the snapshot sees a stable copy */
        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_bytenr(new_root_item, tmp->start);
        btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
        ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
                                new_root_item);
        /* tmp was returned locked by btrfs_copy_root */
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto fail;

        /*
         * insert the directory item
         */
        key.offset = (u64)-1;
        namelen = strlen(pending->name);
        ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
                                    pending->name, namelen,
                                    root->fs_info->sb->s_root->d_inode->i_ino,
                                    &key, BTRFS_FT_DIR);

        if (ret)
                goto fail;

        ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
                             pending->name, strlen(pending->name), objectid,
                             root->fs_info->sb->s_root->d_inode->i_ino);

        /* Invalidate existing dcache entry for new snapshot. */
        btrfs_invalidate_dcache_root(root, pending->name, namelen);

fail:
        kfree(new_root_item);
        return ret;
}
596
597 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
598                                              struct btrfs_fs_info *fs_info)
599 {
600         struct btrfs_pending_snapshot *pending;
601         struct list_head *head = &trans->transaction->pending_snapshots;
602         int ret;
603
604         while(!list_empty(head)) {
605                 pending = list_entry(head->next,
606                                      struct btrfs_pending_snapshot, list);
607                 ret = create_pending_snapshot(trans, fs_info, pending);
608                 BUG_ON(ret);
609                 list_del(&pending->list);
610                 kfree(pending->name);
611                 kfree(pending);
612         }
613         return 0;
614 }
615
/*
 * Commit the transaction @trans belongs to.
 *
 * If someone else already started committing this transaction we just
 * end our handle and wait for their commit to finish.  Otherwise we
 * mark the transaction blocked, wait for every other writer to
 * detach, create pending snapshots, write out the dirty roots and
 * tree roots, publish the new super block and finally release the
 * pinned extents.  Dead roots are reaped here only when the
 * filesystem is closing; otherwise they are queued for the cleaner.
 *
 * Returns 0 on success, -ENOMEM if the pinned copy can't be
 * allocated.
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct list_head dirty_fs_roots;
        struct extent_io_tree *pinned_copy;
        DEFINE_WAIT(wait);
        int ret;

        INIT_LIST_HEAD(&dirty_fs_roots);

        mutex_lock(&root->fs_info->trans_mutex);
        if (trans->transaction->in_commit) {
                /* someone else is committing: piggyback on their commit */
                cur_trans = trans->transaction;
                trans->transaction->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
        if (!pinned_copy)
                return -ENOMEM;

        extent_io_tree_init(pinned_copy,
                             root->fs_info->btree_inode->i_mapping, GFP_NOFS);

        /* we own the commit from here on */
        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        cur_trans = trans->transaction;
        /* make sure the previous transaction's commit has finished */
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

        /*
         * wait until we are the only writer left and no new writers
         * joined while we slept (num_joined changing means a join)
         */
        do {
                joined = cur_trans->num_joined;
                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
                 (cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
                              &dirty_fs_roots);
        BUG_ON(ret);

        ret = btrfs_commit_tree_roots(trans, root);
        BUG_ON(ret);

        /* detach the transaction; new writers will start a fresh one */
        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);
        btrfs_set_super_generation(&root->fs_info->super_copy,
                                   cur_trans->transid);
        btrfs_set_super_root(&root->fs_info->super_copy,
                             root->fs_info->tree_root->node->start);
        btrfs_set_super_root_level(&root->fs_info->super_copy,
                           btrfs_header_level(root->fs_info->tree_root->node));

        btrfs_set_super_chunk_root(&root->fs_info->super_copy,
                                   chunk_root->node->start);
        btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
                                         btrfs_header_level(chunk_root->node));
        /* freeze the super as it must hit disk for this commit */
        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        btrfs_copy_pinned(root, pinned_copy);

        /* unblock writers that were waiting out the commit */
        trans->transaction->blocked = 0;
        wake_up(&root->fs_info->transaction_throttle);
        wake_up(&root->fs_info->transaction_wait);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root);

        /* super is on disk: pinned extents may now be reused */
        btrfs_finish_extent_commit(trans, root, pinned_copy);
        mutex_lock(&root->fs_info->trans_mutex);

        kfree(pinned_copy);

        cur_trans->commit_done = 1;
        root->fs_info->last_trans_committed = cur_trans->transid;
        wake_up(&cur_trans->commit_wait);
        /* once for our handle, once for running_transaction's ref */
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        if (root->fs_info->closing)
                list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
        else
                list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);

        mutex_unlock(&root->fs_info->trans_mutex);
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        /* at unmount, drop the dead roots right now */
        if (root->fs_info->closing) {
                drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
        }
        return ret;
}
757
758 int btrfs_clean_old_snapshots(struct btrfs_root *root)
759 {
760         struct list_head dirty_roots;
761         INIT_LIST_HEAD(&dirty_roots);
762 again:
763         mutex_lock(&root->fs_info->trans_mutex);
764         list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
765         mutex_unlock(&root->fs_info->trans_mutex);
766
767         if (!list_empty(&dirty_roots)) {
768                 drop_dirty_roots(root, &dirty_roots);
769                 goto again;
770         }
771         return 0;
772 }
773