/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex. I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age. Don't ever call
**                  this. Ever. There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction. clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty. Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk. Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/
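/*
 * A minimal usage sketch of the players described above (illustrative only;
 * the caller, block count and error handling here are hypothetical):
 *
 *      struct reiserfs_transaction_handle th;
 *      int err = journal_begin(&th, sb, nblocks);
 *      if (err)
 *              return err;
 *      reiserfs_prepare_for_journal(sb, bh, 1);
 *      // ... modify bh ...
 *      journal_mark_dirty(&th, sb, bh);        // log bh in this transaction
 *      err = journal_end(&th, sb, nblocks);    // may batch, commit, or flush
 */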
#include <linux/time.h>
#include <linux/semaphore.h>
#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/uaccess.h>

#include <asm/system.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems. This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
                                   structs at 4k */
#define BUFNR 64                /* read ahead */

/* cnode stat bits. Move these into reiserfs_fs.h */

#define BLOCK_FREED 2           /* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3    /* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4     /* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY 2
#define LIST_COMMIT_PENDING 4   /* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL 1             /* flush commit and real blocks */
#define COMMIT_NOW 2            /* end and commit this transaction */
#define WAIT 4                  /* wait for the log blocks to hit the disk */
static int do_journal_end(struct reiserfs_transaction_handle *,
                          struct super_block *, unsigned long nblocks,
                          int flags);
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
                        struct super_block *sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
        JBEGIN_REG = 0,         /* regular journal begin */
        JBEGIN_JOIN = 1,        /* join the running transaction if at all possible */
        JBEGIN_ABORT = 2,       /* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block *sb,
                              unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *sb)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        memset(journal->j_hash_table, 0,
               JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));

/*
** clears BH_Dirty and sticks the buffer on the clean list. Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
        clear_buffer_dirty(bh);
        clear_buffer_journal_test(bh);

static void disable_barrier(struct super_block *s)
        REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
        printk("reiserfs: disabling flush barriers on %s\n",
               reiserfs_bdevname(s));
static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
                                                         *sb)
        struct reiserfs_bitmap_node *bn;

        bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
        bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
        INIT_LIST_HEAD(&bn->list);

static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct reiserfs_bitmap_node *bn = NULL;
        struct list_head *entry = journal->j_bitmap_nodes.next;

        journal->j_used_bitmap_nodes++;
        if (entry != &journal->j_bitmap_nodes) {
                bn = list_entry(entry, struct reiserfs_bitmap_node, list);
                memset(bn->data, 0, sb->s_blocksize);
                journal->j_free_bitmap_nodes--;
                return bn;
        }
        bn = allocate_bitmap_node(sb);

static inline void free_bitmap_node(struct super_block *sb,
                                    struct reiserfs_bitmap_node *bn)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        journal->j_used_bitmap_nodes--;
        if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
                kfree(bn->data);
                kfree(bn);
        } else {
                list_add(&bn->list, &journal->j_bitmap_nodes);
                journal->j_free_bitmap_nodes++;
        }

static void allocate_bitmap_nodes(struct super_block *sb)
        int i;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct reiserfs_bitmap_node *bn = NULL;
        for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
                bn = allocate_bitmap_node(sb);
                if (bn) {
                        list_add(&bn->list, &journal->j_bitmap_nodes);
                        journal->j_free_bitmap_nodes++;
                } else {
                        break;  /* this is ok, we'll try again when more are needed */
                }
        }

static int set_bit_in_list_bitmap(struct super_block *sb,
                                  b_blocknr_t block,
                                  struct reiserfs_list_bitmap *jb)
        unsigned int bmap_nr = block / (sb->s_blocksize << 3);
        unsigned int bit_nr = block % (sb->s_blocksize << 3);

        if (!jb->bitmaps[bmap_nr]) {
                jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
        }
        set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
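/*
 * Worked example for the arithmetic above, assuming a (hypothetical)
 * 4096-byte block size: each bitmap node covers s_blocksize << 3 == 32768
 * blocks, so block 100000 maps to bitmap node 100000 / 32768 == 3,
 * bit 100000 % 32768 == 1696.
 */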
static void cleanup_bitmap_list(struct super_block *sb,
                                struct reiserfs_list_bitmap *jb)
        int i;
        if (jb->bitmaps == NULL)
                return;

        for (i = 0; i < reiserfs_bmap_count(sb); i++) {
                if (jb->bitmaps[i]) {
                        free_bitmap_node(sb, jb->bitmaps[i]);
                        jb->bitmaps[i] = NULL;
                }
        }

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *sb,
                             struct reiserfs_list_bitmap *jb_array)
        int i;
        struct reiserfs_list_bitmap *jb;
        for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
                jb = jb_array + i;
                jb->journal_list = NULL;
                cleanup_bitmap_list(sb, jb);
        }

static int free_bitmap_nodes(struct super_block *sb)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct list_head *next = journal->j_bitmap_nodes.next;
        struct reiserfs_bitmap_node *bn;

        while (next != &journal->j_bitmap_nodes) {
                bn = list_entry(next, struct reiserfs_bitmap_node, list);
                next = journal->j_bitmap_nodes.next;
                journal->j_free_bitmap_nodes--;
        }

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   unsigned int bmap_nr)
        int i;
        struct reiserfs_list_bitmap *jb;
        int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

        for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
                jb = jb_array + i;
                jb->journal_list = NULL;
                jb->bitmaps = vmalloc(mem);
                if (!jb->bitmaps) {
                        reiserfs_warning(sb, "clm-2000", "unable to "
                                         "allocate bitmaps for journal lists");
                }
                memset(jb->bitmaps, 0, mem);
        }
        free_list_bitmaps(sb, jb_array);

/*
** find an available list bitmap. If you can't find one, flush a commit list
** and try again.
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
                                                    struct reiserfs_journal_list
                                                    *jl)
        int i, j;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct reiserfs_list_bitmap *jb = NULL;

        for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
                i = journal->j_list_bitmap_index;
                journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
                jb = journal->j_list_bitmap + i;
                if (journal->j_list_bitmap[i].journal_list) {
                        flush_commit_list(sb,
                                          journal->j_list_bitmap[i].
                                          journal_list, 1);
                        if (!journal->j_list_bitmap[i].journal_list) {
                                break;
                        }
                }
        }
        if (jb->journal_list) { /* double check to make sure it flushed correctly */
                return NULL;
        }
        jb->journal_list = jl;

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
        struct reiserfs_journal_cnode *head;
        int i;

        if (num_cnodes <= 0) {
                return NULL;
        }
        head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
        memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
        head[0].next = head + 1;
        for (i = 1; i < num_cnodes; i++) {
                head[i].prev = head + (i - 1);
                head[i].next = head + (i + 1);  /* if last one, overwrite it after the if */
        }
        head[num_cnodes - 1].next = NULL;
        return head;
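/*
 * The result is one vmalloc'd array linked into a NULL-terminated doubly
 * linked free list (sketch for a hypothetical num_cnodes == 4):
 *
 *      NULL <- head[0] <-> head[1] <-> head[2] <-> head[3] -> NULL
 *
 * head[0].prev stays NULL courtesy of the memset above.
 */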
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
        struct reiserfs_journal_cnode *cn;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);

        reiserfs_check_lock_depth(sb, "get_cnode");

        if (journal->j_cnode_free <= 0) {
                return NULL;
        }
        journal->j_cnode_used++;
        journal->j_cnode_free--;
        cn = journal->j_cnode_free_list;
        if (cn->next) {
                cn->next->prev = NULL;
        }
        journal->j_cnode_free_list = cn->next;
        memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
        return cn;

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *sb,
                       struct reiserfs_journal_cnode *cn)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);

        reiserfs_check_lock_depth(sb, "free_cnode");

        journal->j_cnode_used--;
        journal->j_cnode_free++;
        /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); */
        cn->next = journal->j_cnode_free_list;
        if (journal->j_cnode_free_list) {
                journal->j_cnode_free_list->prev = cn;
        }
        cn->prev = NULL;        /* not needed with the memset, but I might kill the memset, and forget to do this */
        journal->j_cnode_free_list = cn;

static void clear_prepared_bits(struct buffer_head *bh)
        clear_buffer_journal_prepared(bh);
        clear_buffer_journal_restore_dirty(bh);

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
                                                                  super_block
                                                                  *sb,
                                                                  struct
                                                                  reiserfs_journal_cnode
                                                                  **table,
                                                                  long bl)
        struct reiserfs_journal_cnode *cn;
        cn = journal_hash(table, sb, bl);
        while (cn) {
                if (cn->blocknr == bl && cn->sb == sb)
                        return cn;
                cn = cn->hnext;
        }
        return NULL;

/*
** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting
** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl. Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *sb,
                        unsigned int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct reiserfs_journal_cnode *cn;
        struct reiserfs_list_bitmap *jb;
        int i;

        *next_zero_bit = 0;     /* always start this at zero. */

        PROC_INFO_INC(sb, journal.in_journal);
        /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
        ** if we crash before the transaction that freed it commits, this transaction won't
        ** have committed either, and the block will never be written
        */
        for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
                PROC_INFO_INC(sb, journal.in_journal_bitmap);
                jb = journal->j_list_bitmap + i;
                if (jb->journal_list && jb->bitmaps[bmap_nr] &&
                    test_bit(bit_nr,
                             (unsigned long *)jb->bitmaps[bmap_nr]->
                             data)) {
                        *next_zero_bit =
                            find_next_zero_bit((unsigned long *)
                                               (jb->bitmaps[bmap_nr]->
                                                data),
                                               sb->s_blocksize << 3,
                                               bit_nr + 1);
                        return 1;
                }
        }

        bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
        /* is it in any old transactions? */
        if (search_all &&
            (cn = get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
                return 1;
        }

        /* is it in the current transaction? This should never happen */
        if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
                return 1;
        }

        PROC_INFO_INC(sb, journal.in_journal_reusable);
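/*
 * Hypothetical caller sketch for the block allocator (only
 * reiserfs_in_journal itself is real; the surrounding code is illustrative):
 *
 *      b_blocknr_t next_zero;
 *      if (reiserfs_in_journal(sb, bmap_nr, bit_nr, 1, &next_zero)) {
 *              // bl can't be reused yet; retry from next_zero, which may
 *              // itself be rejected on the next call
 *      }
 */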
/* insert cn into table */
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
                                       struct reiserfs_journal_cnode *cn)
        struct reiserfs_journal_cnode *cn_orig;

        cn_orig = journal_hash(table, cn->sb, cn->blocknr);
        cn->hnext = cn_orig;
        cn->hprev = NULL;
        if (cn_orig) {
                cn_orig->hprev = cn;
        }
        journal_hash(table, cn->sb, cn->blocknr) = cn;

/*
 * Several mutexes depend on the write lock.
 * However sometimes we want to relax the write lock while we hold
 * these mutexes, relying on the release/reacquire-on-schedule()
 * behaviour of the BKL that was used before.
 * Reiserfs performance and locking were based on this scheme.
 * Now that the write lock is a mutex and not the BKL anymore, doing so
 * may result in a deadlock:
 *
 * A acquire write_lock
 * A acquire j_commit_mutex
 * A release write_lock and wait for something
 * B acquire write_lock
 * B can't acquire j_commit_mutex and sleep
 * A can't acquire write lock anymore
 *
 * What we do here is avoiding such deadlock by playing the same game
 * as the BKL: if we can't acquire a mutex that depends on the write lock,
 * we release the write lock, wait a bit and then retry.
 *
 * The mutexes concerned by this hack are:
 * - The commit mutex of a journal list
 */
static inline void reiserfs_mutex_lock_safe(struct mutex *m,
                                            struct super_block *s)
        while (!mutex_trylock(m)) {
                reiserfs_write_unlock(s);
                schedule();
                reiserfs_write_lock(s);
        }
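/*
 * For example, flush_commit_list below takes the per-list commit mutex via
 * reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s) rather than mutex_lock(),
 * so a thread holding the write lock cannot deadlock against a committer
 * that released it.
 */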
/* lock the current transaction */
static inline void lock_journal(struct super_block *sb)
        PROC_INFO_INC(sb, journal.lock_journal);

        reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb);

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *sb)
        mutex_unlock(&SB_JOURNAL(sb)->j_mutex);

static inline void get_journal_list(struct reiserfs_journal_list *jl)
        jl->j_refcount++;

static inline void put_journal_list(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
        if (jl->j_refcount < 1) {
                reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
                               jl->j_trans_id, jl->j_refcount);
        }
        if (--jl->j_refcount == 0)
                kfree(jl);

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *sb,
                                           struct reiserfs_journal_list *jl)
        struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
        if (jb) {
                cleanup_bitmap_list(sb, jb);
        }
        jl->j_list_bitmap->journal_list = NULL;
        jl->j_list_bitmap = NULL;

static int journal_list_still_alive(struct super_block *s,
                                    unsigned int trans_id)
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        struct list_head *entry = &journal->j_journal_list;
        struct reiserfs_journal_list *jl;

        if (!list_empty(entry)) {
                jl = JOURNAL_LIST_ENTRY(entry->next);
                if (jl->j_trans_id <= trans_id) {
                        return 1;
                }
        }
        return 0;

/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason. Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh. If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
        struct page *page = bh->b_page;
        if (!page->mapping && trylock_page(page)) {
                page_cache_get(page);
                put_bh(bh);
                if (!page->mapping)
                        try_to_free_buffers(page);
                unlock_page(page);
                page_cache_release(page);
        } else {
                put_bh(bh);
        }

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
        char b[BDEVNAME_SIZE];

        if (buffer_journaled(bh)) {
                reiserfs_warning(NULL, "clm-2084",
                                 "pinned buffer %lu:%s sent to disk",
                                 bh->b_blocknr, bdevname(bh->b_bdev, b));
        }
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
        release_buffer_page(bh);

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
        if (uptodate)
                set_buffer_uptodate(bh);
        else
                clear_buffer_uptodate(bh);
        unlock_buffer(bh);
        put_bh(bh);

static void submit_logged_buffer(struct buffer_head *bh)
        get_bh(bh);
        bh->b_end_io = reiserfs_end_buffer_io_sync;
        clear_buffer_journal_new(bh);
        clear_buffer_dirty(bh);
        if (!test_clear_buffer_journal_test(bh))
                BUG();
        if (!buffer_uptodate(bh))
                BUG();
        submit_bh(WRITE, bh);

static void submit_ordered_buffer(struct buffer_head *bh)
        get_bh(bh);
        bh->b_end_io = reiserfs_end_ordered_io;
        clear_buffer_dirty(bh);
        if (!buffer_uptodate(bh))
                BUG();
        submit_bh(WRITE, bh);

static int submit_barrier_buffer(struct buffer_head *bh)
        get_bh(bh);
        bh->b_end_io = reiserfs_end_ordered_io;
        clear_buffer_dirty(bh);
        if (!buffer_uptodate(bh))
                BUG();
        return submit_bh(WRITE_BARRIER, bh);

static void check_barrier_completion(struct super_block *s,
                                     struct buffer_head *bh)
        if (buffer_eopnotsupp(bh)) {
                clear_buffer_eopnotsupp(bh);
                disable_barrier(s);
                set_buffer_uptodate(bh);
                set_buffer_dirty(bh);
                reiserfs_write_unlock(s);
                sync_dirty_buffer(bh);
                reiserfs_write_lock(s);
        }

#define CHUNK_SIZE 32
struct buffer_chunk {
        struct buffer_head *bh[CHUNK_SIZE];
        int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
        int i;
        for (i = 0; i < chunk->nr; i++) {
                submit_logged_buffer(chunk->bh[i]);
        }
        chunk->nr = 0;

static void write_ordered_chunk(struct buffer_chunk *chunk)
        int i;
        for (i = 0; i < chunk->nr; i++) {
                submit_ordered_buffer(chunk->bh[i]);
        }
        chunk->nr = 0;

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t *lock, void (fn) (struct buffer_chunk *))
        int ret = 0;
        BUG_ON(chunk->nr >= CHUNK_SIZE);
        chunk->bh[chunk->nr++] = bh;
        if (chunk->nr >= CHUNK_SIZE) {
                ret = 1;
                if (lock)
                        spin_unlock(lock);
                fn(chunk);
                if (lock)
                        spin_lock(lock);
        }
        return ret;
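/*
 * A chunk just batches up to CHUNK_SIZE buffer heads and hands them to fn
 * (write_chunk or write_ordered_chunk) in one go. Hypothetical use:
 *
 *      struct buffer_chunk chunk;
 *      chunk.nr = 0;
 *      add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
 *      // ... chunk is submitted automatically once it fills up ...
 */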
static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
        struct reiserfs_jh *jh;

        jh = kmalloc(sizeof(*jh), GFP_NOFS);
        if (jh) {
                atomic_inc(&nr_reiserfs_jh);
                return jh;
        }

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
        struct reiserfs_jh *jh;

        jh = bh->b_private;
        if (jh) {
                bh->b_private = NULL;
                list_del_init(&jh->list);
                kfree(jh);
                if (atomic_read(&nr_reiserfs_jh) <= 0)
                        BUG();
                atomic_dec(&nr_reiserfs_jh);
        }

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                           int tail)
        struct reiserfs_jh *jh;

        if (bh->b_private) {
                spin_lock(&j->j_dirty_buffers_lock);
                if (!bh->b_private) {
                        spin_unlock(&j->j_dirty_buffers_lock);
                }
                jh = bh->b_private;
                list_del_init(&jh->list);
        } else {
                jh = alloc_jh();
                spin_lock(&j->j_dirty_buffers_lock);
                /* buffer must be locked for __add_jh, should be able to have
                 * two adds at the same time
                 */
                BUG_ON(bh->b_private);
                jh->bh = bh;
                bh->b_private = jh;
        }
        jh->jl = j->j_current_jl;
        if (tail)
                list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
        else
                list_add_tail(&jh->list, &jh->jl->j_bh_list);
        spin_unlock(&j->j_dirty_buffers_lock);
        return 0;

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
        return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);

int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
        return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
                                 struct reiserfs_journal *j,
                                 struct reiserfs_journal_list *jl,
                                 struct list_head *list)
        struct buffer_head *bh;
        struct reiserfs_jh *jh;
        int ret = j->j_errno;
        struct buffer_chunk chunk;
        struct list_head tmp;
        INIT_LIST_HEAD(&tmp);

        chunk.nr = 0;
        while (!list_empty(list)) {
                jh = JH_ENTRY(list->next);
                bh = jh->bh;
                if (!trylock_buffer(bh)) {
                        if (!buffer_dirty(bh)) {
                                list_move(&jh->list, &tmp);
                        }
                        if (chunk.nr)
                                write_ordered_chunk(&chunk);
                }
                /* in theory, dirty non-uptodate buffers should never get here,
                 * but the upper layer io error paths still have a few quirks.
                 * Handle them here as gracefully as we can
                 */
                if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
                        clear_buffer_dirty(bh);
                        ret = -EIO;
                }
                if (buffer_dirty(bh)) {
                        list_move(&jh->list, &tmp);
                        add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
                } else {
                        reiserfs_free_jh(bh);
                }
                cond_resched_lock(lock);
        }
        if (chunk.nr)
                write_ordered_chunk(&chunk);

        while (!list_empty(&tmp)) {
                jh = JH_ENTRY(tmp.prev);
                bh = jh->bh;
                reiserfs_free_jh(bh);

                if (buffer_locked(bh)) {
                        spin_unlock(lock);
                        wait_on_buffer(bh);
                        spin_lock(lock);
                }
                if (!buffer_uptodate(bh)) {
                        ret = -EIO;
                }
                /* ugly interaction with invalidatepage here.
                 * reiserfs_invalidate_page will pin any buffer that has a valid
                 * journal head from an older transaction. If someone else sets
                 * our buffer dirty after we write it in the first loop, and
                 * then someone truncates the page away, nobody will ever write
                 * the buffer. We're safe if we write the page one last time
                 * after freeing the journal head.
                 */
                if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
                        ll_rw_block(WRITE, 1, &bh);
                }
                cond_resched_lock(lock);
        }
        return ret;

static int flush_older_commits(struct super_block *s,
                               struct reiserfs_journal_list *jl)
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        struct reiserfs_journal_list *other_jl;
        struct reiserfs_journal_list *first_jl;
        struct list_head *entry;
        unsigned int trans_id = jl->j_trans_id;
        unsigned int other_trans_id;
        unsigned int first_trans_id;

find_first:
        /*
         * first we walk backwards to find the oldest uncommitted transaction
         */
        first_jl = jl;
        entry = jl->j_list.prev;
        while (1) {
                other_jl = JOURNAL_LIST_ENTRY(entry);
                if (entry == &journal->j_journal_list ||
                    atomic_read(&other_jl->j_older_commits_done))
                        break;
                first_jl = other_jl;
                entry = other_jl->j_list.prev;
        }

        /* if we didn't find any older uncommitted transactions, return now */
        if (first_jl == jl) {
                return 0;
        }

        first_trans_id = first_jl->j_trans_id;

        entry = &first_jl->j_list;
        while (1) {
                other_jl = JOURNAL_LIST_ENTRY(entry);
                other_trans_id = other_jl->j_trans_id;

                if (other_trans_id < trans_id) {
                        if (atomic_read(&other_jl->j_commit_left) != 0) {
                                flush_commit_list(s, other_jl, 0);

                                /* list we were called with is gone, return */
                                if (!journal_list_still_alive(s, trans_id))
                                        return 1;

                                /* the one we just flushed is gone, this means all
                                 * older lists are also gone, so first_jl is no longer
                                 * valid either. Go back to the beginning.
                                 */
                                if (!journal_list_still_alive
                                    (s, other_trans_id)) {
                                        goto find_first;
                                }
                        }
                        entry = entry->next;
                        if (entry == &journal->j_journal_list)
                                return 0;
                } else {
                        return 0;
                }
        }
        return 0;

static int reiserfs_async_progress_wait(struct super_block *s)
        struct reiserfs_journal *j = SB_JOURNAL(s);

        if (atomic_read(&j->j_async_throttle)) {
                reiserfs_write_unlock(s);
                congestion_wait(BLK_RW_ASYNC, HZ / 10);
                reiserfs_write_lock(s);
        }
        return 0;

/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
*/
static int flush_commit_list(struct super_block *s,
                             struct reiserfs_journal_list *jl, int flushall)
        int i;
        b_blocknr_t bn;
        struct buffer_head *tbh = NULL;
        unsigned int trans_id = jl->j_trans_id;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        int retval = 0;
        int write_len;

        reiserfs_check_lock_depth(s, "flush_commit_list");

        if (atomic_read(&jl->j_older_commits_done)) {
                return 0;
        }

        /* before we can put our commit blocks on disk, we have to make sure everyone older than
        ** us is on disk too
        */
        BUG_ON(jl->j_len <= 0);
        BUG_ON(trans_id == journal->j_trans_id);

        get_journal_list(jl);
        if (flushall) {
                if (flush_older_commits(s, jl) == 1) {
                        /* list disappeared during flush_older_commits. return */
                        goto put_jl;
                }
        }

        /* make sure nobody is trying to flush this one at the same time */
        reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s);

        if (!journal_list_still_alive(s, trans_id)) {
                mutex_unlock(&jl->j_commit_mutex);
                goto put_jl;
        }
        BUG_ON(jl->j_trans_id == 0);

        /* this commit is done, exit */
        if (atomic_read(&(jl->j_commit_left)) <= 0) {
                if (flushall) {
                        atomic_set(&(jl->j_older_commits_done), 1);
                }
                mutex_unlock(&jl->j_commit_mutex);
                goto put_jl;
        }

        if (!list_empty(&jl->j_bh_list)) {
                int ret;

                /*
                 * We might sleep in numerous places inside
                 * write_ordered_buffers. Relax the write lock.
                 */
                reiserfs_write_unlock(s);
                ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
                                            journal, jl, &jl->j_bh_list);
                if (ret < 0 && retval == 0)
                        retval = ret;
                reiserfs_write_lock(s);
        }
        BUG_ON(!list_empty(&jl->j_bh_list));
        /*
         * for the description block and all the log blocks, submit any buffers
         * that haven't already reached the disk. Try to write at least 256
         * log blocks. later on, we will only wait on blocks that correspond
         * to this transaction, but while we're unplugging we might as well
         * get a chunk of data on there.
         */
        atomic_inc(&journal->j_async_throttle);
        write_len = jl->j_len + 1;
        if (write_len < 256)
                write_len = 256;
        for (i = 0; i < write_len; i++) {
                bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
                    SB_ONDISK_JOURNAL_SIZE(s);
                tbh = journal_find_get_block(s, bn);
                if (tbh) {
                        if (buffer_dirty(tbh))
                                ll_rw_block(WRITE, 1, &tbh);
                        put_bh(tbh);
                }
        }
        atomic_dec(&journal->j_async_throttle);
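/*
 * The log is a circular area of SB_ONDISK_JOURNAL_SIZE(s) blocks, which is
 * what the (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) above wraps around.
 * With a hypothetical 8192-block journal, j_start == 8000 and i == 300, the
 * write lands 108 blocks past SB_ONDISK_JOURNAL_1st_BLOCK(s), since
 * (8000 + 300) % 8192 == 108.
 */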
        /* We're skipping the commit if there's an error */
        if (retval || reiserfs_is_journal_aborted(journal))

        /* wait on everything written so far before writing the commit
         * if we are in barrier mode, send the commit down now
         */
        barrier = reiserfs_barrier_flush(s);
        if (barrier) {
                int ret;
                lock_buffer(jl->j_commit_bh);
                ret = submit_barrier_buffer(jl->j_commit_bh);
                if (ret == -EOPNOTSUPP) {
                        set_buffer_uptodate(jl->j_commit_bh);
                        disable_barrier(s);
                        barrier = 0;
                }
        }
        for (i = 0; i < (jl->j_len + 1); i++) {
                bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
                    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
                tbh = journal_find_get_block(s, bn);

                reiserfs_write_unlock(s);
                wait_on_buffer(tbh);
                reiserfs_write_lock(s);
                // since we're using ll_rw_block above, it might have skipped over
                // a locked buffer. Double check here
                //
                /* redundant, sync_dirty_buffer() checks */
                if (buffer_dirty(tbh)) {
                        reiserfs_write_unlock(s);
                        sync_dirty_buffer(tbh);
                        reiserfs_write_lock(s);
                }
                if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
                        reiserfs_warning(s, "journal-601",
                                         "buffer write failed");
#endif
                        retval = -EIO;
                }
                put_bh(tbh);    /* once for journal_find_get_block */
                put_bh(tbh);    /* once due to original getblk in do_journal_end */
                atomic_dec(&(jl->j_commit_left));
        }

        BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

        if (!barrier) {
                /* If there was a write error in the journal - we can't commit
                 * this transaction - it will be invalid and, if successful,
                 * will just end up propagating the write error out to
                 * the file system. */
                if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
                        if (buffer_dirty(jl->j_commit_bh))
                                BUG();
                        mark_buffer_dirty(jl->j_commit_bh);
                        reiserfs_write_unlock(s);
                        sync_dirty_buffer(jl->j_commit_bh);
                        reiserfs_write_lock(s);
                }
        } else {
                reiserfs_write_unlock(s);
                wait_on_buffer(jl->j_commit_bh);
                reiserfs_write_lock(s);
        }

        check_barrier_completion(s, jl->j_commit_bh);

        /* If there was a write error in the journal - we can't commit this
         * transaction - it will be invalid and, if successful, will just end
         * up propagating the write error out to the filesystem. */
        if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
                reiserfs_warning(s, "journal-615", "buffer write failed");
#endif
                retval = -EIO;
        }
        bforget(jl->j_commit_bh);
        if (journal->j_last_commit_id != 0 &&
            (jl->j_trans_id - journal->j_last_commit_id) != 1) {
                reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
                                 journal->j_last_commit_id, jl->j_trans_id);
        }
        journal->j_last_commit_id = jl->j_trans_id;

        /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */
        cleanup_freed_for_journal_list(s, jl);

        retval = retval ? retval : journal->j_errno;

        /* mark the metadata dirty */
        if (!retval)
                dirty_one_transaction(s, jl);
        atomic_dec(&(jl->j_commit_left));

        if (flushall) {
                atomic_set(&(jl->j_older_commits_done), 1);
        }
        mutex_unlock(&jl->j_commit_mutex);
put_jl:
        put_journal_list(s, jl);

        if (retval)
                reiserfs_abort(s, retval, "Journal write error in %s",
                               __func__);

/*
** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
                                                          reiserfs_journal_cnode
                                                          *cn)
        struct super_block *sb = cn->sb;
        b_blocknr_t blocknr = cn->blocknr;

        cn = cn->hprev;
        while (cn) {
                if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
                        return cn->jlist;
                }
                cn = cn->hprev;
        }
        return NULL;

static int newer_jl_done(struct reiserfs_journal_cnode *cn)
        struct super_block *sb = cn->sb;
        b_blocknr_t blocknr = cn->blocknr;

        cn = cn->hprev;
        while (cn) {
                if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
                    atomic_read(&cn->jlist->j_commit_left) != 0)
                        return 0;
                cn = cn->hprev;
        }
        return 1;

static void remove_journal_hash(struct super_block *,
                                struct reiserfs_journal_cnode **,
                                struct reiserfs_journal_list *, unsigned long,
                                int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction. Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *sb,
                                         struct reiserfs_journal_list *jl,
                                         int debug)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        struct reiserfs_journal_cnode *cn, *last;
        cn = jl->j_realblock;

        /* which is better, to lock once around the whole loop, or
        ** to lock for each call to remove_journal_hash?
        */
        while (cn) {
                if (cn->blocknr != 0) {
                        if (debug) {
                                reiserfs_warning(sb, "reiserfs-2201",
                                                 "block %u, bh is %d, state %ld",
                                                 cn->blocknr, cn->bh ? 1 : 0,
                                                 cn->state);
                        }
                        remove_journal_hash(sb, journal->j_list_hash_table,
                                            jl, cn->blocknr, 1);
                }
                last = cn;
                cn = cn->next;
                free_cnode(sb, last);
        }
        jl->j_realblock = NULL;

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
*/
static int _update_journal_header_block(struct super_block *sb,
                                        unsigned long offset,
                                        unsigned int trans_id)
        struct reiserfs_journal_header *jh;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);

        if (reiserfs_is_journal_aborted(journal))
                return -EIO;

        if (trans_id >= journal->j_last_flush_trans_id) {
                if (buffer_locked((journal->j_header_bh))) {
                        reiserfs_write_unlock(sb);
                        wait_on_buffer((journal->j_header_bh));
                        reiserfs_write_lock(sb);
                        if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
                                reiserfs_warning(sb, "journal-699",
                                                 "buffer write failed");
#endif
                                return -EIO;
                        }
                }
                journal->j_last_flush_trans_id = trans_id;
                journal->j_first_unflushed_offset = offset;
                jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
                                                        b_data);
                jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
                jh->j_first_unflushed_offset = cpu_to_le32(offset);
                jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

                if (reiserfs_barrier_flush(sb)) {
                        int ret;
                        lock_buffer(journal->j_header_bh);
                        ret = submit_barrier_buffer(journal->j_header_bh);
                        if (ret == -EOPNOTSUPP) {
                                set_buffer_uptodate(journal->j_header_bh);
                                disable_barrier(sb);
                        }
                        reiserfs_write_unlock(sb);
                        wait_on_buffer(journal->j_header_bh);
                        reiserfs_write_lock(sb);
                        check_barrier_completion(sb, journal->j_header_bh);
                } else {
                        set_buffer_dirty(journal->j_header_bh);
                        reiserfs_write_unlock(sb);
                        sync_dirty_buffer(journal->j_header_bh);
                        reiserfs_write_lock(sb);
                }
                if (!buffer_uptodate(journal->j_header_bh)) {
                        reiserfs_warning(sb, "journal-837",
                                         "IO error during journal replay");
                        return -EIO;
                }
        }
        return 0;

static int update_journal_header_block(struct super_block *sb,
                                       unsigned long offset,
                                       unsigned int trans_id)
        return _update_journal_header_block(sb, offset, trans_id);
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *sb,
                                     struct reiserfs_journal_list *jl)
        struct list_head *entry;
        struct reiserfs_journal_list *other_jl;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        unsigned int trans_id = jl->j_trans_id;

        /* we know we are the only ones flushing things, no extra race
         * protection is required.
         */
restart:
        entry = journal->j_journal_list.next;
        if (entry == &journal->j_journal_list)
                return 0;
        other_jl = JOURNAL_LIST_ENTRY(entry);
        if (other_jl->j_trans_id < trans_id) {
                BUG_ON(other_jl->j_refcount <= 0);
                /* do not flush all */
                flush_journal_list(sb, other_jl, 0);

                /* other_jl is now deleted from the list */
                goto restart;
        }
        return 0;

static void del_from_work_list(struct super_block *s,
                               struct reiserfs_journal_list *jl)
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        if (!list_empty(&jl->j_working_list)) {
                list_del_init(&jl->j_working_list);
                journal->j_num_work_lists--;
        }

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT. This can only be called while there are no journal writers,
** and the journal is locked. That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall)
        struct reiserfs_journal_list *pjl;
        struct reiserfs_journal_cnode *cn, *last;
        int count;
        int was_jwait = 0;
        int was_dirty = 0;
        struct buffer_head *saved_bh;
        unsigned long j_len_saved = jl->j_len;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        int err = 0;

        BUG_ON(j_len_saved <= 0);

        if (atomic_read(&journal->j_wcount) != 0) {
                reiserfs_warning(s, "clm-2048", "called with wcount %d",
                                 atomic_read(&journal->j_wcount));
        }
        BUG_ON(jl->j_trans_id == 0);

        /* if flushall == 0, the lock is already held */
        if (flushall) {
                reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
        } else if (mutex_trylock(&journal->j_flush_mutex)) {
                BUG();
        }

        count = 0;
        if (j_len_saved > journal->j_trans_max) {
                reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
                               j_len_saved, jl->j_trans_id);
        }

        /* if all the work is already done, get out of here */
        if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
            atomic_read(&(jl->j_commit_left)) <= 0) {
                goto flush_older_and_return;
        }

        /* start by putting the commit list on disk. This will also flush
        ** the commit lists of any older transactions
        */
        flush_commit_list(s, jl, 1);

        if (!(jl->j_state & LIST_DIRTY)
            && !reiserfs_is_journal_aborted(journal))
                BUG();

        /* are we done now? */
        if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
            atomic_read(&(jl->j_commit_left)) <= 0) {
                goto flush_older_and_return;
        }

        /* loop through each cnode, see if we need to write it,
        ** or wait on a more recent transaction, or just ignore it
        */
        if (atomic_read(&(journal->j_wcount)) != 0) {
                reiserfs_panic(s, "journal-844", "journal list is flushing, "
                               "wcount is not 0");
        }
        cn = jl->j_realblock;
        while (cn) {
                was_jwait = 0;
                was_dirty = 0;
                saved_bh = NULL;
                /* blocknr of 0 is no longer in the hash, ignore it */
                if (cn->blocknr == 0) {
                        goto free_cnode;
                }

                /* This transaction failed commit. Don't write out to the disk */
                if (!(jl->j_state & LIST_DIRTY))
                        goto free_cnode;

                pjl = find_newer_jl_for_cn(cn);
                /* the order is important here. We check pjl to make sure we
                ** don't clear BH_JDirty_wait if we aren't the one writing this
                ** block to disk
                */
                if (!pjl && cn->bh) {
                        saved_bh = cn->bh;

                        /* we do this to make sure nobody releases the buffer while
                        ** we are working with it
                        */
                        get_bh(saved_bh);

                        if (buffer_journal_dirty(saved_bh)) {
                                BUG_ON(!can_dirty(cn));
                                was_jwait = 1;
                                was_dirty = 1;
                        } else if (can_dirty(cn)) {
                                /* everything with !pjl && jwait should be writable */
                                BUG();
                        }
                }

                /* if someone has this block in a newer transaction, just make
                ** sure they are committed, and don't try writing it to disk
                */
                if (pjl) {
                        if (atomic_read(&pjl->j_commit_left))
                                flush_commit_list(s, pjl, 1);
                        goto free_cnode;
                }

                /* bh == NULL when the block got to disk on its own, OR,
                ** the block got freed in a future transaction
                */
                if (saved_bh == NULL) {
                        goto free_cnode;
                }

                /* this should never happen. kupdate_one_transaction has this list
                ** locked while it works, so we should never see a buffer here that
                ** is not marked JDirty_wait
                */
                if ((!was_jwait) && !buffer_locked(saved_bh)) {
                        reiserfs_warning(s, "journal-813",
                                         "BAD! buffer %llu %cdirty %cjwait, "
                                         "not in a newer transaction",
                                         (unsigned long long)saved_bh->
                                         b_blocknr, was_dirty ? ' ' : '!',
                                         was_jwait ? ' ' : '!');
                }
                if (was_dirty) {
                        /* we inc again because saved_bh gets decremented at free_cnode */
                        get_bh(saved_bh);
                        set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
                        lock_buffer(saved_bh);
                        BUG_ON(cn->blocknr != saved_bh->b_blocknr);
                        if (buffer_dirty(saved_bh))
                                submit_logged_buffer(saved_bh);
                        else
                                unlock_buffer(saved_bh);
                        count++;
                } else {
                        reiserfs_warning(s, "clm-2082",
                                         "Unable to flush buffer %llu in %s",
                                         (unsigned long long)saved_bh->
                                         b_blocknr, __func__);
                }
free_cnode:
                last = cn;
                cn = cn->next;
                if (saved_bh) {
                        /* we incremented this to keep others from taking the buffer head away */
                        put_bh(saved_bh);
                        if (atomic_read(&(saved_bh->b_count)) < 0) {
                                reiserfs_warning(s, "journal-945",
                                                 "saved_bh->b_count < 0");
                        }
                }
        }
        if (count > 0) {
                cn = jl->j_realblock;
                while (cn) {
                        if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
                                if (!cn->bh) {
                                        reiserfs_panic(s, "journal-1011",
                                                       "cn->bh is NULL");
                                }

                                reiserfs_write_unlock(s);
                                wait_on_buffer(cn->bh);
                                reiserfs_write_lock(s);

                                if (!cn->bh) {
                                        reiserfs_panic(s, "journal-1012",
                                                       "cn->bh is NULL");
                                }
                                if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
                                        reiserfs_warning(s, "journal-949",
                                                         "buffer write failed");
#endif
                                        err = -EIO;
                                }
                                /* note, we must clear the JDirty_wait bit after the up to date
                                ** check, otherwise we race against our flushpage routine
                                */
                                BUG_ON(!test_clear_buffer_journal_dirty
                                       (cn->bh));

                                /* drop one ref for us */
                                put_bh(cn->bh);
                                /* drop one ref for journal_mark_dirty */
                                release_buffer_page(cn->bh);
                        }
                        cn = cn->next;
                }
        }

        if (err)
                reiserfs_abort(s, -EIO,
                               "Write error while pushing transaction to disk in %s",
                               __func__);
flush_older_and_return:

        /* before we can update the journal header block, we _must_ flush all
        ** real blocks from all older transactions to disk. This is because
        ** once the header block is updated, this transaction will not be
        ** replayed after a crash
        */
        if (flushall) {
                flush_older_journal_lists(s, jl);
        }

        err = journal->j_errno;
        /* before we can remove everything from the hash tables for this
        ** transaction, we must make sure it can never be replayed
        **
        ** since we are only called from do_journal_end, we know for sure there
        ** are no allocations going on while we are flushing journal lists. So,
        ** we only need to update the journal header block for the last list
        */
        if (!err && flushall) {
                err =
                    update_journal_header_block(s,
                                                (jl->j_start + jl->j_len +
                                                 2) % SB_ONDISK_JOURNAL_SIZE(s),
                                                jl->j_trans_id);
                if (err)
                        reiserfs_abort(s, -EIO,
                                       "Write error while updating journal header in %s",
                                       __func__);
        }
        remove_all_from_journal_list(s, jl, 0);
        list_del_init(&jl->j_list);
        journal->j_num_lists--;
        del_from_work_list(s, jl);

        if (journal->j_last_flush_id != 0 &&
            (jl->j_trans_id - journal->j_last_flush_id) != 1) {
                reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
                                 journal->j_last_flush_id, jl->j_trans_id);
        }
        journal->j_last_flush_id = jl->j_trans_id;

        /* not strictly required since we are freeing the list, but it should
         * help find code using dead lists later on
         */
        jl->j_len = 0;
        atomic_set(&(jl->j_nonzerolen), 0);
        jl->j_start = 0;
        jl->j_realblock = NULL;
        jl->j_commit_bh = NULL;
        jl->j_trans_id = 0;
        jl->j_state = 0;
        put_journal_list(s, jl);
        if (flushall)
                mutex_unlock(&journal->j_flush_mutex);
        return err;
static int test_transaction(struct super_block *s,
                            struct reiserfs_journal_list *jl)
        struct reiserfs_journal_cnode *cn;

        if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
                return 1;

        cn = jl->j_realblock;
        while (cn) {
                /* if the blocknr == 0, this has been cleared from the hash,
                ** skip it
                */
                if (cn->blocknr == 0) {
                        goto next;
                }
                if (cn->bh && !newer_jl_done(cn))
                        return 0;
next:
                cn = cn->next;
        }
        return 1;

static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
        struct reiserfs_journal_cnode *cn;
        int ret = 0;

        jl->j_state |= LIST_TOUCHED;
        del_from_work_list(s, jl);
        if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
                return 0;
        }

        cn = jl->j_realblock;
        while (cn) {
                /* if the blocknr == 0, this has been cleared from the hash,
                ** skip it
                */
                if (cn->blocknr == 0) {
                        goto next;
                }
                if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
                        struct buffer_head *tmp_bh;
                        /* we can race against journal_mark_freed when we try
                         * to lock_buffer(cn->bh), so we have to inc the buffer
                         * count, and recheck things after locking
                         */
                        tmp_bh = cn->bh;
                        get_bh(tmp_bh);
                        lock_buffer(tmp_bh);
                        if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
                                if (!buffer_journal_dirty(tmp_bh) ||
                                    buffer_journal_prepared(tmp_bh))
                                        BUG();
                                add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
                                ret++;
                        } else {
                                /* note, cn->bh might be null now */
                                unlock_buffer(tmp_bh);
                        }
                        put_bh(tmp_bh);
                }
next:
                cn = cn->next;
        }
        return ret;

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl)
        struct reiserfs_journal_cnode *cn;
        struct reiserfs_journal_list *pjl;
        int ret = 0;

        jl->j_state |= LIST_DIRTY;
        cn = jl->j_realblock;
        while (cn) {
                /* look for a more recent transaction that logged this
                ** buffer. Only the most recent transaction with a buffer in
                ** it is allowed to send that buffer to disk
                */
                pjl = find_newer_jl_for_cn(cn);
                if (!pjl && cn->blocknr && cn->bh
                    && buffer_journal_dirty(cn->bh)) {
                        BUG_ON(!can_dirty(cn));
                        /* if the buffer is prepared, it will either be logged
                         * or restored. If restored, we need to make sure
                         * it actually gets marked dirty
                         */
                        clear_buffer_journal_new(cn->bh);
                        if (buffer_journal_prepared(cn->bh)) {
                                set_buffer_journal_restore_dirty(cn->bh);
                        } else {
                                set_buffer_journal_test(cn->bh);
                                mark_buffer_dirty(cn->bh);
                        }
                }
                cn = cn->next;
        }
        return ret;
static int kupdate_transactions(struct super_block *s,
                                struct reiserfs_journal_list *jl,
                                struct reiserfs_journal_list **next_jl,
                                unsigned int *next_trans_id,
                                int num_blocks, int num_trans)
        int ret = 0;
        int written = 0;
        int transactions_flushed = 0;
        unsigned int orig_trans_id = jl->j_trans_id;
        struct buffer_chunk chunk;
        struct list_head *entry;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
        chunk.nr = 0;

        reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
        if (!journal_list_still_alive(s, orig_trans_id)) {
                goto done;
        }

        /* we've got j_flush_mutex held, nobody is going to delete any
         * of these lists out from underneath us
         */
        while ((num_trans && transactions_flushed < num_trans) ||
               (!num_trans && written < num_blocks)) {

                if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
                    atomic_read(&jl->j_commit_left)
                    || !(jl->j_state & LIST_DIRTY)) {
                        del_from_work_list(s, jl);
                        break;
                }
                ret = write_one_transaction(s, jl, &chunk);
                transactions_flushed++;
                written += ret;
                entry = jl->j_list.next;

                /* did we wrap? */
                if (entry == &journal->j_journal_list) {
                        break;
                }
                jl = JOURNAL_LIST_ENTRY(entry);

                /* don't bother with older transactions */
                if (jl->j_trans_id <= orig_trans_id)
                        break;
        }
        if (chunk.nr) {
                write_chunk(&chunk);
        }

done:
        mutex_unlock(&journal->j_flush_mutex);
        return ret;

/* o_sync and fsync heavy applications tend to use
** all the journal list slots with tiny transactions. These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
        unsigned long len = 0;
        unsigned long cur_len;
        int ret;
        int i;
        int limit = 256;
        struct reiserfs_journal_list *tjl;
        struct reiserfs_journal_list *flush_jl;
        unsigned int trans_id;
        struct reiserfs_journal *journal = SB_JOURNAL(s);

        flush_jl = tjl = jl;

        /* in data logging mode, try harder to flush a lot of blocks */
        if (reiserfs_data_log(s))
                limit = 1024;
        /* flush for 256 transactions or limit blocks, whichever comes first */
        for (i = 0; i < 256 && len < limit; i++) {
                if (atomic_read(&tjl->j_commit_left) ||
                    tjl->j_trans_id < jl->j_trans_id) {
                        break;
                }
                cur_len = atomic_read(&tjl->j_nonzerolen);
                if (cur_len > 0) {
                        tjl->j_state &= ~LIST_TOUCHED;
                }
                len += cur_len;
                flush_jl = tjl;
                if (tjl->j_list.next == &journal->j_journal_list)
                        break;
                tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
        }
        /* try to find a group of blocks we can flush across all the
        ** transactions, but only bother if we've actually spanned
        ** across multiple lists
        */
        if (flush_jl != jl) {
                ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
        }
        flush_journal_list(s, flush_jl, 1);
/*
** removes any nodes in table with the given block and dev.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
                         struct reiserfs_journal_cnode **table,
                         struct reiserfs_journal_list *jl,
                         unsigned long block, int remove_freed)
        struct reiserfs_journal_cnode *cur;
        struct reiserfs_journal_cnode **head;

        head = &(journal_hash(table, sb, block));
        cur = *head;
        while (cur) {
                if (cur->blocknr == block && cur->sb == sb
                    && (jl == NULL || jl == cur->jlist)
                    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
                        if (cur->hnext) {
                                cur->hnext->hprev = cur->hprev;
                        }
                        if (cur->hprev) {
                                cur->hprev->hnext = cur->hnext;
                        } else {
                                *head = cur->hnext;
                        }
                        if (cur->bh && cur->jlist)      /* anybody who clears the cur->bh will also dec the nonzerolen */
                                atomic_dec(&(cur->jlist->j_nonzerolen));
                        cur->bh = NULL;
                        cur->jlist = NULL;
                }
                cur = cur->hnext;
        }

static void free_journal_ram(struct super_block *sb)
        struct reiserfs_journal *journal = SB_JOURNAL(sb);
        kfree(journal->j_current_jl);
        journal->j_num_lists--;

        vfree(journal->j_cnode_free_orig);
        free_list_bitmaps(sb, journal->j_list_bitmap);
        free_bitmap_nodes(sb);  /* must be after free_list_bitmaps */
        if (journal->j_header_bh) {
                brelse(journal->j_header_bh);
        }
        /* j_header_bh is on the journal dev, make sure not to release the journal
         * dev until we brelse j_header_bh
         */
        release_journal_dev(sb, journal);
        vfree(journal);

/*
** call on unmount. Only set error to 1 if you haven't made your way out
** of read_super() yet. Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
                              struct super_block *sb, int error)
        struct reiserfs_transaction_handle myth;
        struct reiserfs_journal *journal = SB_JOURNAL(sb);

        /* we only want to flush out transactions if we were called with error == 0
        */
        if (!error && !(sb->s_flags & MS_RDONLY)) {
                /* end the current trans */
                BUG_ON(!th->t_trans_id);
                do_journal_end(th, sb, 10, FLUSH_ALL);

                /* make sure something gets logged to force our way into the flush code */
                if (!journal_join(&myth, sb, 1)) {
                        reiserfs_prepare_for_journal(sb,
                                                     SB_BUFFER_WITH_SB(sb),
                                                     1);
                        journal_mark_dirty(&myth, sb,
                                           SB_BUFFER_WITH_SB(sb));
                        do_journal_end(&myth, sb, 1, FLUSH_ALL);
                }
        }

        /* this also catches errors during the do_journal_end above */
        if (!error && reiserfs_is_journal_aborted(journal)) {
                memset(&myth, 0, sizeof(myth));
                if (!journal_join_abort(&myth, sb, 1)) {
                        reiserfs_prepare_for_journal(sb,
                                                     SB_BUFFER_WITH_SB(sb),
                                                     1);
                        journal_mark_dirty(&myth, sb,
                                           SB_BUFFER_WITH_SB(sb));
                        do_journal_end(&myth, sb, 1, FLUSH_ALL);
                }
        }

        reiserfs_mounted_fs_count--;
        /* wait for all commits to finish */
        cancel_delayed_work(&SB_JOURNAL(sb)->j_work);

        /*
         * We must release the write lock here because
         * the workqueue job (flush_async_commit) needs this lock
         */
        reiserfs_write_unlock(sb);
        flush_workqueue(commit_wq);

        if (!reiserfs_mounted_fs_count) {
                destroy_workqueue(commit_wq);
                commit_wq = NULL;
        }
        reiserfs_write_lock(sb);

        free_journal_ram(sb);

/*
** call on unmount. flush all journal transactions, release all allocated RAM
*/
int journal_release(struct reiserfs_transaction_handle *th,
                    struct super_block *sb)
        return do_journal_release(th, sb, 0);

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
                          struct super_block *sb)
        return do_journal_release(th, sb, 1);
/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *sb,
                                       struct reiserfs_journal_desc *desc,
                                       struct reiserfs_journal_commit *commit)
        if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
            get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
            get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
            get_commit_trans_len(commit) <= 0) {
                return 1;
        }
        return 0;
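/*
 * On-disk layout being checked here (offsets within the circular log area):
 *
 *      desc at X | len data blocks | commit at (X + len + 1) % journal size
 *
 * A transaction is trusted only if the commit block echoes the desc's
 * trans_id and length, and that length is sane for this journal.
 */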
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *sb,
                                        struct buffer_head *d_bh,
                                        unsigned int *oldest_invalid_trans_id,
                                        unsigned long *newest_mount_id)
        struct reiserfs_journal_desc *desc;
        struct reiserfs_journal_commit *commit;
        struct buffer_head *c_bh;
        unsigned long offset;

        if (!d_bh)
                return 0;

        desc = (struct reiserfs_journal_desc *)d_bh->b_data;
        if (get_desc_trans_len(desc) > 0
            && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
                if (oldest_invalid_trans_id && *oldest_invalid_trans_id
                    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
                        reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                                       "journal-986: transaction "
                                       "is valid returning because trans_id %d is greater than "
                                       "oldest_invalid %lu",
                                       get_desc_trans_id(desc),
                                       *oldest_invalid_trans_id);
                        return 0;
                }
                if (newest_mount_id
                    && *newest_mount_id > get_desc_mount_id(desc)) {
                        reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                                       "journal-1087: transaction "
                                       "is valid returning because mount_id %d is less than "
                                       "newest_mount_id %lu",
                                       get_desc_mount_id(desc),
                                       *newest_mount_id);
                        return -1;
                }
                if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
                        reiserfs_warning(sb, "journal-2018",
                                         "Bad transaction length %d "
                                         "encountered, ignoring transaction",
                                         get_desc_trans_len(desc));
                        return -1;
                }
                offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);

                /* ok, we have a journal description block, let's see if the transaction was valid */
                c_bh =
                    journal_bread(sb,
                                  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
                                  ((offset + get_desc_trans_len(desc) +
                                    1) % SB_ONDISK_JOURNAL_SIZE(sb)));
                if (!c_bh)
                        return 0;
                commit = (struct reiserfs_journal_commit *)c_bh->b_data;
                if (journal_compare_desc_commit(sb, desc, commit)) {
                        reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                                       "journal_transaction_is_valid, commit offset %ld had bad "
                                       "time %d or length %d",
                                       c_bh->b_blocknr -
                                       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
                                       get_commit_trans_id(commit),
                                       get_commit_trans_len(commit));
                        brelse(c_bh);
                        if (oldest_invalid_trans_id) {
                                *oldest_invalid_trans_id =
                                    get_desc_trans_id(desc);
                                reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                                               "transaction_is_valid setting oldest invalid trans_id "
                                               "to %d",
                                               get_desc_trans_id(desc));
                        }
                        return -1;
                }
                brelse(c_bh);
                reiserfs_debug(sb, REISERFS_DEBUG_CODE,
                               "journal-1006: found valid "
                               "transaction start offset %llu, len %d id %d",
                               d_bh->b_blocknr -
                               SB_ONDISK_JOURNAL_1st_BLOCK(sb),
                               get_desc_trans_len(desc),
                               get_desc_trans_id(desc));
                return 1;
        }
        return 0;

static void brelse_array(struct buffer_head **heads, int num)
        int i;
        for (i = 0; i < num; i++) {
                brelse(heads[i]);
        }
2189 static int journal_read_transaction(struct super_block *sb,
2190 unsigned long cur_dblock,
2191 unsigned long oldest_start,
2192 unsigned int oldest_trans_id,
2193 unsigned long newest_mount_id)
2195 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2196 struct reiserfs_journal_desc *desc;
2197 struct reiserfs_journal_commit *commit;
2198 unsigned int trans_id = 0;
2199 struct buffer_head *c_bh;
2200 struct buffer_head *d_bh;
2201 struct buffer_head **log_blocks = NULL;
2202 struct buffer_head **real_blocks = NULL;
2203 unsigned int trans_offset;
2207 d_bh = journal_bread(sb, cur_dblock);
2210 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2211 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2212 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
2213 "journal_read_transaction, offset %llu, len %d mount_id %d",
2214 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2215 get_desc_trans_len(desc), get_desc_mount_id(desc));
2216 if (get_desc_trans_id(desc) < oldest_trans_id) {
2217 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
2218 "journal_read_trans skipping because %lu is too old",
2220 SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2224 if (get_desc_mount_id(desc) != newest_mount_id) {
2225 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
2226 "journal_read_trans skipping because %d is != "
2227 "newest_mount_id %lu", get_desc_mount_id(desc),
2232 c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2233 ((trans_offset + get_desc_trans_len(desc) + 1) %
2234 SB_ONDISK_JOURNAL_SIZE(sb)));
2239 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2240 if (journal_compare_desc_commit(sb, desc, commit)) {
2241 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2242 "journal_read_transaction, "
2243 "commit offset %llu had bad time %d or length %d",
2245 SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2246 get_commit_trans_id(commit),
2247 get_commit_trans_len(commit));
2252 trans_id = get_desc_trans_id(desc);
2253 /* now we know we've got a good transaction, and it was inside the valid time ranges */
2254 log_blocks = kmalloc(get_desc_trans_len(desc) *
2255 sizeof(struct buffer_head *), GFP_NOFS);
2256 real_blocks = kmalloc(get_desc_trans_len(desc) *
2257 sizeof(struct buffer_head *), GFP_NOFS);
2258 if (!log_blocks || !real_blocks) {
2263 reiserfs_warning(sb, "journal-1169",
2264 "kmalloc failed, unable to mount FS");
2267 /* get all the buffer heads */
2268 trans_half = journal_trans_half(sb->s_blocksize);
2269 for (i = 0; i < get_desc_trans_len(desc); i++) {
2272 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2274 i) % SB_ONDISK_JOURNAL_SIZE(sb));
2275 if (i < trans_half) {
2278 le32_to_cpu(desc->j_realblock[i]));
2282 le32_to_cpu(commit->
2283 j_realblock[i - trans_half]));
2285 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
2286 reiserfs_warning(sb, "journal-1207",
2287 "REPLAY FAILURE fsck required! "
2288 "Block to replay is outside of "
2292 /* make sure we don't try to replay onto log or reserved area */
2293 if (is_block_in_log_or_reserved_area
2294 (sb, real_blocks[i]->b_blocknr)) {
2295 reiserfs_warning(sb, "journal-1204",
2296 "REPLAY FAILURE fsck required! "
2297 "Trying to replay onto a log block");
2299 brelse_array(log_blocks, i);
2300 brelse_array(real_blocks, i);
2308 /* read in the log blocks, memcpy to the corresponding real block */
2309 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2310 for (i = 0; i < get_desc_trans_len(desc); i++) {
2312 reiserfs_write_unlock(sb);
2313 wait_on_buffer(log_blocks[i]);
2314 reiserfs_write_lock(sb);
2316 if (!buffer_uptodate(log_blocks[i])) {
2317 reiserfs_warning(sb, "journal-1212",
2318 "REPLAY FAILURE fsck required! "
2319 "buffer write failed");
2320 brelse_array(log_blocks + i,
2321 get_desc_trans_len(desc) - i);
2322 brelse_array(real_blocks, get_desc_trans_len(desc));
2329 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2330 real_blocks[i]->b_size);
2331 set_buffer_uptodate(real_blocks[i]);
2332 brelse(log_blocks[i]);
2334 /* flush out the real blocks */
2335 for (i = 0; i < get_desc_trans_len(desc); i++) {
2336 set_buffer_dirty(real_blocks[i]);
2337 ll_rw_block(SWRITE, 1, real_blocks + i);
2339 for (i = 0; i < get_desc_trans_len(desc); i++) {
2340 wait_on_buffer(real_blocks[i]);
2341 if (!buffer_uptodate(real_blocks[i])) {
2342 reiserfs_warning(sb, "journal-1226",
2343 "REPLAY FAILURE, fsck required! "
2344 "buffer write failed");
2345 brelse_array(real_blocks + i,
2346 get_desc_trans_len(desc) - i);
2353 brelse(real_blocks[i]);
2356 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2357 ((trans_offset + get_desc_trans_len(desc) +
2358 2) % SB_ONDISK_JOURNAL_SIZE(sb));
2359 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2360 "journal-1095: setting journal " "start to offset %ld",
2361 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
2363 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2364 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2365 journal->j_last_flush_trans_id = trans_id;
2366 journal->j_trans_id = trans_id + 1;
2367 /* check for trans_id overflow */
2368 if (journal->j_trans_id == 0)
2369 journal->j_trans_id = 10;
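/* a trans_id of 0 is treated as invalid on disk, which is why the
* counter above restarts at 10 (the value also used at mount time)
* instead of wrapping to 0 */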
2377 /* This function reads blocks starting at block and going up to max_block, of
2378 bufsize size (but no more than BUFNR blocks at a time). This has been shown to
2379 improve mounting speed, at least on self-rebuilding raid5 arrays.
2380 Right now it is only used from journal code, but later we might use it
2381 from other places.
2382 Note: Do not use journal_getblk/sb_getblk functions here! */
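/* journal_getblk/sb_getblk resolve blocks on the file system's own
device, but the journal may live on a separate block device, so this
helper takes a raw struct block_device and uses __getblk directly. */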
2383 static struct buffer_head *reiserfs_breada(struct block_device *dev,
2384 b_blocknr_t block, int bufsize,
2385 b_blocknr_t max_block)
2387 struct buffer_head *bhlist[BUFNR];
2388 unsigned int blocks = BUFNR;
2389 struct buffer_head *bh;
2392 bh = __getblk(dev, block, bufsize);
2393 if (buffer_uptodate(bh))
2396 if (block + BUFNR > max_block) {
2397 blocks = max_block - block;
2401 for (i = 1; i < blocks; i++) {
2402 bh = __getblk(dev, block + i, bufsize);
2403 if (buffer_uptodate(bh)) {
2409 ll_rw_block(READ, j, bhlist);
2410 for (i = 1; i < j; i++)
2414 if (buffer_uptodate(bh))
2421 ** read and replay the log
2422 ** on a clean unmount, the journal header's next unflushed pointer will point to an
2423 ** invalid transaction. This tests that before scanning all the transactions in the log, which keeps normal mount times fast.
2425 ** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
2427 ** On exit, it sets things up so the first transaction will work correctly.
2429 static int journal_read(struct super_block *sb)
2431 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2432 struct reiserfs_journal_desc *desc;
2433 unsigned int oldest_trans_id = 0;
2434 unsigned int oldest_invalid_trans_id = 0;
2436 unsigned long oldest_start = 0;
2437 unsigned long cur_dblock = 0;
2438 unsigned long newest_mount_id = 9;
2439 struct buffer_head *d_bh;
2440 struct reiserfs_journal_header *jh;
2441 int valid_journal_header = 0;
2442 int replay_count = 0;
2443 int continue_replay = 1;
2445 char b[BDEVNAME_SIZE];
2447 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
2448 reiserfs_info(sb, "checking transaction log (%s)\n",
2449 bdevname(journal->j_dev_bd, b));
2450 start = get_seconds();
2452 /* step 1, read in the journal header block. Check the transaction it says
2453 ** is the first unflushed, and if that transaction is not valid,
2454 ** replay is done
2456 journal->j_header_bh = journal_bread(sb,
2457 SB_ONDISK_JOURNAL_1st_BLOCK(sb)
2458 + SB_ONDISK_JOURNAL_SIZE(sb));
2459 if (!journal->j_header_bh) {
2462 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2463 if (le32_to_cpu(jh->j_first_unflushed_offset) <
2464 SB_ONDISK_JOURNAL_SIZE(sb)
2465 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2467 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2468 le32_to_cpu(jh->j_first_unflushed_offset);
2469 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2470 newest_mount_id = le32_to_cpu(jh->j_mount_id);
2471 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2472 "journal-1153: found in "
2473 "header: first_unflushed_offset %d, last_flushed_trans_id "
2474 "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2475 le32_to_cpu(jh->j_last_flush_trans_id));
2476 valid_journal_header = 1;
2478 /* now, we try to read the first unflushed offset. If it is not valid,
2479 ** there is nothing more we can do, and it makes no sense to read
2480 ** through the whole log.
2484 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2485 le32_to_cpu(jh->j_first_unflushed_offset));
2486 ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
2488 continue_replay = 0;
2491 goto start_log_replay;
2494 if (continue_replay && bdev_read_only(sb->s_bdev)) {
2495 reiserfs_warning(sb, "clm-2076",
2496 "device is readonly, unable to replay log");
2500 /* ok, there are transactions that need to be replayed. start with the first log block, find
2501 ** all the valid transactions, and pick out the oldest.
2503 while (continue_replay
2505 (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2506 SB_ONDISK_JOURNAL_SIZE(sb))) {
2507 /* Note that it is required for blocksize of primary fs device and journal
2508 device to be the same */
2510 reiserfs_breada(journal->j_dev_bd, cur_dblock,
2512 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2513 SB_ONDISK_JOURNAL_SIZE(sb));
2515 journal_transaction_is_valid(sb, d_bh,
2516 &oldest_invalid_trans_id,
2519 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2520 if (oldest_start == 0) { /* init all oldest_ values */
2521 oldest_trans_id = get_desc_trans_id(desc);
2522 oldest_start = d_bh->b_blocknr;
2523 newest_mount_id = get_desc_mount_id(desc);
2524 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2525 "journal-1179: Setting "
2526 "oldest_start to offset %llu, trans_id %lu",
2528 SB_ONDISK_JOURNAL_1st_BLOCK
2529 (sb), oldest_trans_id);
2530 } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2531 /* one we just read was older */
2532 oldest_trans_id = get_desc_trans_id(desc);
2533 oldest_start = d_bh->b_blocknr;
2534 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2535 "journal-1180: Resetting "
2536 "oldest_start to offset %lu, trans_id %lu",
2538 SB_ONDISK_JOURNAL_1st_BLOCK
2539 (sb), oldest_trans_id);
2541 if (newest_mount_id < get_desc_mount_id(desc)) {
2542 newest_mount_id = get_desc_mount_id(desc);
2543 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2544 "journal-1299: Setting "
2545 "newest_mount_id to %d",
2546 get_desc_mount_id(desc));
2548 cur_dblock += get_desc_trans_len(desc) + 2;
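/* the +2 above skips the desc and commit blocks that bracket the
* trans_len data blocks of each transaction */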
2556 cur_dblock = oldest_start;
2557 if (oldest_trans_id) {
2558 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2559 "journal-1206: Starting replay "
2560 "from offset %llu, trans_id %lu",
2561 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2566 while (continue_replay && oldest_trans_id > 0) {
2568 journal_read_transaction(sb, cur_dblock, oldest_start,
2569 oldest_trans_id, newest_mount_id);
2572 } else if (ret != 0) {
2576 SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
2578 if (cur_dblock == oldest_start)
2582 if (oldest_trans_id == 0) {
2583 reiserfs_debug(sb, REISERFS_DEBUG_CODE,
2584 "journal-1225: No valid " "transactions found");
2586 /* j_start does not get set correctly if we don't replay any transactions.
2587 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2588 ** copy the trans_id from the header
2590 if (valid_journal_header && replay_count == 0) {
2591 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2592 journal->j_trans_id =
2593 le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2594 /* check for trans_id overflow */
2595 if (journal->j_trans_id == 0)
2596 journal->j_trans_id = 10;
2597 journal->j_last_flush_trans_id =
2598 le32_to_cpu(jh->j_last_flush_trans_id);
2599 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2601 journal->j_mount_id = newest_mount_id + 1;
2603 reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2604 "newest_mount_id to %lu", journal->j_mount_id);
2605 journal->j_first_unflushed_offset = journal->j_start;
2606 if (replay_count > 0) {
2608 "replayed %d transactions in %lu seconds\n",
2609 replay_count, get_seconds() - start);
2611 if (!bdev_read_only(sb->s_bdev) &&
2612 _update_journal_header_block(sb, journal->j_start,
2613 journal->j_last_flush_trans_id)) {
2614 /* replay failed, caller must call free_journal_ram and abort
2615 ** the mount
2622 static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2624 struct reiserfs_journal_list *jl;
2625 jl = kzalloc(sizeof(struct reiserfs_journal_list),
2626 GFP_NOFS | __GFP_NOFAIL);
2627 INIT_LIST_HEAD(&jl->j_list);
2628 INIT_LIST_HEAD(&jl->j_working_list);
2629 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2630 INIT_LIST_HEAD(&jl->j_bh_list);
2631 mutex_init(&jl->j_commit_mutex);
2632 SB_JOURNAL(s)->j_num_lists++;
2633 get_journal_list(jl);
2637 static void journal_list_init(struct super_block *sb)
2639 SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
2642 static int release_journal_dev(struct super_block *super,
2643 struct reiserfs_journal *journal)
2649 if (journal->j_dev_bd != NULL) {
2650 if (journal->j_dev_bd->bd_dev != super->s_dev)
2651 bd_release(journal->j_dev_bd);
2652 result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
2653 journal->j_dev_bd = NULL;
2657 reiserfs_warning(super, "sh-457",
2658 "Cannot release journal device: %i", result);
2663 static int journal_init_dev(struct super_block *super,
2664 struct reiserfs_journal *journal,
2665 const char *jdev_name)
2669 fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
2670 char b[BDEVNAME_SIZE];
2674 journal->j_dev_bd = NULL;
2675 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2676 new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2678 if (bdev_read_only(super->s_bdev))
2679 blkdev_mode = FMODE_READ;
2681 /* no "jdev" mount option was given; the journal device (which may still be separate) comes from the on-disk super block */
2682 if ((!jdev_name || !jdev_name[0])) {
2683 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2684 journal->j_dev_mode = blkdev_mode;
2685 if (IS_ERR(journal->j_dev_bd)) {
2686 result = PTR_ERR(journal->j_dev_bd);
2687 journal->j_dev_bd = NULL;
2688 reiserfs_warning(super, "sh-458",
2689 "cannot init journal device '%s': %i",
2690 __bdevname(jdev, b), result);
2692 } else if (jdev != super->s_dev) {
2693 result = bd_claim(journal->j_dev_bd, journal);
2695 blkdev_put(journal->j_dev_bd, blkdev_mode);
2699 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2705 journal->j_dev_mode = blkdev_mode;
2706 journal->j_dev_bd = open_bdev_exclusive(jdev_name,
2707 blkdev_mode, journal);
2708 if (IS_ERR(journal->j_dev_bd)) {
2709 result = PTR_ERR(journal->j_dev_bd);
2710 journal->j_dev_bd = NULL;
2711 reiserfs_warning(super,
2712 "journal_init_dev: Cannot open '%s': %i",
2717 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2718 reiserfs_info(super,
2719 "journal_init_dev: journal device: %s\n",
2720 bdevname(journal->j_dev_bd, b));
2725 * When creating/tuning a file system user can assign some
2726 * journal params within boundaries which depend on the ratio
2727 * blocksize/standard_blocksize.
2729 * For blocks >= standard_blocksize the transaction size should
2730 * be not less than JOURNAL_TRANS_MIN_DEFAULT, and not more
2731 * than JOURNAL_TRANS_MAX_DEFAULT.
2733 * For blocks < standard_blocksize these boundaries should be
2734 * decreased proportionally.
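*
* Worked example (illustrative): with a 1KB blocksize the ratio is
* 4096/1024 = 4, so j_trans_max must lie between
* JOURNAL_TRANS_MIN_DEFAULT/4 and JOURNAL_TRANS_MAX_DEFAULT/4, and
* j_max_batch must equal j_trans_max * JOURNAL_MAX_BATCH_DEFAULT /
* JOURNAL_TRANS_MAX_DEFAULT.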
2736 #define REISERFS_STANDARD_BLKSIZE (4096)
2738 static int check_advise_trans_params(struct super_block *sb,
2739 struct reiserfs_journal *journal)
2741 if (journal->j_trans_max) {
2742 /* Non-default journal params.
2743 Do a sanity check on them. */
2745 if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
2746 ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;
2748 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
2749 journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
2750 SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
2751 JOURNAL_MIN_RATIO) {
2752 reiserfs_warning(sb, "sh-462",
2753 "bad transaction max size (%u). "
2754 "FSCK?", journal->j_trans_max);
2757 if (journal->j_max_batch != (journal->j_trans_max) *
2758 JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
2759 reiserfs_warning(sb, "sh-463",
2760 "bad transaction max batch (%u). "
2761 "FSCK?", journal->j_max_batch);
2765 /* Default journal params.
2766 The file system was created by an old version
2767 of mkreiserfs, so some fields contain zeros,
2768 and we need to advise proper values for them */
2769 if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
2770 reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
2774 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2775 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2776 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2782 ** must be called once on fs mount. calls journal_read for you
2784 int journal_init(struct super_block *sb, const char *j_dev_name,
2785 int old_format, unsigned int commit_max_age)
2787 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
2788 struct buffer_head *bhjh;
2789 struct reiserfs_super_block *rs;
2790 struct reiserfs_journal_header *jh;
2791 struct reiserfs_journal *journal;
2792 struct reiserfs_journal_list *jl;
2793 char b[BDEVNAME_SIZE];
2795 journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal));
2797 reiserfs_warning(sb, "journal-1256",
2798 "unable to get memory for journal structure");
2801 memset(journal, 0, sizeof(struct reiserfs_journal));
2802 INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2803 INIT_LIST_HEAD(&journal->j_prealloc_list);
2804 INIT_LIST_HEAD(&journal->j_working_list);
2805 INIT_LIST_HEAD(&journal->j_journal_list);
2806 journal->j_persistent_trans = 0;
2807 if (reiserfs_allocate_list_bitmaps(sb,
2808 journal->j_list_bitmap,
2809 reiserfs_bmap_count(sb)))
2810 goto free_and_return;
2811 allocate_bitmap_nodes(sb);
2813 /* reserved for journal area support */
2814 SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
2815 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2817 reiserfs_bmap_count(sb) +
2819 REISERFS_DISK_OFFSET_IN_BYTES /
2820 sb->s_blocksize + 2);
2822 /* Sanity check to see if the standard journal fits within the first bitmap
2823 block (relevant for small blocksizes) */
2824 if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
2825 (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
2826 SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
2827 reiserfs_warning(sb, "journal-1393",
2828 "journal does not fit for area addressed "
2829 "by first of bitmap blocks. It starts at "
2830 "%u and its size is %u. Block size %ld",
2831 SB_JOURNAL_1st_RESERVED_BLOCK(sb),
2832 SB_ONDISK_JOURNAL_SIZE(sb),
2834 goto free_and_return;
2837 if (journal_init_dev(sb, journal, j_dev_name) != 0) {
2838 reiserfs_warning(sb, "sh-462",
2839 "unable to initialize jornal device");
2840 goto free_and_return;
2843 rs = SB_DISK_SUPER_BLOCK(sb);
2845 /* read journal header */
2846 bhjh = journal_bread(sb,
2847 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
2848 SB_ONDISK_JOURNAL_SIZE(sb));
2850 reiserfs_warning(sb, "sh-459",
2851 "unable to read journal header");
2852 goto free_and_return;
2854 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2856 /* make sure that the journal matches the super block */
2857 if (is_reiserfs_jr(rs)
2858 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2859 sb_jp_journal_magic(rs))) {
2860 reiserfs_warning(sb, "sh-460",
2861 "journal header magic %x (device %s) does "
2862 "not match to magic found in super block %x",
2863 jh->jh_journal.jp_journal_magic,
2864 bdevname(journal->j_dev_bd, b),
2865 sb_jp_journal_magic(rs));
2867 goto free_and_return;
2870 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2871 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2872 journal->j_max_commit_age =
2873 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2874 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2876 if (check_advise_trans_params(sb, journal) != 0)
2877 goto free_and_return;
2878 journal->j_default_max_commit_age = journal->j_max_commit_age;
2880 if (commit_max_age != 0) {
2881 journal->j_max_commit_age = commit_max_age;
2882 journal->j_max_trans_age = commit_max_age;
2885 reiserfs_info(sb, "journal params: device %s, size %u, "
2886 "journal first block %u, max trans len %u, max batch %u, "
2887 "max commit age %u, max trans age %u\n",
2888 bdevname(journal->j_dev_bd, b),
2889 SB_ONDISK_JOURNAL_SIZE(sb),
2890 SB_ONDISK_JOURNAL_1st_BLOCK(sb),
2891 journal->j_trans_max,
2892 journal->j_max_batch,
2893 journal->j_max_commit_age, journal->j_max_trans_age);
2897 journal->j_list_bitmap_index = 0;
2898 journal_list_init(sb);
2900 memset(journal->j_list_hash_table, 0,
2901 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2903 INIT_LIST_HEAD(&journal->j_dirty_buffers);
2904 spin_lock_init(&journal->j_dirty_buffers_lock);
2906 journal->j_start = 0;
2908 journal->j_len_alloc = 0;
2909 atomic_set(&(journal->j_wcount), 0);
2910 atomic_set(&(journal->j_async_throttle), 0);
2911 journal->j_bcount = 0;
2912 journal->j_trans_start_time = 0;
2913 journal->j_last = NULL;
2914 journal->j_first = NULL;
2915 init_waitqueue_head(&(journal->j_join_wait));
2916 mutex_init(&journal->j_mutex);
2917 mutex_init(&journal->j_flush_mutex);
2919 journal->j_trans_id = 10;
2920 journal->j_mount_id = 10;
2921 journal->j_state = 0;
2922 atomic_set(&(journal->j_jlock), 0);
2923 journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2924 journal->j_cnode_free_orig = journal->j_cnode_free_list;
2925 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2926 journal->j_cnode_used = 0;
2927 journal->j_must_wait = 0;
2929 if (journal->j_cnode_free == 0) {
2930 reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
2931 "allocation failed (%ld bytes). Journal is "
2932 "too large for available memory. Usually "
2933 "this is due to a journal that is too large.",
2934 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2935 goto free_and_return;
2938 init_journal_hash(sb);
2939 jl = journal->j_current_jl;
2940 jl->j_list_bitmap = get_list_bitmap(sb, jl);
2941 if (!jl->j_list_bitmap) {
2942 reiserfs_warning(sb, "journal-2005",
2943 "get_list_bitmap failed for journal list 0");
2944 goto free_and_return;
2946 if (journal_read(sb) < 0) {
2947 reiserfs_warning(sb, "reiserfs-2006",
2948 "Replay Failure, unable to mount");
2949 goto free_and_return;
2952 reiserfs_mounted_fs_count++;
2953 if (reiserfs_mounted_fs_count <= 1)
2954 commit_wq = create_workqueue("reiserfs");
2956 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2957 journal->j_work_sb = sb;
2960 free_journal_ram(sb);
2965 ** test for a polite end of the current transaction. Used by file_write, and should
2966 ** be used by delete to make sure they don't write more than can fit inside a single
2967 ** transaction
2969 int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2972 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2973 time_t now = get_seconds();
2974 /* cannot restart while nested */
2975 BUG_ON(!th->t_trans_id);
2976 if (th->t_refcount > 1)
2978 if (journal->j_must_wait > 0 ||
2979 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2980 atomic_read(&(journal->j_jlock)) ||
2981 (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2982 journal->j_cnode_free < (journal->j_trans_max * 3)) {
2985 /* protected by the BKL here */
2986 journal->j_len_alloc += new_alloc;
2987 th->t_blocks_allocated += new_alloc;
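/*
** a minimal usage sketch (hypothetical caller, not from this file): a
** writer doing many small operations checks between steps whether the
** transaction should be ended and a fresh one started:
**
**	if (journal_transaction_should_end(th, blocks_per_step)) {
**		retval = restart_transaction(th, inode, &path);
**		if (retval)
**			goto out;
**	}
**
** restart_transaction() stands in for the caller-side helper that ends
** the old handle and begins a new one.
*/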
2991 /* this must be called inside a transaction, and requires the
2992 ** kernel_lock to be held
2994 void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2996 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2997 BUG_ON(!th->t_trans_id);
2998 journal->j_must_wait = 1;
2999 set_bit(J_WRITERS_BLOCKED, &journal->j_state);
3003 /* this must be called without a transaction started, and does not
3004 ** require BKL
3006 void reiserfs_allow_writes(struct super_block *s)
3008 struct reiserfs_journal *journal = SB_JOURNAL(s);
3009 clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
3010 wake_up(&journal->j_join_wait);
3013 /* this must be called without a transaction started, and does not
3014 ** require BKL
3016 void reiserfs_wait_on_write_block(struct super_block *s)
3018 struct reiserfs_journal *journal = SB_JOURNAL(s);
3019 wait_event(journal->j_join_wait,
3020 !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
3023 static void queue_log_writer(struct super_block *s)
3026 struct reiserfs_journal *journal = SB_JOURNAL(s);
3027 set_bit(J_WRITERS_QUEUED, &journal->j_state);
3030 * we don't want to use wait_event here because
3031 * we only want to wait once.
3033 init_waitqueue_entry(&wait, current);
3034 add_wait_queue(&journal->j_join_wait, &wait);
3035 set_current_state(TASK_UNINTERRUPTIBLE);
3036 if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
3037 reiserfs_write_unlock(s);
3039 reiserfs_write_lock(s);
3041 __set_current_state(TASK_RUNNING);
3042 remove_wait_queue(&journal->j_join_wait, &wait);
3045 static void wake_queued_writers(struct super_block *s)
3047 struct reiserfs_journal *journal = SB_JOURNAL(s);
3048 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
3049 wake_up(&journal->j_join_wait);
3052 static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
3054 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3055 unsigned long bcount = journal->j_bcount;
3057 reiserfs_write_unlock(sb);
3058 schedule_timeout_uninterruptible(1);
3059 reiserfs_write_lock(sb);
3060 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
3061 while ((atomic_read(&journal->j_wcount) > 0 ||
3062 atomic_read(&journal->j_jlock)) &&
3063 journal->j_trans_id == trans_id) {
3064 queue_log_writer(sb);
3066 if (journal->j_trans_id != trans_id)
3068 if (bcount == journal->j_bcount)
3070 bcount = journal->j_bcount;
3074 /* join == true if you must join an existing transaction.
3075 ** join == false if you can deal with waiting for others to finish
3077 ** this will block until the transaction is joinable. send the number of blocks you
3078 ** expect to use in nblocks.
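**
** a minimal usage sketch of the public wrappers (hypothetical caller,
** not from this file):
**
**	struct reiserfs_transaction_handle th;
**	int err = journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT);
**	if (err)
**		return err;
**	reiserfs_prepare_for_journal(sb, bh, 1);
**	... modify bh ...
**	journal_mark_dirty(&th, sb, bh);
**	err = journal_end(&th, sb, JOURNAL_PER_BALANCE_CNT);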
3080 static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
3081 struct super_block *sb, unsigned long nblocks,
3084 time_t now = get_seconds();
3085 unsigned int old_trans_id;
3086 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3087 struct reiserfs_transaction_handle myth;
3088 int sched_count = 0;
3091 reiserfs_check_lock_depth(sb, "journal_begin");
3092 BUG_ON(nblocks > journal->j_trans_max);
3094 PROC_INFO_INC(sb, journal.journal_being);
3095 /* set here for journal_join */
3101 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
3103 retval = journal->j_errno;
3106 journal->j_bcount++;
3108 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
3110 reiserfs_write_unlock(sb);
3111 reiserfs_wait_on_write_block(sb);
3112 reiserfs_write_lock(sb);
3113 PROC_INFO_INC(sb, journal.journal_relock_writers);
3116 now = get_seconds();
3118 /* if there is no room in the journal OR
3119 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning.
3120 ** we don't sleep if there aren't other writers
3123 if ((!join && journal->j_must_wait > 0) ||
3125 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
3126 || (!join && atomic_read(&journal->j_wcount) > 0
3127 && journal->j_trans_start_time > 0
3128 && (now - journal->j_trans_start_time) >
3129 journal->j_max_trans_age) || (!join
3130 && atomic_read(&journal->j_jlock))
3131 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
3133 old_trans_id = journal->j_trans_id;
3134 unlock_journal(sb); /* allow others to finish this transaction */
3136 if (!join && (journal->j_len_alloc + nblocks + 2) >=
3137 journal->j_max_batch &&
3138 ((journal->j_len + nblocks + 2) * 100) <
3139 (journal->j_len_alloc * 75)) {
3140 if (atomic_read(&journal->j_wcount) > 10) {
3142 queue_log_writer(sb);
3146 /* don't mess with joining the transaction if all we have to do is
3147 * wait for someone else to do a commit
3149 if (atomic_read(&journal->j_jlock)) {
3150 while (journal->j_trans_id == old_trans_id &&
3151 atomic_read(&journal->j_jlock)) {
3152 queue_log_writer(sb);
3156 retval = journal_join(&myth, sb, 1);
3160 /* someone might have ended the transaction while we joined */
3161 if (old_trans_id != journal->j_trans_id) {
3162 retval = do_journal_end(&myth, sb, 1, 0);
3164 retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
3170 PROC_INFO_INC(sb, journal.journal_relock_wcount);
3173 /* we are the first writer, set trans_id */
3174 if (journal->j_trans_start_time == 0) {
3175 journal->j_trans_start_time = get_seconds();
3177 atomic_inc(&(journal->j_wcount));
3178 journal->j_len_alloc += nblocks;
3179 th->t_blocks_logged = 0;
3180 th->t_blocks_allocated = nblocks;
3181 th->t_trans_id = journal->j_trans_id;
3183 INIT_LIST_HEAD(&th->t_list);
3188 memset(th, 0, sizeof(*th));
3189 /* Re-set th->t_super, so we can properly keep track of how many
3190 * persistent transactions there are. We need to do this so if this
3191 * call is part of a failed restart_transaction, we can free it later */
3196 struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3202 struct reiserfs_transaction_handle *th;
3204 /* if we're nesting into an existing transaction, it will be
3205 ** persistent on its own
3207 if (reiserfs_transaction_running(s)) {
3208 th = current->journal_info;
3210 BUG_ON(th->t_refcount < 2);
3214 th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3217 ret = journal_begin(th, s, nblocks);
3223 SB_JOURNAL(s)->j_persistent_trans++;
3227 int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3229 struct super_block *s = th->t_super;
3232 ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3235 if (th->t_refcount == 0) {
3236 SB_JOURNAL(s)->j_persistent_trans--;
3242 static int journal_join(struct reiserfs_transaction_handle *th,
3243 struct super_block *sb, unsigned long nblocks)
3245 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3247 /* this keeps do_journal_end from NULLing out the current->journal_info
3248 ** pointer
3250 th->t_handle_save = cur_th;
3251 BUG_ON(cur_th && cur_th->t_refcount > 1);
3252 return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
3255 int journal_join_abort(struct reiserfs_transaction_handle *th,
3256 struct super_block *sb, unsigned long nblocks)
3258 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3260 /* this keeps do_journal_end from NULLing out the current->journal_info
3261 ** pointer
3263 th->t_handle_save = cur_th;
3264 BUG_ON(cur_th && cur_th->t_refcount > 1);
3265 return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
3268 int journal_begin(struct reiserfs_transaction_handle *th,
3269 struct super_block *sb, unsigned long nblocks)
3271 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3274 th->t_handle_save = NULL;
3276 /* we are nesting into the current transaction */
3277 if (cur_th->t_super == sb) {
3278 BUG_ON(!cur_th->t_refcount);
3279 cur_th->t_refcount++;
3280 memcpy(th, cur_th, sizeof(*th));
3281 if (th->t_refcount <= 1)
3282 reiserfs_warning(sb, "reiserfs-2005",
3283 "BAD: refcount <= 1, but "
3284 "journal_info != 0");
3287 /* we've ended up with a handle from a different filesystem.
3288 ** save it and restore on journal_end. This should never
3289 ** really happen.
3291 reiserfs_warning(sb, "clm-2100",
3292 "nesting info a different FS");
3293 th->t_handle_save = current->journal_info;
3294 current->journal_info = th;
3297 current->journal_info = th;
3299 ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
3300 BUG_ON(current->journal_info != th);
3302 /* I guess this boils down to being the reciprocal of clm-2100 above.
3303 * If do_journal_begin_r fails, we need to put it back, since journal_end
3304 * won't be called to do it. */
3306 current->journal_info = th->t_handle_save;
3308 BUG_ON(!th->t_refcount);
3314 ** puts bh into the current transaction. If it was already there, it removes the
3315 ** old pointers from the hash and puts new ones in (to make sure replay happens in the right order).
3317 ** if it was dirty, cleans it and files it onto the clean list. It can't be allowed to go dirty again until the
3318 ** transaction is committed.
3320 ** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to j_len + JOURNAL_PER_BALANCE_CNT.
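**
** callers must run reiserfs_prepare_for_journal() on bh first; the
** warning below fires if the buffer arrives unprepared or already dirty.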
3322 int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3323 struct super_block *sb, struct buffer_head *bh)
3325 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3326 struct reiserfs_journal_cnode *cn = NULL;
3327 int count_already_incd = 0;
3329 BUG_ON(!th->t_trans_id);
3331 PROC_INFO_INC(sb, journal.mark_dirty);
3332 if (th->t_trans_id != journal->j_trans_id) {
3333 reiserfs_panic(th->t_super, "journal-1577",
3334 "handle trans id %ld != current trans id %ld",
3335 th->t_trans_id, journal->j_trans_id);
3340 prepared = test_clear_buffer_journal_prepared(bh);
3341 clear_buffer_journal_restore_dirty(bh);
3342 /* already in this transaction, we are done */
3343 if (buffer_journaled(bh)) {
3344 PROC_INFO_INC(sb, journal.mark_dirty_already);
3348 /* this must be turned into a panic instead of a warning. We can't allow
3349 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3350 ** could get to disk too early. NOT GOOD.
3352 if (!prepared || buffer_dirty(bh)) {
3353 reiserfs_warning(sb, "journal-1777",
3354 "buffer %llu bad state "
3355 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3356 (unsigned long long)bh->b_blocknr,
3357 prepared ? ' ' : '!',
3358 buffer_locked(bh) ? ' ' : '!',
3359 buffer_dirty(bh) ? ' ' : '!',
3360 buffer_journal_dirty(bh) ? ' ' : '!');
3363 if (atomic_read(&(journal->j_wcount)) <= 0) {
3364 reiserfs_warning(sb, "journal-1409",
3365 "returning because j_wcount was %d",
3366 atomic_read(&(journal->j_wcount)));
3369 /* this error means I've screwed up, and we've overflowed the transaction.
3370 ** Nothing can be done here, except make the FS readonly or panic.
3372 if (journal->j_len >= journal->j_trans_max) {
3373 reiserfs_panic(th->t_super, "journal-1413",
3374 "j_len (%lu) is too big",
3378 if (buffer_journal_dirty(bh)) {
3379 count_already_incd = 1;
3380 PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
3381 clear_buffer_journal_dirty(bh);
3384 if (journal->j_len > journal->j_len_alloc) {
3385 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3388 set_buffer_journaled(bh);
3390 /* now put this guy on the end */
3394 reiserfs_panic(sb, "journal-4", "get_cnode failed!");
3397 if (th->t_blocks_logged == th->t_blocks_allocated) {
3398 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3399 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3401 th->t_blocks_logged++;
3405 cn->blocknr = bh->b_blocknr;
3408 insert_journal_hash(journal->j_hash_table, cn);
3409 if (!count_already_incd) {
3414 cn->prev = journal->j_last;
3416 if (journal->j_last) {
3417 journal->j_last->next = cn;
3418 journal->j_last = cn;
3420 journal->j_first = cn;
3421 journal->j_last = cn;
3426 int journal_end(struct reiserfs_transaction_handle *th,
3427 struct super_block *sb, unsigned long nblocks)
3429 if (!current->journal_info && th->t_refcount > 1)
3430 reiserfs_warning(sb, "REISER-NESTING",
3431 "th NULL, refcount %d", th->t_refcount);
3433 if (!th->t_trans_id) {
3439 if (th->t_refcount > 0) {
3440 struct reiserfs_transaction_handle *cur_th =
3441 current->journal_info;
3443 /* we aren't allowed to close a nested transaction on a different
3444 ** filesystem from the one in the task struct
3446 BUG_ON(cur_th->t_super != th->t_super);
3449 memcpy(current->journal_info, th, sizeof(*th));
3454 return do_journal_end(th, sb, nblocks, 0);
3458 /* removes from the current transaction, relsing and decrementing any counters.
3459 ** also files the removed buffer directly onto the clean list
3461 ** called by journal_mark_freed when a block has been deleted
3463 ** returns 1 if it cleaned and relsed the buffer. 0 otherwise
3465 static int remove_from_transaction(struct super_block *sb,
3466 b_blocknr_t blocknr, int already_cleaned)
3468 struct buffer_head *bh;
3469 struct reiserfs_journal_cnode *cn;
3470 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3473 cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
3474 if (!cn || !cn->bh) {
3479 cn->prev->next = cn->next;
3482 cn->next->prev = cn->prev;
3484 if (cn == journal->j_first) {
3485 journal->j_first = cn->next;
3487 if (cn == journal->j_last) {
3488 journal->j_last = cn->prev;
3491 remove_journal_hash(sb, journal->j_hash_table, NULL,
3493 clear_buffer_journaled(bh); /* don't log this one */
3495 if (!already_cleaned) {
3496 clear_buffer_journal_dirty(bh);
3497 clear_buffer_dirty(bh);
3498 clear_buffer_journal_test(bh);
3500 if (atomic_read(&(bh->b_count)) < 0) {
3501 reiserfs_warning(sb, "journal-1752",
3507 journal->j_len_alloc--;
3513 ** for any cnode in a journal list, it can only be dirtied if all the
3514 ** transactions that include it are committed to disk.
3515 ** this checks through each transaction, and returns 1 if you are allowed to dirty,
3516 ** and 0 if you aren't
3518 ** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3519 ** blocks for a given transaction on disk
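**
** in the hash chains walked below, hprev links point at newer cnodes for
** the same block and hnext links at older ones.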
3522 static int can_dirty(struct reiserfs_journal_cnode *cn)
3524 struct super_block *sb = cn->sb;
3525 b_blocknr_t blocknr = cn->blocknr;
3526 struct reiserfs_journal_cnode *cur = cn->hprev;
3529 /* first test hprev. These are all newer than cn, so any node here
3530 ** with the same block number and dev means this node can't be sent
3531 ** to disk right now.
3533 while (cur && can_dirty) {
3534 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3535 cur->blocknr == blocknr) {
3540 /* then test hnext. These are all older than cn. As long as they
3541 ** are committed to the log, it is safe to write cn to disk
3544 while (cur && can_dirty) {
3545 if (cur->jlist && cur->jlist->j_len > 0 &&
3546 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3547 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3555 /* syncs the commit blocks, but does not force the real buffers to disk
3556 ** will wait until the current transaction is done/committed before returning
3558 int journal_end_sync(struct reiserfs_transaction_handle *th,
3559 struct super_block *sb, unsigned long nblocks)
3561 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3563 BUG_ON(!th->t_trans_id);
3564 /* you can't sync while nested; it is very, very bad */
3565 BUG_ON(th->t_refcount > 1);
3566 if (journal->j_len == 0) {
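/* an empty transaction cannot be committed, so the super block buffer is
* logged to give the transaction some content */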
3567 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3569 journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
3571 return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
3575 ** writeback the pending async commits to disk
3577 static void flush_async_commits(struct work_struct *work)
3579 struct reiserfs_journal *journal =
3580 container_of(work, struct reiserfs_journal, j_work.work);
3581 struct super_block *sb = journal->j_work_sb;
3582 struct reiserfs_journal_list *jl;
3583 struct list_head *entry;
3585 reiserfs_write_lock(sb);
3586 if (!list_empty(&journal->j_journal_list)) {
3587 /* last entry is the youngest, commit it and you get everything */
3588 entry = journal->j_journal_list.prev;
3589 jl = JOURNAL_LIST_ENTRY(entry);
3590 flush_commit_list(sb, jl, 1);
3592 reiserfs_write_unlock(sb);
3596 ** flushes any old transactions to disk
3597 ** ends the current transaction if it is too old
3599 int reiserfs_flush_old_commits(struct super_block *sb)
3602 struct reiserfs_transaction_handle th;
3603 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3605 now = get_seconds();
3606 /* safety check so we don't flush while we are replaying the log during
3607 * mount
3609 if (list_empty(&journal->j_journal_list)) {
3613 /* check the current transaction. If there are no writers, and it is
3614 * too old, finish it, and force the commit blocks to disk
3616 if (atomic_read(&journal->j_wcount) <= 0 &&
3617 journal->j_trans_start_time > 0 &&
3618 journal->j_len > 0 &&
3619 (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3620 if (!journal_join(&th, sb, 1)) {
3621 reiserfs_prepare_for_journal(sb,
3622 SB_BUFFER_WITH_SB(sb),
3624 journal_mark_dirty(&th, sb,
3625 SB_BUFFER_WITH_SB(sb));
3627 /* we're only being called from kreiserfsd, it makes no sense to do
3628 ** an async commit so that kreiserfsd can do it later
3630 do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
3637 ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3639 ** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3640 ** the writers are done. By the time it wakes up, the transaction it was called with has already ended, so it just
3641 ** flushes the commit list and returns 0.
3643 ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
3645 ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3647 static int check_journal_end(struct reiserfs_transaction_handle *th,
3648 struct super_block *sb, unsigned long nblocks,
3653 int flush = flags & FLUSH_ALL;
3654 int commit_now = flags & COMMIT_NOW;
3655 int wait_on_commit = flags & WAIT;
3656 struct reiserfs_journal_list *jl;
3657 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3659 BUG_ON(!th->t_trans_id);
3661 if (th->t_trans_id != journal->j_trans_id) {
3662 reiserfs_panic(th->t_super, "journal-1577",
3663 "handle trans id %ld != current trans id %ld",
3664 th->t_trans_id, journal->j_trans_id);
3667 journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3668 if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
3669 atomic_dec(&(journal->j_wcount));
3672 /* BUG: deal with the case where j_len is 0 but previously freed blocks still need to be released;
3673 ** that will be dealt with by the next transaction that actually writes something, but it should be taken
3674 ** care of in this trans
3676 BUG_ON(journal->j_len == 0);
3678 /* if wcount > 0, and we are called to with flush or commit_now,
3679 ** we wait on j_join_wait. We will wake up when the last writer has
3680 ** finished the transaction, and started it on its way to the disk.
3681 ** Then, we flush the commit or journal list, and just return 0
3682 ** because the rest of journal end was already done for this transaction.
3684 if (atomic_read(&(journal->j_wcount)) > 0) {
3685 if (flush || commit_now) {
3688 jl = journal->j_current_jl;
3689 trans_id = jl->j_trans_id;
3691 jl->j_state |= LIST_COMMIT_PENDING;
3692 atomic_set(&(journal->j_jlock), 1);
3694 journal->j_next_full_flush = 1;
3698 /* sleep while the current transaction is still j_jlocked */
3699 while (journal->j_trans_id == trans_id) {
3700 if (atomic_read(&journal->j_jlock)) {
3701 queue_log_writer(sb);
3704 if (journal->j_trans_id == trans_id) {
3705 atomic_set(&(journal->j_jlock),
3711 BUG_ON(journal->j_trans_id == trans_id);
3714 && journal_list_still_alive(sb, trans_id)
3715 && wait_on_commit) {
3716 flush_commit_list(sb, jl, 1);
3724 /* deal with old transactions where we are the last writers */
3725 now = get_seconds();
3726 if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3728 journal->j_next_async_flush = 1;
3730 /* don't batch when someone is waiting on j_join_wait */
3731 /* don't batch when syncing the commit or flushing the whole trans */
3732 if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3733 && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3734 && journal->j_len_alloc < journal->j_max_batch
3735 && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3736 journal->j_bcount++;
3741 if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
3742 reiserfs_panic(sb, "journal-003",
3743 "j_start (%ld) is too high",
3750 ** Does all the work that makes deleting blocks safe.
3751 ** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
3753 ** otherwise:
3754 ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
3755 ** before this transaction has finished.
3757 ** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
3758 ** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
3759 ** the block can't be reallocated yet.
3761 ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3763 int journal_mark_freed(struct reiserfs_transaction_handle *th,
3764 struct super_block *sb, b_blocknr_t blocknr)
3766 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3767 struct reiserfs_journal_cnode *cn = NULL;
3768 struct buffer_head *bh = NULL;
3769 struct reiserfs_list_bitmap *jb = NULL;
3771 BUG_ON(!th->t_trans_id);
3773 cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
3778 /* if it is journal new, we just remove it from this transaction */
3779 if (bh && buffer_journal_new(bh)) {
3780 clear_buffer_journal_new(bh);
3781 clear_prepared_bits(bh);
3782 reiserfs_clean_and_file_buffer(bh);
3783 cleaned = remove_from_transaction(sb, blocknr, cleaned);
3785 /* set the bit for this block in the journal bitmap for this transaction */
3786 jb = journal->j_current_jl->j_list_bitmap;
3788 reiserfs_panic(sb, "journal-1702",
3789 "journal_list_bitmap is NULL");
3791 set_bit_in_list_bitmap(sb, blocknr, jb);
3793 /* Note, the entire while loop is not allowed to schedule. */
3796 clear_prepared_bits(bh);
3797 reiserfs_clean_and_file_buffer(bh);
3799 cleaned = remove_from_transaction(sb, blocknr, cleaned);
3801 /* find all older transactions with this block, make sure they don't try to write it out */
3802 cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
3805 if (sb == cn->sb && blocknr == cn->blocknr) {
3806 set_bit(BLOCK_FREED, &cn->state);
3809 /* remove_from_transaction will brelse the buffer if it was
3810 ** in the current trans
3812 clear_buffer_journal_dirty(cn->
3814 clear_buffer_dirty(cn->bh);
3815 clear_buffer_journal_test(cn->
3820 (&(cn->bh->b_count)) < 0) {
3821 reiserfs_warning(sb,
3823 "cn->bh->b_count < 0");
3826 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
3839 release_buffer_page(bh); /* get_hash grabs the buffer */
3843 void reiserfs_update_inode_transaction(struct inode *inode)
3845 struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3846 REISERFS_I(inode)->i_jl = journal->j_current_jl;
3847 REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3851 * returns -1 on error, 0 if no commits/barriers were done and 1
3852 * if a transaction was actually committed and the barrier was done
3854 static int __commit_trans_jl(struct inode *inode, unsigned long id,
3855 struct reiserfs_journal_list *jl)
3857 struct reiserfs_transaction_handle th;
3858 struct super_block *sb = inode->i_sb;
3859 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3862 /* is it from the current transaction, or from an unknown transaction? */
3863 if (id == journal->j_trans_id) {
3864 jl = journal->j_current_jl;
3865 /* try to let other writers come in and grow this transaction */
3866 let_transaction_grow(sb, id);
3867 if (journal->j_trans_id != id) {
3868 goto flush_commit_only;
3871 ret = journal_begin(&th, sb, 1);
3875 /* someone might have ended this transaction while we joined */
3876 if (journal->j_trans_id != id) {
3877 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3879 journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3880 ret = journal_end(&th, sb, 1);
3881 goto flush_commit_only;
3884 ret = journal_end_sync(&th, sb, 1);
3889 /* this gets tricky, we have to make sure the journal list in
3890 * the inode still exists. We know the list is still around
3891 * if we've got a larger transaction id than the oldest list's
3894 if (journal_list_still_alive(inode->i_sb, id)) {
3896 * we only set ret to 1 when we know for sure
3897 * the barrier hasn't been started yet on the commit
3898 * block
3900 if (atomic_read(&jl->j_commit_left) > 1)
3902 flush_commit_list(sb, jl, 1);
3903 if (journal->j_errno)
3904 ret = journal->j_errno;
3907 /* otherwise the list is gone, and long since committed */
3911 int reiserfs_commit_for_inode(struct inode *inode)
3913 unsigned int id = REISERFS_I(inode)->i_trans_id;
3914 struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
3916 /* for the whole inode, assume unset id means it was
3917 * changed in the current transaction. More conservative
3920 reiserfs_update_inode_transaction(inode);
3921 id = REISERFS_I(inode)->i_trans_id;
3922 /* jl will be updated in __commit_trans_jl */
3925 return __commit_trans_jl(inode, id, jl);
3928 void reiserfs_restore_prepared_buffer(struct super_block *sb,
3929 struct buffer_head *bh)
3931 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3932 PROC_INFO_INC(sb, journal.restore_prepared);
3936 if (test_clear_buffer_journal_restore_dirty(bh) &&
3937 buffer_journal_dirty(bh)) {
3938 struct reiserfs_journal_cnode *cn;
3939 cn = get_journal_hash_dev(sb,
3940 journal->j_list_hash_table,
3942 if (cn && can_dirty(cn)) {
3943 set_buffer_journal_test(bh);
3944 mark_buffer_dirty(bh);
3947 clear_buffer_journal_prepared(bh);
3950 extern struct tree_balance *cur_tb;
3952 ** before we can change a metadata block, we have to make sure it won't
3953 ** be written to disk while we are altering it. So, we must:
3954 ** clean it
3955 ** wait on it.
3958 int reiserfs_prepare_for_journal(struct super_block *sb,
3959 struct buffer_head *bh, int wait)
3961 PROC_INFO_INC(sb, journal.prepare);
3963 if (!trylock_buffer(bh)) {
3968 set_buffer_journal_prepared(bh);
3969 if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3970 clear_buffer_journal_test(bh);
3971 set_buffer_journal_restore_dirty(bh);
3977 static void flush_old_journal_lists(struct super_block *s)
3979 struct reiserfs_journal *journal = SB_JOURNAL(s);
3980 struct reiserfs_journal_list *jl;
3981 struct list_head *entry;
3982 time_t now = get_seconds();
3984 while (!list_empty(&journal->j_journal_list)) {
3985 entry = journal->j_journal_list.next;
3986 jl = JOURNAL_LIST_ENTRY(entry);
3987 /* this check should always be run, to send old lists to disk */
3988 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
3989 atomic_read(&jl->j_commit_left) == 0 &&
3990 test_transaction(s, jl)) {
3991 flush_used_journal_lists(s, jl);
3999 ** long and ugly. If flush, will not return until all commit
4000 ** blocks and all real buffers in the trans are on disk.
4001 ** If no_async, won't return until all commit blocks are on disk.
4003 ** keep reading, there are comments as you go along
4005 ** If the journal is aborted, we just clean up. Things like flushing
4006 ** journal lists, etc just won't happen.
4008 static int do_journal_end(struct reiserfs_transaction_handle *th,
4009 struct super_block *sb, unsigned long nblocks,
4012 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4013 struct reiserfs_journal_cnode *cn, *next, *jl_cn;
4014 struct reiserfs_journal_cnode *last_cn = NULL;
4015 struct reiserfs_journal_desc *desc;
4016 struct reiserfs_journal_commit *commit;
4017 struct buffer_head *c_bh; /* commit bh */
4018 struct buffer_head *d_bh; /* desc bh */
4019 int cur_write_start = 0; /* start index of current log write */
4024 struct reiserfs_journal_list *jl, *temp_jl;
4025 struct list_head *entry, *safe;
4026 unsigned long jindex;
4027 unsigned int commit_trans_id;
4030 BUG_ON(th->t_refcount > 1);
4031 BUG_ON(!th->t_trans_id);
4033 /* protect flush_older_commits from making mistakes if the
4034 transaction ID counter overflows. */
4035 if (th->t_trans_id == ~0U)
4036 flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
4037 flush = flags & FLUSH_ALL;
4038 wait_on_commit = flags & WAIT;
4041 current->journal_info = th->t_handle_save;
4042 reiserfs_check_lock_depth(sb, "journal end");
4043 if (journal->j_len == 0) {
4044 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
4046 journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
4050 if (journal->j_next_full_flush) {
4054 if (journal->j_next_async_flush) {
4055 flags |= COMMIT_NOW | WAIT;
4059 /* check_journal_end locks the journal, and unlocks it if it does not return 1.
4060 ** it tells us if we should continue with the journal_end, or just return
4062 if (!check_journal_end(th, sb, nblocks, flags)) {
4064 wake_queued_writers(sb);
4065 reiserfs_async_progress_wait(sb);
4069 /* check_journal_end might set these, check again */
4070 if (journal->j_next_full_flush) {
4075 ** j must wait means we have to flush the log blocks, and the real blocks for
4076 ** this transaction
4078 if (journal->j_must_wait > 0) {
4081 #ifdef REISERFS_PREALLOCATE
4082 /* quota ops might need to nest, setup the journal_info pointer for them
4083 * and raise the refcount so that it is > 0. */
4084 current->journal_info = th;
4086 reiserfs_discard_all_prealloc(th); /* it should not bring new blocks into
4087 * the transaction */
4089 current->journal_info = th->t_handle_save;
4092 /* setup description block */
4095 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4097 set_buffer_uptodate(d_bh);
4098 desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
4099 memset(d_bh->b_data, 0, d_bh->b_size);
4100 memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
4101 set_desc_trans_id(desc, journal->j_trans_id);
4103 /* setup the commit block. Don't write this one (keep it clean, too) until after everyone else is written */
4104 c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4105 ((journal->j_start + journal->j_len +
4106 1) % SB_ONDISK_JOURNAL_SIZE(sb)));
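/*
* on-disk layout being built here (illustrative): the desc block at
* j_start, j_len copies of the real blocks at j_start+1 .. j_start+j_len,
* and the commit block at j_start+j_len+1, all modulo
* SB_ONDISK_JOURNAL_SIZE(sb)
*/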
4107 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
4108 memset(c_bh->b_data, 0, c_bh->b_size);
4109 set_commit_trans_id(commit, journal->j_trans_id);
4110 set_buffer_uptodate(c_bh);
4112 /* init this journal list */
4113 jl = journal->j_current_jl;
4115 /* we lock the commit before doing anything because
4116 * we want to make sure nobody tries to run flush_commit_list until
4117 * the new transaction is fully setup, and we've already flushed the
4118 * ordered bh list
4120 reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);
4122 /* save the transaction id in case we need to commit it later */
4123 commit_trans_id = jl->j_trans_id;
4125 atomic_set(&jl->j_older_commits_done, 0);
4126 jl->j_trans_id = journal->j_trans_id;
4127 jl->j_timestamp = journal->j_trans_start_time;
4128 jl->j_commit_bh = c_bh;
4129 jl->j_start = journal->j_start;
4130 jl->j_len = journal->j_len;
4131 atomic_set(&jl->j_nonzerolen, journal->j_len);
4132 atomic_set(&jl->j_commit_left, journal->j_len + 2);
4133 jl->j_realblock = NULL;
4135 /* The ENTIRE FOR LOOP MUST NOT cause schedule to occur.
4136 ** for each real block, add it to the journal list hash,
4137 ** copy it into the real block index array in the commit or desc block
4139 trans_half = journal_trans_half(sb->s_blocksize);
4140 for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
4141 if (buffer_journaled(cn->bh)) {
4142 jl_cn = get_cnode(sb);
4144 reiserfs_panic(sb, "journal-1676",
4145 "get_cnode returned NULL");
4148 jl->j_realblock = jl_cn;
4150 jl_cn->prev = last_cn;
4153 last_cn->next = jl_cn;
4156 /* make sure the block we are trying to log is not a block
4157 of journal or reserved area */
4159 if (is_block_in_log_or_reserved_area
4160 (sb, cn->bh->b_blocknr)) {
4161 reiserfs_panic(sb, "journal-2332",
4162 "Trying to log block %lu, "
4163 "which is a log block",
4166 jl_cn->blocknr = cn->bh->b_blocknr;
4171 insert_journal_hash(journal->j_list_hash_table, jl_cn);
4172 if (i < trans_half) {
4173 desc->j_realblock[i] =
4174 cpu_to_le32(cn->bh->b_blocknr);
4176 commit->j_realblock[i - trans_half] =
4177 cpu_to_le32(cn->bh->b_blocknr);
4183 set_desc_trans_len(desc, journal->j_len);
4184 set_desc_mount_id(desc, journal->j_mount_id);
4185 set_desc_trans_id(desc, journal->j_trans_id);
4186 set_commit_trans_len(commit, journal->j_len);
4188 /* special check in case all buffers in the journal were marked for not logging */
4189 BUG_ON(journal->j_len == 0);
4191 /* we're about to dirty all the log blocks, mark the description block
4192 * dirty now too. Don't mark the commit block dirty until all the
4193 * others are on disk
4195 mark_buffer_dirty(d_bh);
4197 /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
4198 cur_write_start = journal->j_start;
4199 cn = journal->j_first;
4200 jindex = 1; /* start at one so we don't get the desc again */
4202 clear_buffer_journal_new(cn->bh);
4203 /* copy all the real blocks into log area. dirty log blocks */
4204 if (buffer_journaled(cn->bh)) {
4205 struct buffer_head *tmp_bh;
4210 SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
4213 SB_ONDISK_JOURNAL_SIZE(sb)));
4214 set_buffer_uptodate(tmp_bh);
4215 page = cn->bh->b_page;
4217 memcpy(tmp_bh->b_data,
4218 addr + offset_in_page(cn->bh->b_data),
4221 mark_buffer_dirty(tmp_bh);
4223 set_buffer_journal_dirty(cn->bh);
4224 clear_buffer_journaled(cn->bh);
4226 /* JDirty cleared sometime during transaction. don't log this one */
4227 reiserfs_warning(sb, "journal-2048",
4228 "BAD, buffer in journal hash, "
4238 /* we are done with both the c_bh and d_bh, but
4239 ** c_bh must be written after all other commit blocks,
4240 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
4243 journal->j_current_jl = alloc_journal_list(sb);
4245 /* now it is safe to insert this transaction on the main list */
4246 list_add_tail(&jl->j_list, &journal->j_journal_list);
4247 list_add_tail(&jl->j_working_list, &journal->j_working_list);
4248 journal->j_num_work_lists++;
4250 /* reset journal values for the next transaction */
4251 old_start = journal->j_start;
4253 (journal->j_start + journal->j_len +
4254 2) % SB_ONDISK_JOURNAL_SIZE(sb);
4255 atomic_set(&(journal->j_wcount), 0);
4256 journal->j_bcount = 0;
4257 journal->j_last = NULL;
4258 journal->j_first = NULL;
4260 journal->j_trans_start_time = 0;
4261 /* check for trans_id overflow */
4262 if (++journal->j_trans_id == 0)
4263 journal->j_trans_id = 10;
4264 journal->j_current_jl->j_trans_id = journal->j_trans_id;
4265 journal->j_must_wait = 0;
4266 journal->j_len_alloc = 0;
4267 journal->j_next_full_flush = 0;
4268 journal->j_next_async_flush = 0;
4269 init_journal_hash(sb);
4271 // make sure reiserfs_add_jh sees the new current_jl before we
4272 // write out the tails
4275 /* tail conversion targets have to hit the disk before we end the
4276 * transaction. Otherwise a later transaction might repack the tail
4277 * before this transaction commits, leaving the data block unflushed and
4278 * clean. If we crash before the later transaction commits, the data block
4279 * is lost.
4281 if (!list_empty(&jl->j_tail_bh_list)) {
4282 reiserfs_write_unlock(sb);
4283 write_ordered_buffers(&journal->j_dirty_buffers_lock,
4284 journal, jl, &jl->j_tail_bh_list);
4285 reiserfs_write_lock(sb);
4287 BUG_ON(!list_empty(&jl->j_tail_bh_list));
4288 mutex_unlock(&jl->j_commit_mutex);
4290 /* honor the flush wishes from the caller. simple commits can
4291 ** be done outside the journal lock; they are done below
4293 ** if we don't flush the commit list right now, we put it into
4294 ** the work queue so the people waiting on the async progress work
4295 ** queue don't wait for this proc to flush journal lists and such.
4298 flush_commit_list(sb, jl, 1);
4299 flush_journal_list(sb, jl, 1);
4300 } else if (!(jl->j_state & LIST_COMMIT_PENDING))
4301 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4303 /* if the next transaction has any chance of wrapping, flush
4304 ** transactions that might get overwritten. If any journal lists are very
4305 ** old flush them as well.
4308 list_for_each_safe(entry, safe, &journal->j_journal_list) {
4309 temp_jl = JOURNAL_LIST_ENTRY(entry);
4310 if (journal->j_start <= temp_jl->j_start) {
4311 if ((journal->j_start + journal->j_trans_max + 1) >=
4313 flush_used_journal_lists(sb, temp_jl);
4315 } else if ((journal->j_start +
4316 journal->j_trans_max + 1) <
4317 SB_ONDISK_JOURNAL_SIZE(sb)) {
4318 /* if we don't cross into the next transaction and we don't
4319 * wrap, there is no way we can overlap any later transactions
4324 } else if ((journal->j_start +
4325 journal->j_trans_max + 1) >
4326 SB_ONDISK_JOURNAL_SIZE(sb)) {
4327 if (((journal->j_start + journal->j_trans_max + 1) %
4328 SB_ONDISK_JOURNAL_SIZE(sb)) >=
4330 flush_used_journal_lists(sb, temp_jl);
4333 /* we don't overlap anything from our start to the end of the
4334 * log, and our wrapped portion doesn't overlap anything at
4335 * the start of the log. We can break
4341 flush_old_journal_lists(sb);
4343 journal->j_current_jl->j_list_bitmap =
4344 get_list_bitmap(sb, journal->j_current_jl);
4346 if (!(journal->j_current_jl->j_list_bitmap)) {
4347 reiserfs_panic(sb, "journal-1996",
4348 "could not get a list bitmap");
4351 atomic_set(&(journal->j_jlock), 0);
4353 /* wake up anybody waiting to join. */
4354 clear_bit(J_WRITERS_QUEUED, &journal->j_state);
4355 wake_up(&(journal->j_join_wait));
4357 if (!flush && wait_on_commit &&
4358 journal_list_still_alive(sb, commit_trans_id)) {
4359 flush_commit_list(sb, jl, 1);
4362 reiserfs_check_lock_depth(sb, "journal end2");
4364 memset(th, 0, sizeof(*th));
4365 /* Re-set th->t_super, so we can properly keep track of how many
4366 * persistent transactions there are. We need to do this so if this
4367 * call is part of a failed restart_transaction, we can free it later */
4370 return journal->j_errno;
4373 /* Set the file system read only and refuse new transactions */
4374 void reiserfs_abort_journal(struct super_block *sb, int errno)
4376 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4377 if (test_bit(J_ABORTED, &journal->j_state))
4380 if (!journal->j_errno)
4381 journal->j_errno = errno;
4383 sb->s_flags |= MS_RDONLY;
4384 set_bit(J_ABORTED, &journal->j_state);
4386 #ifdef CONFIG_REISERFS_CHECK