xfs: mark inodes dirty before issuing I/O
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 8c9b28d..f96f850 100644
@@ -1,5 +1,5 @@
 /*
- * linux/fs/journal.c
+ * linux/fs/jbd/journal.c
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
  *
 #include <linux/jbd.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/mm.h>
-#include <linux/suspend.h>
+#include <linux/freezer.h>
 #include <linux/pagemap.h>
 #include <linux/kthread.h>
 #include <linux/poison.h>
 #include <linux/proc_fs.h>
+#include <linux/debugfs.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -68,7 +68,6 @@ EXPORT_SYMBOL(journal_set_features);
 EXPORT_SYMBOL(journal_create);
 EXPORT_SYMBOL(journal_load);
 EXPORT_SYMBOL(journal_destroy);
-EXPORT_SYMBOL(journal_update_superblock);
 EXPORT_SYMBOL(journal_abort);
 EXPORT_SYMBOL(journal_errno);
 EXPORT_SYMBOL(journal_ack_err);
@@ -180,7 +179,7 @@ loop:
                                                transaction->t_expires))
                        should_sleep = 0;
                if (journal->j_flags & JFS_UNMOUNT)
-                       should_sleep = 0;
+                       should_sleep = 0;
                if (should_sleep) {
                        spin_unlock(&journal->j_state_lock);
                        schedule();
@@ -210,10 +209,16 @@ end_loop:
        return 0;
 }
 
-static void journal_start_thread(journal_t *journal)
+static int journal_start_thread(journal_t *journal)
 {
-       kthread_run(kjournald, journal, "kjournald");
-       wait_event(journal->j_wait_done_commit, journal->j_task != 0);
+       struct task_struct *t;
+
+       t = kthread_run(kjournald, journal, "kjournald");
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+
+       wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
+       return 0;
 }
 
 static void journal_kill_thread(journal_t *journal)
@@ -224,7 +229,8 @@ static void journal_kill_thread(journal_t *journal)
        while (journal->j_task) {
                wake_up(&journal->j_wait_commit);
                spin_unlock(&journal->j_state_lock);
-               wait_event(journal->j_wait_done_commit, journal->j_task == 0);
+               wait_event(journal->j_wait_done_commit,
+                               journal->j_task == NULL);
                spin_lock(&journal->j_state_lock);
        }
        spin_unlock(&journal->j_state_lock);
@@ -270,7 +276,7 @@ static void journal_kill_thread(journal_t *journal)
 int journal_write_metadata_buffer(transaction_t *transaction,
                                  struct journal_head  *jh_in,
                                  struct journal_head **jh_out,
-                                 int blocknr)
+                                 unsigned long blocknr)
 {
        int need_copy_out = 0;
        int done_copy_out = 0;
@@ -281,6 +287,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
        struct page *new_page;
        unsigned int new_offset;
        struct buffer_head *bh_in = jh2bh(jh_in);
+       journal_t *journal = transaction->t_journal;
 
        /*
         * The buffer really shouldn't be locked: only the current committing
@@ -294,6 +301,11 @@ int journal_write_metadata_buffer(transaction_t *transaction,
        J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
        new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+       /* keep subsequent assertions sane */
+       new_bh->b_state = 0;
+       init_buffer(new_bh, NULL, NULL);
+       atomic_set(&new_bh->b_count, 1);
+       new_jh = journal_add_journal_head(new_bh);      /* This sleeps */
 
        /*
         * If a new transaction has already done a buffer copy-out, then
@@ -328,10 +340,10 @@ repeat:
                char *tmp;
 
                jbd_unlock_bh_state(bh_in);
-               tmp = jbd_rep_kmalloc(bh_in->b_size, GFP_NOFS);
+               tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
                jbd_lock_bh_state(bh_in);
                if (jh_in->b_frozen_data) {
-                       kfree(tmp);
+                       jbd_free(tmp, bh_in->b_size);
                        goto repeat;
                }
 
@@ -355,14 +367,6 @@ repeat:
                kunmap_atomic(mapped_data, KM_USER0);
        }
 
-       /* keep subsequent assertions sane */
-       new_bh->b_state = 0;
-       init_buffer(new_bh, NULL, NULL);
-       atomic_set(&new_bh->b_count, 1);
-       jbd_unlock_bh_state(bh_in);
-
-       new_jh = journal_add_journal_head(new_bh);      /* This sleeps */
-
        set_bh_page(new_bh, new_page, new_offset);
        new_jh->b_transaction = NULL;
        new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -379,7 +383,11 @@ repeat:
         * copying is moved to the transaction's shadow queue.
         */
        JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-       journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       spin_lock(&journal->j_list_lock);
+       __journal_file_buffer(jh_in, transaction, BJ_Shadow);
+       spin_unlock(&journal->j_list_lock);
+       jbd_unlock_bh_state(bh_in);
+
        JBUFFER_TRACE(new_jh, "file as BJ_IO");
        journal_file_buffer(new_jh, transaction, BJ_IO);
 
@@ -421,7 +429,7 @@ int __log_space_left(journal_t *journal)
 }
 
 /*
- * Called under j_state_lock.  Returns true if a transaction was started.
+ * Called under j_state_lock.  Returns true if a transaction commit was started.
  */
 int __log_start_commit(journal_t *journal, tid_t target)
 {
@@ -489,7 +497,8 @@ int journal_force_commit_nested(journal_t *journal)
 
 /*
  * Start a commit of the current running transaction (if any).  Returns true
- * if a transaction was started, and fills its tid in at *ptid
+ * if a transaction is going to be committed (or is currently already
+ * committing), and fills its tid in at *ptid
  */
 int journal_start_commit(journal_t *journal, tid_t *ptid)
 {
@@ -499,15 +508,19 @@ int journal_start_commit(journal_t *journal, tid_t *ptid)
        if (journal->j_running_transaction) {
                tid_t tid = journal->j_running_transaction->t_tid;
 
-               ret = __log_start_commit(journal, tid);
-               if (ret && ptid)
+               __log_start_commit(journal, tid);
+               /* There's a running transaction and we've just made sure
+                * its commit has been scheduled. */
+               if (ptid)
                        *ptid = tid;
-       } else if (journal->j_committing_transaction && ptid) {
+               ret = 1;
+       } else if (journal->j_committing_transaction) {
                /*
                 * If ext3_write_super() recently started a commit, then we
                 * have to wait for completion of that transaction
                 */
-               *ptid = journal->j_committing_transaction->t_tid;
+               if (ptid)
+                       *ptid = journal->j_committing_transaction->t_tid;
                ret = 1;
        }
        spin_unlock(&journal->j_state_lock);
@@ -527,7 +540,7 @@ int log_wait_commit(journal_t *journal, tid_t tid)
        if (!tid_geq(journal->j_commit_request, tid)) {
                printk(KERN_EMERG
                       "%s: error: j_commit_request=%d, tid=%d\n",
-                      __FUNCTION__, journal->j_commit_request, tid);
+                      __func__, journal->j_commit_request, tid);
        }
        spin_unlock(&journal->j_state_lock);
 #endif
@@ -577,7 +590,7 @@ int journal_next_log_block(journal_t *journal, unsigned long *retp)
  * this is a no-op.  If needed, we can use j_blk_offset - everything is
  * ready.
  */
-int journal_bmap(journal_t *journal, unsigned long blocknr, 
+int journal_bmap(journal_t *journal, unsigned long blocknr,
                 unsigned long *retp)
 {
        int err = 0;
@@ -592,7 +605,7 @@ int journal_bmap(journal_t *journal, unsigned long blocknr,
 
                        printk(KERN_ALERT "%s: journal block not found "
                                        "at offset %lu on %s\n",
-                               __FUNCTION__,
+                               __func__,
                                blocknr,
                                bdevname(journal->j_dev, b));
                        err = -EIO;
@@ -626,6 +639,8 @@ struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
                return NULL;
 
        bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
+       if (!bh)
+               return NULL;
        lock_buffer(bh);
        memset(bh->b_data, 0, journal->j_blocksize);
        set_buffer_uptodate(bh);
@@ -648,10 +663,9 @@ static journal_t * journal_init_common (void)
        journal_t *journal;
        int err;
 
-       journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
+       journal = kzalloc(sizeof(*journal), GFP_KERNEL);
        if (!journal)
                goto fail;
-       memset(journal, 0, sizeof(*journal));
 
        init_waitqueue_head(&journal->j_wait_transaction_locked);
        init_waitqueue_head(&journal->j_wait_logspace);
@@ -691,17 +705,18 @@ fail:
  */
 
 /**
- *  journal_t * journal_init_dev() - creates an initialises a journal structure
+ *  journal_t * journal_init_dev() - creates and initialises a journal structure
  *  @bdev: Block device on which to create the journal
  *  @fs_dev: Device which holds the journalled filesystem for this journal.
  *  @start: Block nr Start of journal.
- *  @len:  Lenght of the journal in blocks.
+ *  @len:  Length of the journal in blocks.
  *  @blocksize: blocksize of journalling device
- *  @returns: a newly created journal_t *
- *  
+ *
+ *  Returns: a newly created journal_t *
+ *
  *  journal_init_dev creates a journal which maps a fixed contiguous
  *  range of blocks on an arbitrary block device.
- * 
+ *
  */
 journal_t * journal_init_dev(struct block_device *bdev,
                        struct block_device *fs_dev,
@@ -714,35 +729,41 @@ journal_t * journal_init_dev(struct block_device *bdev,
        if (!journal)
                return NULL;
 
+       /* journal descriptor can store up to n blocks -bzzz */
+       journal->j_blocksize = blocksize;
+       n = journal->j_blocksize / sizeof(journal_block_tag_t);
+       journal->j_wbufsize = n;
+       journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
+       if (!journal->j_wbuf) {
+               printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
+                       __func__);
+               goto out_err;
+       }
        journal->j_dev = bdev;
        journal->j_fs_dev = fs_dev;
        journal->j_blk_offset = start;
        journal->j_maxlen = len;
-       journal->j_blocksize = blocksize;
 
        bh = __getblk(journal->j_dev, start, journal->j_blocksize);
-       J_ASSERT(bh != NULL);
+       if (!bh) {
+               printk(KERN_ERR
+                      "%s: Cannot get buffer for journal superblock\n",
+                      __func__);
+               goto out_err;
+       }
        journal->j_sb_buffer = bh;
        journal->j_superblock = (journal_superblock_t *)bh->b_data;
 
-       /* journal descriptor can store up to n blocks -bzzz */
-       n = journal->j_blocksize / sizeof(journal_block_tag_t);
-       journal->j_wbufsize = n;
-       journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
-       if (!journal->j_wbuf) {
-               printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-                       __FUNCTION__);
-               kfree(journal);
-               journal = NULL;
-       }
-
        return journal;
+out_err:
+       kfree(journal);
+       return NULL;
 }
-/** 
+
+/**
  *  journal_t * journal_init_inode () - creates a journal which maps to an inode.
  *  @inode: An inode to create the journal in
- *  
+ *
  * journal_init_inode creates a journal which maps an on-disk inode as
  * the journal.  The inode must exist already, must support bmap() and
  * must have all data blocks preallocated.
@@ -762,7 +783,7 @@ journal_t * journal_init_inode (struct inode *inode)
        journal->j_inode = inode;
        jbd_debug(1,
                  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
-                 journal, inode->i_sb->s_id, inode->i_ino, 
+                 journal, inode->i_sb->s_id, inode->i_ino,
                  (long long) inode->i_size,
                  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
 
@@ -775,32 +796,38 @@ journal_t * journal_init_inode (struct inode *inode)
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
        if (!journal->j_wbuf) {
                printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-                       __FUNCTION__);
-               kfree(journal);
-               return NULL;
+                       __func__);
+               goto out_err;
        }
 
        err = journal_bmap(journal, 0, &blocknr);
        /* If that failed, give up */
        if (err) {
                printk(KERN_ERR "%s: Cannot locate journal superblock\n",
-                      __FUNCTION__);
-               kfree(journal);
-               return NULL;
+                      __func__);
+               goto out_err;
        }
 
        bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-       J_ASSERT(bh != NULL);
+       if (!bh) {
+               printk(KERN_ERR
+                      "%s: Cannot get buffer for journal superblock\n",
+                      __func__);
+               goto out_err;
+       }
        journal->j_sb_buffer = bh;
        journal->j_superblock = (journal_superblock_t *)bh->b_data;
 
        return journal;
+out_err:
+       kfree(journal);
+       return NULL;
 }
 
-/* 
+/*
  * If the journal init or create aborts, we need to mark the journal
  * superblock as being NULL to prevent the journal destroy from writing
- * back a bogus superblock. 
+ * back a bogus superblock.
  */
 static void journal_fail_superblock (journal_t *journal)
 {
@@ -819,10 +846,16 @@ static void journal_fail_superblock (journal_t *journal)
 static int journal_reset(journal_t *journal)
 {
        journal_superblock_t *sb = journal->j_superblock;
-       unsigned int first, last;
+       unsigned long first, last;
 
        first = be32_to_cpu(sb->s_first);
        last = be32_to_cpu(sb->s_maxlen);
+       if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
+               printk(KERN_ERR "JBD: Journal too short (blocks %lu-%lu).\n",
+                      first, last);
+               journal_fail_superblock(journal);
+               return -EINVAL;
+       }
 
        journal->j_first = first;
        journal->j_last = last;
@@ -839,17 +872,16 @@ static int journal_reset(journal_t *journal)
 
        /* Add the dynamic fields and write it to disk. */
        journal_update_superblock(journal, 1);
-       journal_start_thread(journal);
-       return 0;
+       return journal_start_thread(journal);
 }
 
-/** 
+/**
  * int journal_create() - Initialise the new journal file
  * @journal: Journal to create. This structure must have been initialised
- * 
+ *
  * Given a journal_t structure which tells us which disk blocks we can
  * use, create a new journal superblock and initialise all of the
- * journal fields from scratch.  
+ * journal fields from scratch.
  **/
 int journal_create(journal_t *journal)
 {
@@ -871,7 +903,7 @@ int journal_create(journal_t *journal)
                 */
                printk(KERN_EMERG
                       "%s: creation of journal on external device!\n",
-                      __FUNCTION__);
+                      __func__);
                BUG();
        }
 
@@ -914,7 +946,7 @@ int journal_create(journal_t *journal)
        return journal_reset(journal);
 }
 
-/** 
+/**
  * void journal_update_superblock() - Update journal sb on disk.
  * @journal: The journal to update.
  * @wait: Set to '0' if you don't want to wait for IO completion.
@@ -938,7 +970,7 @@ void journal_update_superblock(journal_t *journal, int wait)
                                journal->j_transaction_sequence) {
                jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
                        "(start %ld, seq %d, errno %d)\n",
-                       journal->j_tail, journal->j_tail_sequence, 
+                       journal->j_tail, journal->j_tail_sequence,
                        journal->j_errno);
                goto out;
        }
@@ -1061,7 +1093,7 @@ static int load_superblock(journal_t *journal)
 /**
  * int journal_load() - Read journal from disk.
  * @journal: Journal to act on.
- * 
+ *
  * Given a journal_t structure which tells us which disk blocks contain
  * a journal, read the journal from disk to initialise the in-memory
  * structures.
@@ -1069,17 +1101,17 @@ static int load_superblock(journal_t *journal)
 int journal_load(journal_t *journal)
 {
        int err;
+       journal_superblock_t *sb;
 
        err = load_superblock(journal);
        if (err)
                return err;
 
+       sb = journal->j_superblock;
        /* If this is a V2 superblock, then we have to check the
         * features flags on it. */
 
        if (journal->j_format_version >= 2) {
-               journal_superblock_t *sb = journal->j_superblock;
-
                if ((sb->s_feature_ro_compat &
                     ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
                    (sb->s_feature_incompat &
@@ -1116,9 +1148,12 @@ recovery_error:
  *
  * Release a journal_t structure once it is no longer in use by the
  * journaled object.
+ * Return <0 if we couldn't clean up the journal.
  */
-void journal_destroy(journal_t *journal)
+int journal_destroy(journal_t *journal)
 {
+       int err = 0;
+
        /* Wait for the commit thread to wake up and die. */
        journal_kill_thread(journal);
 
@@ -1141,11 +1176,16 @@ void journal_destroy(journal_t *journal)
        J_ASSERT(journal->j_checkpoint_transactions == NULL);
        spin_unlock(&journal->j_list_lock);
 
-       /* We can now mark the journal as empty. */
-       journal->j_tail = 0;
-       journal->j_tail_sequence = ++journal->j_transaction_sequence;
        if (journal->j_sb_buffer) {
-               journal_update_superblock(journal, 1);
+               if (!is_journal_aborted(journal)) {
+                       /* We can now mark the journal as empty. */
+                       journal->j_tail = 0;
+                       journal->j_tail_sequence =
+                               ++journal->j_transaction_sequence;
+                       journal_update_superblock(journal, 1);
+               } else {
+                       err = -EIO;
+               }
                brelse(journal->j_sb_buffer);
        }
 
@@ -1155,6 +1195,8 @@ void journal_destroy(journal_t *journal)
                journal_destroy_revoke(journal);
        kfree(journal->j_wbuf);
        kfree(journal);
+
+       return err;
 }
 
 
@@ -1164,9 +1206,9 @@ void journal_destroy(journal_t *journal)
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
  * @incompat: bitmask of incompatible features
- * 
+ *
  * Check whether the journal uses all of a given set of
- * features.  Return true (non-zero) if it does. 
+ * features.  Return true (non-zero) if it does.
  **/
 
 int journal_check_used_features (journal_t *journal, unsigned long compat,
@@ -1195,7 +1237,7 @@ int journal_check_used_features (journal_t *journal, unsigned long compat,
  * @compat: bitmask of compatible features
  * @ro: bitmask of features that force read-only mount
  * @incompat: bitmask of incompatible features
- * 
+ *
  * Check whether the journaling code supports the use of
  * all of a given set of features on this journal.  Return true
  * (non-zero) if it can. */
@@ -1233,7 +1275,7 @@ int journal_check_available_features (journal_t *journal, unsigned long compat,
  * @incompat: bitmask of incompatible features
  *
  * Mark a given journal feature as present on the
- * superblock.  Returns true if the requested features could be set. 
+ * superblock.  Returns true if the requested features could be set.
  *
  */
 
@@ -1319,7 +1361,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
 /**
  * int journal_flush () - Flush journal
  * @journal: Journal to act on.
- * 
+ *
  * Flush all data for a given journal to disk and empty the journal.
  * Filesystems can use this when remounting readonly to ensure that
  * recovery does not need to happen on remount.
@@ -1354,10 +1396,16 @@ int journal_flush(journal_t *journal)
        spin_lock(&journal->j_list_lock);
        while (!err && journal->j_checkpoint_transactions != NULL) {
                spin_unlock(&journal->j_list_lock);
+               mutex_lock(&journal->j_checkpoint_mutex);
                err = log_do_checkpoint(journal);
+               mutex_unlock(&journal->j_checkpoint_mutex);
                spin_lock(&journal->j_list_lock);
        }
        spin_unlock(&journal->j_list_lock);
+
+       if (is_journal_aborted(journal))
+               return -EIO;
+
        cleanup_journal_tail(journal);
 
        /* Finally, mark the journal as really needing no recovery.
@@ -1379,14 +1427,14 @@ int journal_flush(journal_t *journal)
        J_ASSERT(journal->j_head == journal->j_tail);
        J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
        spin_unlock(&journal->j_state_lock);
-       return err;
+       return 0;
 }
 
 /**
  * int journal_wipe() - Wipe journal contents
  * @journal: Journal to act on.
  * @write: flag (see below)
- * 
+ *
  * Wipe out all of the contents of a journal, safely.  This will produce
  * a warning if the journal contains any valid recovery information.
  * Must be called between journal_init_*() and journal_load().
@@ -1441,7 +1489,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
 
 /*
  * Journal abort has very specific semantics, which we describe
- * for journal abort. 
+ * for journal abort.
  *
  * The two internal functions which provide abort to the jbd layer
  * itself are here.
@@ -1452,7 +1500,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
  * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
  * and don't attempt to make any other journal updates.
  */
-void __journal_abort_hard(journal_t *journal)
+static void __journal_abort_hard(journal_t *journal)
 {
        transaction_t *transaction;
        char b[BDEVNAME_SIZE];
@@ -1496,7 +1544,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
  * Perform a complete, immediate shutdown of the ENTIRE
  * journal (not of a single transaction).  This operation cannot be
  * undone without closing and reopening the journal.
- *           
+ *
  * The journal_abort function is intended to support higher level error
  * recovery mechanisms such as the ext2/ext3 remount-readonly error
  * mode.
@@ -1530,7 +1578,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
  * supply an errno; a null errno implies that absolutely no further
  * writes are done to the journal (unless there are any already in
  * progress).
- * 
+ *
  */
 
 void journal_abort(journal_t *journal, int errno)
@@ -1538,7 +1586,7 @@ void journal_abort(journal_t *journal, int errno)
        __journal_abort_soft(journal, errno);
 }
 
-/** 
+/**
  * int journal_errno () - returns the journal's error state.
  * @journal: journal to examine.
  *
@@ -1562,7 +1610,7 @@ int journal_errno(journal_t *journal)
        return err;
 }
 
-/** 
+/**
  * int journal_clear_err () - clears the journal's error state
  * @journal: journal to act on.
  *
@@ -1582,7 +1630,7 @@ int journal_clear_err(journal_t *journal)
        return err;
 }
 
-/** 
+/**
  * void journal_ack_err() - Ack journal err.
  * @journal: journal to act on.
  *
@@ -1603,18 +1651,9 @@ int journal_blocks_per_page(struct inode *inode)
 }
 
 /*
- * Simple support for retrying memory allocations.  Introduced to help to
- * debug different VM deadlock avoidance strategies. 
- */
-void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
-{
-       return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
-}
-
-/*
  * Journal_head storage management
  */
-static kmem_cache_t *journal_head_cache;
+static struct kmem_cache *journal_head_cache;
 #ifdef CONFIG_JBD_DEBUG
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
@@ -1623,15 +1662,14 @@ static int journal_init_journal_head_cache(void)
 {
        int retval;
 
-       J_ASSERT(journal_head_cache == 0);
+       J_ASSERT(journal_head_cache == NULL);
        journal_head_cache = kmem_cache_create("journal_head",
                                sizeof(struct journal_head),
                                0,              /* offset */
-                               0,              /* flags */
-                               NULL,           /* ctor */
-                               NULL);          /* dtor */
+                               SLAB_TEMPORARY, /* flags */
+                               NULL);          /* ctor */
        retval = 0;
-       if (journal_head_cache == 0) {
+       if (!journal_head_cache) {
                retval = -ENOMEM;
                printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
        }
@@ -1640,9 +1678,10 @@ static int journal_init_journal_head_cache(void)
 
 static void journal_destroy_journal_head_cache(void)
 {
-       J_ASSERT(journal_head_cache != NULL);
-       kmem_cache_destroy(journal_head_cache);
-       journal_head_cache = NULL;
+       if (journal_head_cache) {
+               kmem_cache_destroy(journal_head_cache);
+               journal_head_cache = NULL;
+       }
 }
 
 /*
@@ -1657,14 +1696,14 @@ static struct journal_head *journal_alloc_journal_head(void)
        atomic_inc(&nr_journal_heads);
 #endif
        ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
-       if (ret == 0) {
+       if (ret == NULL) {
                jbd_debug(1, "out of memory for journal_head\n");
                if (time_after(jiffies, last_warning + 5*HZ)) {
                        printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-                              __FUNCTION__);
+                              __func__);
                        last_warning = jiffies;
                }
-               while (ret == 0) {
+               while (ret == NULL) {
                        yield();
                        ret = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
                }
@@ -1798,14 +1837,14 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
                        if (jh->b_frozen_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_frozen_data\n",
-                                               __FUNCTION__);
-                               kfree(jh->b_frozen_data);
+                                               __func__);
+                               jbd_free(jh->b_frozen_data, bh->b_size);
                        }
                        if (jh->b_committed_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_committed_data\n",
-                                               __FUNCTION__);
-                               kfree(jh->b_committed_data);
+                                               __func__);
+                               jbd_free(jh->b_committed_data, bh->b_size);
                        }
                        bh->b_private = NULL;
                        jh->b_bh = NULL;        /* debug, really */
@@ -1857,76 +1896,52 @@ void journal_put_journal_head(struct journal_head *jh)
 }
 
 /*
- * /proc tunables
+ * debugfs tunables
  */
-#if defined(CONFIG_JBD_DEBUG)
-int journal_enable_debug;
-EXPORT_SYMBOL(journal_enable_debug);
-#endif
+#ifdef CONFIG_JBD_DEBUG
 
-#if defined(CONFIG_JBD_DEBUG) && defined(CONFIG_PROC_FS)
+u8 journal_enable_debug __read_mostly;
+EXPORT_SYMBOL(journal_enable_debug);
 
-static struct proc_dir_entry *proc_jbd_debug;
+static struct dentry *jbd_debugfs_dir;
+static struct dentry *jbd_debug;
 
-static int read_jbd_debug(char *page, char **start, off_t off,
-                         int count, int *eof, void *data)
+static void __init jbd_create_debugfs_entry(void)
 {
-       int ret;
-
-       ret = sprintf(page + off, "%d\n", journal_enable_debug);
-       *eof = 1;
-       return ret;
+       jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
+       if (jbd_debugfs_dir)
+               jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO,
+                                              jbd_debugfs_dir,
+                                              &journal_enable_debug);
 }
 
-static int write_jbd_debug(struct file *file, const char __user *buffer,
-                          unsigned long count, void *data)
+static void __exit jbd_remove_debugfs_entry(void)
 {
-       char buf[32];
-
-       if (count > ARRAY_SIZE(buf) - 1)
-               count = ARRAY_SIZE(buf) - 1;
-       if (copy_from_user(buf, buffer, count))
-               return -EFAULT;
-       buf[ARRAY_SIZE(buf) - 1] = '\0';
-       journal_enable_debug = simple_strtoul(buf, NULL, 10);
-       return count;
+       debugfs_remove(jbd_debug);
+       debugfs_remove(jbd_debugfs_dir);
 }
 
-#define JBD_PROC_NAME "sys/fs/jbd-debug"
+#else
 
-static void __init create_jbd_proc_entry(void)
+static inline void jbd_create_debugfs_entry(void)
 {
-       proc_jbd_debug = create_proc_entry(JBD_PROC_NAME, 0644, NULL);
-       if (proc_jbd_debug) {
-               /* Why is this so hard? */
-               proc_jbd_debug->read_proc = read_jbd_debug;
-               proc_jbd_debug->write_proc = write_jbd_debug;
-       }
 }
 
-static void __exit remove_jbd_proc_entry(void)
+static inline void jbd_remove_debugfs_entry(void)
 {
-       if (proc_jbd_debug)
-               remove_proc_entry(JBD_PROC_NAME, NULL);
 }
 
-#else
-
-#define create_jbd_proc_entry() do {} while (0)
-#define remove_jbd_proc_entry() do {} while (0)
-
 #endif
 
-kmem_cache_t *jbd_handle_cache;
+struct kmem_cache *jbd_handle_cache;
 
 static int __init journal_init_handle_cache(void)
 {
        jbd_handle_cache = kmem_cache_create("journal_handle",
                                sizeof(handle_t),
                                0,              /* offset */
-                               0,              /* flags */
-                               NULL,           /* ctor */
-                               NULL);          /* dtor */
+                               SLAB_TEMPORARY, /* flags */
+                               NULL);          /* ctor */
        if (jbd_handle_cache == NULL) {
                printk(KERN_EMERG "JBD: failed to create handle cache\n");
                return -ENOMEM;
@@ -1967,18 +1982,12 @@ static int __init journal_init(void)
 {
        int ret;
 
-/* Static check for data structure consistency.  There's no code
- * invoked --- we'll just get a linker failure if things aren't right.
- */
-       extern void journal_bad_superblock_size(void);
-       if (sizeof(struct journal_superblock_s) != 1024)
-               journal_bad_superblock_size();
-
+       BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
 
        ret = journal_init_caches();
        if (ret != 0)
                journal_destroy_caches();
-       create_jbd_proc_entry();
+       jbd_create_debugfs_entry();
        return ret;
 }
 
@@ -1989,7 +1998,7 @@ static void __exit journal_exit(void)
        if (n)
                printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
 #endif
-       remove_jbd_proc_entry();
+       jbd_remove_debugfs_entry();
        journal_destroy_caches();
 }