/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
+ * of the GNU General Public License version 2.
*/
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
-#include <asm/semaphore.h>
+#include <linux/gfs2_ondisk.h>
+#include <linux/bio.h>
#include "gfs2.h"
+#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
-#include "page.h"
#include "recovery.h"
#include "rgrp.h"
+#include "util.h"
+#include "trans.h"
/**
- * meta_go_sync - sync out the metadata for this glock
+ * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
* @gl: the glock
- * @flags: DIO_*
*
- * Called when demoting or unlocking an EX glock. We must flush
- * to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * None of the buffers should be dirty, locked, or pinned.
*/
-static void meta_go_sync(struct gfs2_glock *gl, int flags)
+static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
- if (!(flags & DIO_METADATA))
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct list_head *head = &gl->gl_ail_list;
+ struct gfs2_bufdata *bd;
+ struct buffer_head *bh;
+ struct gfs2_trans tr;
+
+ memset(&tr, 0, sizeof(tr));
+ tr.tr_revokes = atomic_read(&gl->gl_ail_count);
+
+ if (!tr.tr_revokes)
return;
- if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
- gfs2_log_flush_glock(gl);
- gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
- if (flags & DIO_RELEASE)
- gfs2_ail_empty_gl(gl);
+ /* A shortened, inline version of gfs2_trans_begin() */
+ tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
+ tr.tr_ip = (unsigned long)__builtin_return_address(0);
+ INIT_LIST_HEAD(&tr.tr_list_buf);
+ gfs2_log_reserve(sdp, tr.tr_reserved);
+ BUG_ON(current->journal_info);
+ current->journal_info = &tr;
+
+ gfs2_log_lock(sdp);
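+ /*
+ * Walk the glock's AIL list: detach each buffer from its bufdata
+ * and turn the entry into a revoke so the journal forgets the block.
+ */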
+ while (!list_empty(head)) {
+ bd = list_entry(head->next, struct gfs2_bufdata,
+ bd_ail_gl_list);
+ bh = bd->bd_bh;
+ gfs2_remove_from_ail(bd);
+ bd->bd_bh = NULL;
+ bh->b_private = NULL;
+ bd->bd_blkno = bh->b_blocknr;
+ gfs2_assert_withdraw(sdp, !buffer_busy(bh));
+ gfs2_trans_add_revoke(sdp, bd);
}
+ gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
+ gfs2_log_unlock(sdp);
- clear_bit(GLF_SYNC, &gl->gl_flags);
+ gfs2_trans_end(sdp);
+ gfs2_log_flush(sdp, NULL);
}
/**
- * meta_go_inval - invalidate the metadata for this glock
+ * rgrp_go_sync - sync out the metadata for this glock
* @gl: the glock
- * @flags:
*
+ * Called when demoting or unlocking an EX glock. We must flush
+ * to disk all dirty buffers/pages relating to this glock, and must not
+ * return to the caller to demote/unlock the glock until the I/O is complete.
*/
-static void meta_go_inval(struct gfs2_glock *gl, int flags)
+static void rgrp_go_sync(struct gfs2_glock *gl)
{
- if (!(flags & DIO_METADATA))
- return;
-
- gfs2_meta_inval(gl);
- gl->gl_vn++;
-}
+ struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ int error;
-/**
- * meta_go_demote_ok - Check to see if it's ok to unlock a glock
- * @gl: the glock
- *
- * Returns: 1 if we have no cached data; ok to demote meta glock
- */
+ if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
+ return;
+ BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
-static int meta_go_demote_ok(struct gfs2_glock *gl)
-{
- return !gl->gl_aspace->i_mapping->nrpages;
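+ /*
+ * Flush the journal for this glock, then write back and wait on the
+ * rgrp's metadata mapping before emptying the AIL.
+ */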
+ gfs2_log_flush(gl->gl_sbd, gl);
+ filemap_fdatawrite(metamapping);
+ error = filemap_fdatawait(metamapping);
+ mapping_set_error(metamapping, error);
+ gfs2_ail_empty_gl(gl);
}
/**
- * inode_go_xmote_th - promote/demote a glock
+ * rgrp_go_inval - invalidate the metadata for this glock
* @gl: the glock
- * @state: the requested state
* @flags:
*
- */
-
-static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
- int flags)
-{
- if (gl->gl_state != LM_ST_UNLOCKED)
- gfs2_pte_inval(gl);
- gfs2_glock_xmote_th(gl, state, flags);
-}
-
-/**
- * inode_go_xmote_bh - After promoting/demoting a glock
- * @gl: the glock
+ * We never use LM_ST_DEFERRED with resource groups, so we should
+ * always see the metadata flag set here.
*
*/
-static void inode_go_xmote_bh(struct gfs2_glock *gl)
+static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
- struct gfs2_holder *gh = gl->gl_req_gh;
- struct buffer_head *bh;
- int error;
+ struct address_space *mapping = gl->gl_aspace->i_mapping;
- if (gl->gl_state != LM_ST_UNLOCKED &&
- (!gh || !(gh->gh_flags & GL_SKIP))) {
- error = gfs2_meta_read(gl, gl->gl_name.ln_number, DIO_START,
- &bh);
- if (!error)
- brelse(bh);
- }
-}
-
-/**
- * inode_go_drop_th - unlock a glock
- * @gl: the glock
- *
- * Invoked from rq_demote().
- * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
- * is being purged from our node's glock cache; we're dropping lock.
- */
+ BUG_ON(!(flags & DIO_METADATA));
+ gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
+ truncate_inode_pages(mapping, 0);
-static void inode_go_drop_th(struct gfs2_glock *gl)
-{
- gfs2_pte_inval(gl);
- gfs2_glock_drop_th(gl);
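+ /* The cached rgrp header is now stale; force a re-read on next use. */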
+ if (gl->gl_object) {
+ struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
+ rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
+ }
}
/**
* inode_go_sync - Sync the dirty data and/or metadata for an inode glock
* @gl: the glock protecting the inode
- * @flags:
*
*/
-static void inode_go_sync(struct gfs2_glock *gl, int flags)
+static void inode_go_sync(struct gfs2_glock *gl)
{
- int meta = (flags & DIO_METADATA);
- int data = (flags & DIO_DATA);
-
- if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
- if (meta && data) {
- gfs2_page_sync(gl, flags | DIO_START);
- gfs2_log_flush_glock(gl);
- gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
- gfs2_page_sync(gl, flags | DIO_WAIT);
- clear_bit(GLF_DIRTY, &gl->gl_flags);
- } else if (meta) {
- gfs2_log_flush_glock(gl);
- gfs2_meta_sync(gl, flags | DIO_START | DIO_WAIT);
- } else if (data)
- gfs2_page_sync(gl, flags | DIO_START | DIO_WAIT);
- if (flags & DIO_RELEASE)
- gfs2_ail_empty_gl(gl);
- }
+ struct gfs2_inode *ip = gl->gl_object;
+ struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ int error;
+
+ if (ip && !S_ISREG(ip->i_inode.i_mode))
+ ip = NULL;
+ if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+ unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+ if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
+ return;
- clear_bit(GLF_SYNC, &gl->gl_flags);
+ BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);
+
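+ /*
+ * Journal first, then metadata and (for regular files) data pages;
+ * wait for the writeback to finish before emptying the AIL.
+ */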
+ gfs2_log_flush(gl->gl_sbd, gl);
+ filemap_fdatawrite(metamapping);
+ if (ip) {
+ struct address_space *mapping = ip->i_inode.i_mapping;
+ filemap_fdatawrite(mapping);
+ error = filemap_fdatawait(mapping);
+ mapping_set_error(mapping, error);
+ }
+ error = filemap_fdatawait(metamapping);
+ mapping_set_error(metamapping, error);
+ gfs2_ail_empty_gl(gl);
}
/**
* inode_go_inval - prepare an inode glock to be released
* @gl: the glock
* @flags:
+ *
+ * Normally we invalidate everything, but if we are moving into
+ * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
+ * can keep hold of the metadata, since it won't have changed.
*
*/
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
- int meta = (flags & DIO_METADATA);
- int data = (flags & DIO_DATA);
+ struct gfs2_inode *ip = gl->gl_object;
+
+ gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
- if (meta) {
- gfs2_meta_inval(gl);
- gl->gl_vn++;
+ if (flags & DIO_METADATA) {
+ struct address_space *mapping = gl->gl_aspace->i_mapping;
+ truncate_inode_pages(mapping, 0);
+ if (ip)
+ set_bit(GIF_INVALID, &ip->i_flags);
}
- if (data)
- gfs2_page_inval(gl);
+
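+ /* Invalidating the rindex inode means the rgrp list must be re-read. */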
+ if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
+ gl->gl_sbd->sd_rindex_uptodate = 0;
+ if (ip && S_ISREG(ip->i_inode.i_mode))
+ truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
/**
* Returns: 1 if it's ok
*/
-static int inode_go_demote_ok(struct gfs2_glock *gl)
+static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
- int demote = 0;
-
- if (!get_gl2ip(gl) && !gl->gl_aspace->i_mapping->nrpages)
- demote = 1;
- else if (!sdp->sd_args.ar_localcaching &&
- time_after_eq(jiffies, gl->gl_stamp +
- gfs2_tune_get(sdp, gt_demote_secs) * HZ))
- demote = 1;
-
- return demote;
+ if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
+ return 0;
+ return 1;
}
/**
static int inode_go_lock(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_inode *ip = get_gl2ip(gl);
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct gfs2_inode *ip = gl->gl_object;
int error = 0;
- if (!ip)
+ if (!ip || (gh->gh_flags & GL_SKIP))
return 0;
- if (ip->i_vn != gl->gl_vn) {
+ if (test_bit(GIF_INVALID, &ip->i_flags)) {
error = gfs2_inode_refresh(ip);
if (error)
return error;
- gfs2_inode_attr_in(ip);
}
- if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
+ if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
(gl->gl_state == LM_ST_EXCLUSIVE) &&
- (gh->gh_flags & GL_LOCAL_EXCL))
- error = gfs2_truncatei_resume(ip);
+ (gh->gh_state == LM_ST_EXCLUSIVE)) {
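+ /*
+ * Defer resuming the interrupted truncate: queue the inode on the
+ * superblock's truncation list and wake the quota daemon to do it.
+ */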
+ spin_lock(&sdp->sd_trunc_lock);
+ if (list_empty(&ip->i_trunc_list))
+ list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
+ spin_unlock(&sdp->sd_trunc_lock);
+ wake_up(&sdp->sd_quota_wait);
+ return 1;
+ }
return error;
}
/**
- * inode_go_unlock - operation done before an inode lock is unlocked by a
- * process
- * @gl: the glock
- * @flags:
- *
- */
-
-static void inode_go_unlock(struct gfs2_holder *gh)
-{
- struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_inode *ip = get_gl2ip(gl);
-
- if (ip && test_bit(GLF_DIRTY, &gl->gl_flags))
- gfs2_inode_attr_in(ip);
-
- if (ip)
- gfs2_meta_cache_flush(ip);
-}
-
-/**
- * inode_greedy -
- * @gl: the glock
+ * inode_go_dump - print information about an inode
+ * @seq: The iterator
+ * @gl: the glock
*
+ * Returns: 0 on success, -ENOBUFS when we run out of space
*/
-static void inode_greedy(struct gfs2_glock *gl)
+static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_sbd;
- struct gfs2_inode *ip = get_gl2ip(gl);
- unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
- unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
- unsigned int new_time;
-
- spin_lock(&ip->i_spin);
-
- if (time_after(ip->i_last_pfault + quantum, jiffies)) {
- new_time = ip->i_greedy + quantum;
- if (new_time > max)
- new_time = max;
- } else {
- new_time = ip->i_greedy - quantum;
- if (!new_time || new_time > max)
- new_time = 1;
- }
-
- ip->i_greedy = new_time;
-
- spin_unlock(&ip->i_spin);
-
- gfs2_inode_put(ip);
+ const struct gfs2_inode *ip = gl->gl_object;
+ if (ip == NULL)
+ return 0;
+ gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n",
+ (unsigned long long)ip->i_no_formal_ino,
+ (unsigned long long)ip->i_no_addr,
+ IF2DT(ip->i_inode.i_mode), ip->i_flags,
+ (unsigned int)ip->i_diskflags,
+ (unsigned long long)ip->i_inode.i_size,
+ (unsigned long long)ip->i_disksize);
+ return 0;
}
/**
* Returns: 1 if it's ok
*/
-static int rgrp_go_demote_ok(struct gfs2_glock *gl)
+static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
return !gl->gl_aspace->i_mapping->nrpages;
}
static int rgrp_go_lock(struct gfs2_holder *gh)
{
- return gfs2_rgrp_bh_get(get_gl2rgd(gh->gh_gl));
+ return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}
/**
static void rgrp_go_unlock(struct gfs2_holder *gh)
{
- gfs2_rgrp_bh_put(get_gl2rgd(gh->gh_gl));
+ gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
+}
+
+/**
+ * rgrp_go_dump - print out an rgrp
+ * @seq: The iterator
+ * @gl: The glock in question
+ *
+ */
+
+static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+ const struct gfs2_rgrpd *rgd = gl->gl_object;
+ if (rgd == NULL)
+ return 0;
+ gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u\n",
+ (unsigned long long)rgd->rd_addr, rgd->rd_flags,
+ rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes);
+ return 0;
}
/**
- * trans_go_xmote_th - promote/demote the transaction glock
+ * trans_go_sync - promote/demote the transaction glock
* @gl: the glock
* @state: the requested state
* @flags:
*
*/
-static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
- int flags)
+static void trans_go_sync(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
gfs2_meta_syncfs(sdp);
gfs2_log_shutdown(sdp);
}
-
- gfs2_glock_xmote_th(gl, state, flags);
}
/**
*
*/
-static void trans_go_xmote_bh(struct gfs2_glock *gl)
+static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
- struct gfs2_glock *j_gl = sdp->sd_jdesc->jd_inode->i_gl;
- struct gfs2_log_header head;
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
+ struct gfs2_glock *j_gl = ip->i_gl;
+ struct gfs2_log_header_host head;
int error;
- if (gl->gl_state != LM_ST_UNLOCKED &&
- test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
- gfs2_meta_cache_flush(sdp->sd_jdesc->jd_inode);
- j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);
+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
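+ /*
+ * The journal glock has been invalidated, so re-read the log head
+ * and bring the in-core log pointers back in line with the disk.
+ */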
error = gfs2_find_jhead(sdp->sd_jdesc, &head);
if (error)
gfs2_consist(sdp);
if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
sdp->sd_log_sequence = head.lh_sequence + 1;
gfs2_log_pointers_init(sdp, head.lh_blkno);
}
}
+ return 0;
}
/**
- * trans_go_drop_th - unlock the transaction glock
- * @gl: the glock
- *
- * We want to sync the device even with localcaching. Remember
- * that localcaching journal replay only marks buffers dirty.
- */
-
-static void trans_go_drop_th(struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_sbd;
-
- if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
- gfs2_meta_syncfs(sdp);
- gfs2_log_shutdown(sdp);
- }
-
- gfs2_glock_drop_th(gl);
-}
-
-/**
- * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
+ * trans_go_demote_ok
* @gl: the glock
*
- * Returns: 1 if it's ok
+ * Always returns 0
*/
-static int quota_go_demote_ok(struct gfs2_glock *gl)
+static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
- return !atomic_read(&gl->gl_lvb_count);
+ return 0;
}
-struct gfs2_glock_operations gfs2_meta_glops = {
- .go_xmote_th = gfs2_glock_xmote_th,
- .go_drop_th = gfs2_glock_drop_th,
- .go_sync = meta_go_sync,
- .go_inval = meta_go_inval,
- .go_demote_ok = meta_go_demote_ok,
- .go_type = LM_TYPE_META
+const struct gfs2_glock_operations gfs2_meta_glops = {
+ .go_type = LM_TYPE_META,
};
-struct gfs2_glock_operations gfs2_inode_glops = {
- .go_xmote_th = inode_go_xmote_th,
- .go_xmote_bh = inode_go_xmote_bh,
- .go_drop_th = inode_go_drop_th,
- .go_sync = inode_go_sync,
+const struct gfs2_glock_operations gfs2_inode_glops = {
+ .go_xmote_th = inode_go_sync,
.go_inval = inode_go_inval,
.go_demote_ok = inode_go_demote_ok,
.go_lock = inode_go_lock,
- .go_unlock = inode_go_unlock,
- .go_greedy = inode_greedy,
- .go_type = LM_TYPE_INODE
+ .go_dump = inode_go_dump,
+ .go_type = LM_TYPE_INODE,
+ .go_min_hold_time = HZ / 5,
};
-struct gfs2_glock_operations gfs2_rgrp_glops = {
- .go_xmote_th = gfs2_glock_xmote_th,
- .go_drop_th = gfs2_glock_drop_th,
- .go_sync = meta_go_sync,
- .go_inval = meta_go_inval,
+const struct gfs2_glock_operations gfs2_rgrp_glops = {
+ .go_xmote_th = rgrp_go_sync,
+ .go_inval = rgrp_go_inval,
.go_demote_ok = rgrp_go_demote_ok,
.go_lock = rgrp_go_lock,
.go_unlock = rgrp_go_unlock,
- .go_type = LM_TYPE_RGRP
+ .go_dump = rgrp_go_dump,
+ .go_type = LM_TYPE_RGRP,
+ .go_min_hold_time = HZ / 5,
};
-struct gfs2_glock_operations gfs2_trans_glops = {
- .go_xmote_th = trans_go_xmote_th,
+const struct gfs2_glock_operations gfs2_trans_glops = {
+ .go_xmote_th = trans_go_sync,
.go_xmote_bh = trans_go_xmote_bh,
- .go_drop_th = trans_go_drop_th,
- .go_type = LM_TYPE_NONDISK
+ .go_demote_ok = trans_go_demote_ok,
+ .go_type = LM_TYPE_NONDISK,
+};
+
+const struct gfs2_glock_operations gfs2_iopen_glops = {
+ .go_type = LM_TYPE_IOPEN,
};
-struct gfs2_glock_operations gfs2_iopen_glops = {
- .go_xmote_th = gfs2_glock_xmote_th,
- .go_drop_th = gfs2_glock_drop_th,
- .go_callback = gfs2_iopen_go_callback,
- .go_type = LM_TYPE_IOPEN
+const struct gfs2_glock_operations gfs2_flock_glops = {
+ .go_type = LM_TYPE_FLOCK,
};
-struct gfs2_glock_operations gfs2_flock_glops = {
- .go_xmote_th = gfs2_glock_xmote_th,
- .go_drop_th = gfs2_glock_drop_th,
- .go_type = LM_TYPE_FLOCK
+const struct gfs2_glock_operations gfs2_nondisk_glops = {
+ .go_type = LM_TYPE_NONDISK,
};
-struct gfs2_glock_operations gfs2_nondisk_glops = {
- .go_xmote_th = gfs2_glock_xmote_th,
- .go_drop_th = gfs2_glock_drop_th,
- .go_type = LM_TYPE_NONDISK
+const struct gfs2_glock_operations gfs2_quota_glops = {
+ .go_type = LM_TYPE_QUOTA,
};
-struct gfs2_glock_operations gfs2_quota_glops = {
- .go_xmote_th = gfs2_glock_xmote_th,
- .go_drop_th = gfs2_glock_drop_th,
- .go_demote_ok = quota_go_demote_ok,
- .go_type = LM_TYPE_QUOTA
+const struct gfs2_glock_operations gfs2_journal_glops = {
+ .go_type = LM_TYPE_JOURNAL,
};
-struct gfs2_glock_operations gfs2_journal_glops = {
- .go_xmote_th = gfs2_glock_xmote_th,
- .go_drop_th = gfs2_glock_drop_th,
- .go_type = LM_TYPE_JOURNAL
+const struct gfs2_glock_operations *gfs2_glops_list[] = {
+ [LM_TYPE_META] = &gfs2_meta_glops,
+ [LM_TYPE_INODE] = &gfs2_inode_glops,
+ [LM_TYPE_RGRP] = &gfs2_rgrp_glops,
+ [LM_TYPE_IOPEN] = &gfs2_iopen_glops,
+ [LM_TYPE_FLOCK] = &gfs2_flock_glops,
+ [LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
+ [LM_TYPE_QUOTA] = &gfs2_quota_glops,
+ [LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};