GFS2: Banish struct gfs2_dinode_host
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 4291375..bdad0df 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -14,6 +14,7 @@
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/lm_interface.h>
+#include <linux/prefetch.h>
 
 #include "gfs2.h"
 #include "incore.h"
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
 
+#if BITS_PER_LONG == 32
+#define LBITMASK   (0x55555555UL)
+#define LBITSKIP55 (0x55555555UL)
+#define LBITSKIP00 (0x00000000UL)
+#else
+#define LBITMASK   (0x5555555555555555UL)
+#define LBITSKIP55 (0x5555555555555555UL)
+#define LBITSKIP00 (0x0000000000000000UL)
+#endif
+
 /*
  * These routines are used by the resource group routines (rgrp.c)
  * to keep track of block allocation.  Each block is represented by two
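
Each block in a resource group is tracked by two bits, so one byte covers four blocks and one 64-bit word covers 32. The low-order bit of every two-bit state is set only for used and dinode blocks, which is what the LBITMASK/LBITSKIP constants added above exploit: masking a whole word with 0x5555... shows at a glance whether any entry in it could match the state being searched for. Below is a minimal standalone sketch of the encoding; the constants are mirrored from gfs2_ondisk.h, and bitmap_get() is only an illustration of what the kernel's gfs2_testbit() does, not the kernel code itself.

/* Standalone sketch of the two-bit block states (illustrative only;
 * the constants mirror gfs2_ondisk.h, bitmap_get() is not kernel code). */
#include <stdio.h>
#include <stdint.h>

#define GFS2_BIT_SIZE  2	/* two bits of state per block */
#define GFS2_BIT_MASK  0x3
#define GFS2_NBBY      4	/* blocks encoded per byte */

#define GFS2_BLKST_FREE     0
#define GFS2_BLKST_USED     1
#define GFS2_BLKST_UNLINKED 2
#define GFS2_BLKST_DINODE   3

/* Read the two-bit state of block 'blk' from a bitmap buffer. */
static unsigned int bitmap_get(const uint8_t *buffer, uint32_t blk)
{
	unsigned int bit = (blk % GFS2_NBBY) * GFS2_BIT_SIZE;

	return (buffer[blk / GFS2_NBBY] >> bit) & GFS2_BIT_MASK;
}

int main(void)
{
	/* blocks 0..7: used, free, dinode, free, used, used, unlinked, free */
	const uint8_t bitmap[2] = { 0x31, 0x25 };
	uint32_t blk;

	for (blk = 0; blk < 8; blk++)
		printf("block %u: state %u\n", blk, bitmap_get(bitmap, blk));
	return 0;
}
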
@@ -138,45 +149,63 @@ static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
 static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
                       u8 old_state)
 {
-       const u8 *byte;
-       u32 blk = goal;
-       unsigned int bit, bitlong;
-       const unsigned long *plong;
-#if BITS_PER_LONG == 32
-       const unsigned long plong55 = 0x55555555;
-#else
-       const unsigned long plong55 = 0x5555555555555555;
-#endif
-
-       byte = buffer + (goal / GFS2_NBBY);
-       plong = (const unsigned long *)(buffer + (goal / GFS2_NBBY));
-       bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
-       bitlong = bit;
-
-       while (byte < buffer + buflen) {
-
-               if (bitlong == 0 && old_state == 0 && *plong == plong55) {
-                       plong++;
-                       byte += sizeof(unsigned long);
-                       blk += sizeof(unsigned long) * GFS2_NBBY;
-                       continue;
+       const u8 *byte, *start, *end;
+       int bit, startbit;
+       u32 g1, g2, misaligned;
+       unsigned long *plong;
+       unsigned long lskipval;
+
+       lskipval = (old_state & GFS2_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
+       g1 = (goal / GFS2_NBBY);
+       start = buffer + g1;
+       byte = start;
+       end = buffer + buflen;
+       g2 = ALIGN(g1, sizeof(unsigned long));
+       plong = (unsigned long *)(buffer + g2);
+       startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
+       misaligned = g2 - g1;
+       if (!misaligned)
+               goto ulong_aligned;
+/* parse the bitmap a byte at a time */
+misaligned:
+       while (byte < end) {
+               if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
+                       return goal +
+                               (((byte - start) * GFS2_NBBY) +
+                                ((bit - startbit) >> 1));
                }
-               if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
-                       return blk;
                bit += GFS2_BIT_SIZE;
-               if (bit >= 8) {
+               if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
                        bit = 0;
                        byte++;
+                       misaligned--;
+                       if (!misaligned) {
+                               plong = (unsigned long *)byte;
+                               goto ulong_aligned;
+                       }
                }
-               bitlong += GFS2_BIT_SIZE;
-               if (bitlong >= sizeof(unsigned long) * 8) {
-                       bitlong = 0;
-                       plong++;
-               }
-
-               blk++;
        }
+       return BFITNOENT;
 
+/* parse the bitmap an unsigned long at a time */
+ulong_aligned:
+       /* Stop one unsigned long before "end", or else the prefetch can go
+          past the end and segfault.  We could "if" it but we'd lose some
+          of the performance gained.  This way we only slow down searching
+          the very last 4/8 bytes, depending on architecture.  I've
+          experimented with several ways of writing this section, such as
+          using an else before the goto, but this one seems the fastest. */
+       while ((unsigned char *)plong < end - sizeof(unsigned long)) {
+               prefetch(plong + 1);
+               if (((*plong) & LBITMASK) != lskipval)
+                       break;
+               plong++;
+       }
+       if ((unsigned char *)plong < end) {
+               byte = (const u8 *)plong;
+               misaligned += sizeof(unsigned long) - 1;
+               goto misaligned;
+       }
        return BFITNOENT;
 }
 
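The rewritten gfs2_bitfit() above scans in two gears: a byte-at-a-time loop until the search position is long-aligned, then a word-at-a-time loop that uses LBITMASK and lskipval to reject a whole unsigned long of entries per iteration, prefetching the next word and therefore stopping one word early as its comment explains. The sketch below is a simplified standalone version of the same idea (no goal offset, no prefetch, memcpy instead of an aligned load); bitfit() here is illustrative, not the kernel function.

/* Simplified, standalone sketch of the word-at-a-time skip used by
 * gfs2_bitfit() (64-bit constants; not the kernel implementation). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define LBITMASK   0x5555555555555555UL
#define LBITSKIP55 0x5555555555555555UL
#define LBITSKIP00 0x0000000000000000UL

#define BLKST_FREE 0
#define BLKST_USED 1

/* Return the first block whose two-bit state equals 'old_state', or
 * (uint32_t)~0.  Words that cannot contain a match are skipped whole. */
static uint32_t bitfit(const uint8_t *buffer, unsigned int buflen,
		       uint8_t old_state)
{
	unsigned long skip = (old_state & BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
	unsigned int i = 0, bit;

	/* Fast path: a word whose masked value equals 'skip' holds no
	 * entry that could match old_state, so jump 32 blocks at once. */
	while (i + sizeof(unsigned long) <= buflen) {
		unsigned long word;

		memcpy(&word, buffer + i, sizeof(word));
		if ((word & LBITMASK) != skip)
			break;
		i += sizeof(unsigned long);
	}
	/* Slow path: scan the remaining bytes two bits at a time. */
	for (; i < buflen; i++)
		for (bit = 0; bit < 8; bit += 2)
			if (((buffer[i] >> bit) & 0x3) == old_state)
				return i * 4 + bit / 2;
	return (uint32_t)~0;
}

int main(void)
{
	uint8_t bitmap[16];

	memset(bitmap, 0x55, sizeof(bitmap));	/* every block "used" ... */
	bitmap[13] = 0x45;			/* ... except block 54 */
	printf("first free block: %u\n",
	       bitfit(bitmap, sizeof(bitmap), BLKST_FREE));
	return 0;
}
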
@@ -342,11 +371,6 @@ static void clear_rgrpdi(struct gfs2_sbd *sdp)
 
        spin_lock(&sdp->sd_rindex_spin);
        sdp->sd_rindex_forward = NULL;
-       head = &sdp->sd_rindex_recent_list;
-       while (!list_empty(head)) {
-               rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
-               list_del(&rgd->rd_recent);
-       }
        spin_unlock(&sdp->sd_rindex_spin);
 
        head = &sdp->sd_rindex_list;
@@ -477,7 +501,7 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp)
        for (rgrps = 0;; rgrps++) {
                loff_t pos = rgrps * sizeof(struct gfs2_rindex);
 
-               if (pos + sizeof(struct gfs2_rindex) >= ip->i_di.di_size)
+               if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize)
                        break;
                error = gfs2_internal_read(ip, &ra_state, buf, &pos,
                                           sizeof(struct gfs2_rindex));
@@ -566,7 +590,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct inode *inode = &ip->i_inode;
        struct file_ra_state ra_state;
-       u64 rgrp_count = ip->i_di.di_size;
+       u64 rgrp_count = ip->i_disksize;
        int error;
 
        if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
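
gfs2_ri_update() sizes the resource-group index from the rindex inode: i_disksize must be an exact multiple of the on-disk struct gfs2_rindex, and do_div() both performs the 64-bit division in place and returns the remainder, so a non-zero return flags a truncated or corrupt rindex while rgrp_count ends up holding the number of entries. A hedged userspace analogue follows; do_div_sketch() and ENTRY_SIZE are illustrative stand-ins, not the kernel macro or structure.

/* Userspace analogue of the do_div() check above (do_div_sketch() and
 * ENTRY_SIZE are illustrative stand-ins, not the kernel's versions). */
#include <stdio.h>
#include <stdint.h>

#define ENTRY_SIZE 96	/* stand-in for sizeof(struct gfs2_rindex) */

/* Like the kernel's do_div(): divide *n in place, return the remainder. */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t rgrp_count = 96000;	/* pretend i_disksize of the rindex */

	if (do_div_sketch(&rgrp_count, ENTRY_SIZE))
		fprintf(stderr, "rindex size is not a whole number of entries\n");
	else
		printf("%llu resource groups\n", (unsigned long long)rgrp_count);
	return 0;
}
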
@@ -610,7 +634,7 @@ static int gfs2_ri_update_special(struct gfs2_inode *ip)
        for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
                /* Ignore partials */
                if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
-                   ip->i_di.di_size)
+                   ip->i_disksize)
                        break;
                error = read_rindex_entry(ip, &ra_state);
                if (error) {
@@ -916,107 +940,30 @@ static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
 }
 
 /**
- * recent_rgrp_first - get first RG from "recent" list
- * @sdp: The GFS2 superblock
- * @rglast: address of the rgrp used last
- *
- * Returns: The first rgrp in the recent list
- */
-
-static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
-                                           u64 rglast)
-{
-       struct gfs2_rgrpd *rgd;
-
-       spin_lock(&sdp->sd_rindex_spin);
-
-       if (rglast) {
-               list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
-                       if (rgrp_contains_block(rgd, rglast))
-                               goto out;
-               }
-       }
-       rgd = NULL;
-       if (!list_empty(&sdp->sd_rindex_recent_list))
-               rgd = list_entry(sdp->sd_rindex_recent_list.next,
-                                struct gfs2_rgrpd, rd_recent);
-out:
-       spin_unlock(&sdp->sd_rindex_spin);
-       return rgd;
-}
-
-/**
  * recent_rgrp_next - get next RG from "recent" list
  * @cur_rgd: current rgrp
- * @remove:
  *
  * Returns: The next rgrp in the recent list
  */
 
-static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
-                                          int remove)
+static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd)
 {
        struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
        struct list_head *head;
        struct gfs2_rgrpd *rgd;
 
        spin_lock(&sdp->sd_rindex_spin);
-
-       head = &sdp->sd_rindex_recent_list;
-
-       list_for_each_entry(rgd, head, rd_recent) {
-               if (rgd == cur_rgd) {
-                       if (cur_rgd->rd_recent.next != head)
-                               rgd = list_entry(cur_rgd->rd_recent.next,
-                                                struct gfs2_rgrpd, rd_recent);
-                       else
-                               rgd = NULL;
-
-                       if (remove)
-                               list_del(&cur_rgd->rd_recent);
-
-                       goto out;
-               }
+       head = &sdp->sd_rindex_mru_list;
+       if (unlikely(cur_rgd->rd_list_mru.next == head)) {
+               spin_unlock(&sdp->sd_rindex_spin);
+               return NULL;
        }
-
-       rgd = NULL;
-       if (!list_empty(head))
-               rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
-
-out:
+       rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru);
        spin_unlock(&sdp->sd_rindex_spin);
        return rgd;
 }
 
 /**
- * recent_rgrp_add - add an RG to tail of "recent" list
- * @new_rgd: The rgrp to add
- *
- */
-
-static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
-{
-       struct gfs2_sbd *sdp = new_rgd->rd_sbd;
-       struct gfs2_rgrpd *rgd;
-       unsigned int count = 0;
-       unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);
-
-       spin_lock(&sdp->sd_rindex_spin);
-
-       list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
-               if (rgd == new_rgd)
-                       goto out;
-
-               if (++count >= max)
-                       goto out;
-       }
-       list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);
-
-out:
-       spin_unlock(&sdp->sd_rindex_spin);
-}
-
-/**
  * forward_rgrp_get - get an rgrp to try next from full list
  * @sdp: The GFS2 superblock
  *
@@ -1083,9 +1030,7 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
        int loops = 0;
        int error, rg_locked;
 
-       /* Try recently successful rgrps */
-
-       rgd = recent_rgrp_first(sdp, ip->i_goal);
+       rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
 
        while (rgd) {
                rg_locked = 0;
@@ -1107,11 +1052,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
                                gfs2_glock_dq_uninit(&al->al_rgd_gh);
                        if (inode)
                                return inode;
-                       rgd = recent_rgrp_next(rgd, 1);
-                       break;
-
+                       /* fall through */
                case GLR_TRYFAILED:
-                       rgd = recent_rgrp_next(rgd, 0);
+                       rgd = recent_rgrp_next(rgd);
                        break;
 
                default:
@@ -1170,7 +1113,9 @@ static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
 
 out:
        if (begin) {
-               recent_rgrp_add(rgd);
+               spin_lock(&sdp->sd_rindex_spin);
+               list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
+               spin_unlock(&sdp->sd_rindex_spin);
                rgd = gfs2_rgrpd_get_next(rgd);
                if (!rgd)
                        rgd = gfs2_rgrpd_get_first(sdp);
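
The per-superblock "recent" list and its bookkeeping (recent_rgrp_first(), recent_rgrp_add(), the counting scan under sd_rindex_spin) are replaced by a single MRU-ordered list: get_local_rgrp() now starts from the rgrp that contains the inode's goal block, recent_rgrp_next() simply follows rd_list_mru to the next entry, and a successful allocation promotes its rgrp to the head of sd_rindex_mru_list with list_move(). The toy list below shows the same move-to-front idea; it re-implements just enough of a <linux/list.h>-style intrusive list to be standalone and is not the kernel API.

/* Toy move-to-front (MRU) list; a standalone re-implementation of the
 * pattern above, not the kernel's <linux/list.h>. */
#include <stdio.h>
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

struct rgrp {
	int id;
	struct list_node mru;	/* plays the role of rd_list_mru */
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add_head(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Equivalent of list_move(): unlink, then re-insert at the MRU head. */
static void list_move_head(struct list_node *n, struct list_node *head)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_add_head(n, head);
}

#define node_to_rgrp(n) \
	((struct rgrp *)((char *)(n) - offsetof(struct rgrp, mru)))

int main(void)
{
	struct list_node mru_list;
	struct rgrp rg[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct list_node *n;
	int i;

	list_init(&mru_list);
	for (i = 2; i >= 0; i--)
		list_add_head(&rg[i].mru, &mru_list);	/* order: 0 1 2 */

	list_move_head(&rg[2].mru, &mru_list);	/* rgrp 2 just allocated */

	for (n = mru_list.next; n != &mru_list; n = n->next)
		printf("rgrp %d\n", node_to_rgrp(n)->id);	/* 2 0 1 */
	return 0;
}
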
@@ -1466,7 +1411,7 @@ u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
 
        al->al_alloced += *n;
 
-       gfs2_statfs_change(sdp, 0, -*n, 0);
+       gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
        gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
 
        spin_lock(&sdp->sd_rindex_spin);
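
The last hunk fixes a sign bug: *n is an unsigned int, so -*n wraps around before it is converted to the s64 parameter of gfs2_statfs_change(), crediting a huge positive block count instead of subtracting a small one; casting to s64 first makes the negation signed. A standalone illustration, with plain C fixed-width types standing in for the kernel's u32/s64:

/* Why -(s64)*n: negating an unsigned int wraps around, and the wrapped
 * value then converts to a large positive 64-bit number. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int n = 1;			/* like *n in gfs2_alloc_block() */
	int64_t wrong = -n;			/* -n wraps to 0xffffffff first */
	int64_t right = -(int64_t)n;		/* widen, then negate */

	printf("wrong: %lld\n", (long long)wrong);	/* 4294967295 */
	printf("right: %lld\n", (long long)right);	/* -1 */
	return 0;
}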