diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1b1ef31..60e2b32 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -16,6 +16,7 @@
  * wait if count gets too high, wake when it drops to half.
  */
 
+#include <linux/blkdev.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
@@ -26,8 +27,8 @@
 #include <linux/file.h>
 #include <linux/mount.h>
 #include <linux/buffer_head.h>
-#include <linux/raid/md.h>
-#include <linux/raid/bitmap.h>
+#include "md.h"
+#include "bitmap.h"
 
 /* debug macros */
 
@@ -107,13 +108,16 @@ static void bitmap_free_page(struct bitmap *bitmap, unsigned char *page)
  * allocated while we're using it
  */
 static int bitmap_checkpage(struct bitmap *bitmap, unsigned long page, int create)
+__releases(bitmap->lock)
+__acquires(bitmap->lock)
 {
        unsigned char *mappage;
 
        if (page >= bitmap->pages) {
-               printk(KERN_ALERT
-                       "%s: invalid bitmap page request: %lu (> %lu)\n",
-                       bmname(bitmap), page, bitmap->pages-1);
+               /* This can happen if bitmap_start_sync goes beyond
+                * End-of-device while looking for a whole page.
+                * It is harmless.
+                */
                return -EINVAL;
        }
 
@@ -203,48 +207,35 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page)
  * bitmap file handling - read and write the bitmap file and its superblock
  */
 
-/* copy the pathname of a file to a buffer */
-char *file_path(struct file *file, char *buf, int count)
-{
-       struct dentry *d;
-       struct vfsmount *v;
-
-       if (!buf)
-               return NULL;
-
-       d = file->f_path.dentry;
-       v = file->f_path.mnt;
-
-       buf = d_path(d, v, buf, count);
-
-       return IS_ERR(buf) ? NULL : buf;
-}
-
 /*
  * basic page I/O operations
  */
 
 /* IO operations when bitmap is stored near all superblocks */
-static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
+static struct page *read_sb_page(mddev_t *mddev, long offset,
+                                struct page *page,
+                                unsigned long index, int size)
 {
        /* choose a good rdev and read the page from there */
 
        mdk_rdev_t *rdev;
-       struct list_head *tmp;
-       struct page *page = alloc_page(GFP_KERNEL);
        sector_t target;
 
        if (!page)
+               page = alloc_page(GFP_KERNEL);
+       if (!page)
                return ERR_PTR(-ENOMEM);
 
-       ITERATE_RDEV(mddev, rdev, tmp) {
+       list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (! test_bit(In_sync, &rdev->flags)
                    || test_bit(Faulty, &rdev->flags))
                        continue;
 
-               target = (rdev->sb_offset << 1) + offset + index * (PAGE_SIZE/512);
+               target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
 
-               if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) {
+               if (sync_page_io(rdev->bdev, target,
+                                roundup(size, bdev_logical_block_size(rdev->bdev)),
+                                page, READ)) {
                        page->index = index;
                        attach_page_buffers(page, NULL); /* so that free_buffer will
                                                          * quietly no-op */
@@ -255,19 +246,50 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index)
 
 }
 
+static mdk_rdev_t *next_active_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
+{
+       /* Iterate the disks of an mddev, using rcu to protect access to the
+        * linked list, and raising the refcount of devices we return to ensure
+        * they don't disappear while in use.
+        * As devices are only added or removed when raid_disk is < 0 and
+        * nr_pending is 0 and In_sync is clear, the entries we return will
+        * still be in the same position on the list when we re-enter
+        * list_for_each_continue_rcu.
+        */
+       struct list_head *pos;
+       rcu_read_lock();
+       if (rdev == NULL)
+               /* start at the beginning */
+               pos = &mddev->disks;
+       else {
+               /* release the previous rdev and start from there. */
+               rdev_dec_pending(rdev, mddev);
+               pos = &rdev->same_set;
+       }
+       list_for_each_continue_rcu(pos, &mddev->disks) {
+               rdev = list_entry(pos, mdk_rdev_t, same_set);
+               if (rdev->raid_disk >= 0 &&
+                   !test_bit(Faulty, &rdev->flags)) {
+                       /* this is a usable device */
+                       atomic_inc(&rdev->nr_pending);
+                       rcu_read_unlock();
+                       return rdev;
+               }
+       }
+       rcu_read_unlock();
+       return NULL;
+}
+
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
-       mdk_rdev_t *rdev;
-       struct list_head *tmp;
+       mdk_rdev_t *rdev = NULL;
        mddev_t *mddev = bitmap->mddev;
 
-       ITERATE_RDEV(mddev, rdev, tmp)
-               if (test_bit(In_sync, &rdev->flags)
-                   && !test_bit(Faulty, &rdev->flags)) {
+       while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
                        int size = PAGE_SIZE;
                        if (page->index == bitmap->file_pages-1)
                                size = roundup(bitmap->last_page_size,
-                                              bdev_hardsect_size(rdev->bdev));
+                                              bdev_logical_block_size(rdev->bdev));
                        /* Just make sure we aren't corrupting data or
                         * metadata
                         */
@@ -277,32 +299,35 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
                                    + (long)(page->index * (PAGE_SIZE/512))
                                    + size/512 > 0)
                                        /* bitmap runs in to metadata */
-                                       return -EINVAL;
-                               if (rdev->data_offset + mddev->size*2
-                                   > rdev->sb_offset*2 + bitmap->offset)
+                                       goto bad_alignment;
+                               if (rdev->data_offset + mddev->dev_sectors
+                                   > rdev->sb_start + bitmap->offset)
                                        /* data runs in to bitmap */
-                                       return -EINVAL;
-                       } else if (rdev->sb_offset*2 < rdev->data_offset) {
+                                       goto bad_alignment;
+                       } else if (rdev->sb_start < rdev->data_offset) {
                                /* METADATA BITMAP DATA */
-                               if (rdev->sb_offset*2
+                               if (rdev->sb_start
                                    + bitmap->offset
                                    + page->index*(PAGE_SIZE/512) + size/512
                                    > rdev->data_offset)
                                        /* bitmap runs in to data */
-                                       return -EINVAL;
+                                       goto bad_alignment;
                        } else {
                                /* DATA METADATA BITMAP - no problems */
                        }
                        md_super_write(mddev, rdev,
-                                      (rdev->sb_offset<<1) + bitmap->offset
+                                      rdev->sb_start + bitmap->offset
                                       + page->index * (PAGE_SIZE/512),
                                       size,
                                       page);
-               }
+       }
 
        if (wait)
                md_super_wait(mddev);
        return 0;
+
+ bad_alignment:
+       return -EINVAL;
 }
 
 static void bitmap_file_kick(struct bitmap *bitmap);
@@ -471,8 +496,11 @@ void bitmap_update_sb(struct bitmap *bitmap)
        spin_unlock_irqrestore(&bitmap->lock, flags);
        sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
        sb->events = cpu_to_le64(bitmap->mddev->events);
-       if (!bitmap->mddev->degraded)
-               sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+       if (bitmap->mddev->events < bitmap->events_cleared) {
+               /* rocking back to read-only */
+               bitmap->events_cleared = bitmap->mddev->events;
+               sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
+       }
        kunmap_atomic(sb, KM_USER0);
        write_page(bitmap, bitmap->sb_page, 1);
 }
@@ -522,7 +550,9 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 
                bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes);
        } else {
-               bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0);
+               bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset,
+                                              NULL,
+                                              0, sizeof(bitmap_super_t));
        }
        if (IS_ERR(bitmap->sb_page)) {
                err = PTR_ERR(bitmap->sb_page);
@@ -542,7 +572,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
        else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
                 le32_to_cpu(sb->version) > BITMAP_MAJOR_HI)
                reason = "unrecognized superblock version";
-       else if (chunksize < PAGE_SIZE)
+       else if (chunksize < 512)
                reason = "bitmap chunksize too small";
        else if ((1 << ffz(~chunksize)) != chunksize)
                reason = "bitmap chunksize not a power of 2";
@@ -727,11 +757,13 @@ static void bitmap_file_kick(struct bitmap *bitmap)
                if (bitmap->file) {
                        path = kmalloc(PAGE_SIZE, GFP_KERNEL);
                        if (path)
-                               ptr = file_path(bitmap->file, path, PAGE_SIZE);
+                               ptr = d_path(&bitmap->file->f_path, path,
+                                            PAGE_SIZE);
+
 
                        printk(KERN_ALERT
                              "%s: kicking failed bitmap file %s from array!\n",
-                             bmname(bitmap), ptr ? ptr : "");
+                             bmname(bitmap), IS_ERR(ptr) ? "" : ptr);
 
                        kfree(path);
                } else
@@ -933,11 +965,18 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                                 */
                                page = bitmap->sb_page;
                                offset = sizeof(bitmap_super_t);
+                               if (!file)
+                                       read_sb_page(bitmap->mddev,
+                                                    bitmap->offset,
+                                                    page,
+                                                    index, count);
                        } else if (file) {
                                page = read_page(file, index, bitmap, count);
                                offset = 0;
                        } else {
-                               page = read_sb_page(bitmap->mddev, bitmap->offset, index);
+                               page = read_sb_page(bitmap->mddev, bitmap->offset,
+                                                   NULL,
+                                                   index, count);
                                offset = 0;
                        }
                        if (IS_ERR(page)) { /* read error */
@@ -948,6 +987,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                        oldindex = index;
                        oldpage = page;
 
+                       bitmap->filemap[bitmap->file_pages++] = page;
+                       bitmap->last_page_size = count;
+
                        if (outofdate) {
                                /*
                                 * if bitmap is out of date, dirty the
@@ -960,15 +1002,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                                write_page(bitmap, page, 1);
 
                                ret = -EIO;
-                               if (bitmap->flags & BITMAP_WRITE_ERROR) {
-                                       /* release, page not in filemap yet */
-                                       put_page(page);
+                               if (bitmap->flags & BITMAP_WRITE_ERROR)
                                        goto err;
-                               }
                        }
-
-                       bitmap->filemap[bitmap->file_pages++] = page;
-                       bitmap->last_page_size = count;
                }
                paddr = kmap_atomic(page, KM_USER0);
                if (bitmap->flags & BITMAP_HOSTENDIAN)
@@ -978,9 +1014,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
                kunmap_atomic(paddr, KM_USER0);
                if (b) {
                        /* if the disk bit is set, set the memory bit */
-                       bitmap_set_memory_bits(bitmap, i << CHUNK_BLOCK_SHIFT(bitmap),
-                                              ((i+1) << (CHUNK_BLOCK_SHIFT(bitmap)) >= start)
-                               );
+                       int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
+                                     >= start);
+                       bitmap_set_memory_bits(bitmap,
+                                              (sector_t)i << CHUNK_BLOCK_SHIFT(bitmap),
+                                              needed);
                        bit_cnt++;
                        set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
                }
@@ -1051,17 +1089,21 @@ void bitmap_daemon_work(struct bitmap *bitmap)
        if (bitmap == NULL)
                return;
        if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
-               return;
+               goto done;
+
        bitmap->daemon_lastrun = jiffies;
+       if (bitmap->allclean) {
+               bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+               return;
+       }
+       bitmap->allclean = 1;
 
+       spin_lock_irqsave(&bitmap->lock, flags);
        for (j = 0; j < bitmap->chunks; j++) {
                bitmap_counter_t *bmc;
-               spin_lock_irqsave(&bitmap->lock, flags);
-               if (!bitmap->filemap) {
+               if (!bitmap->filemap)
                        /* error or shutdown */
-                       spin_unlock_irqrestore(&bitmap->lock, flags);
                        break;
-               }
 
                page = filemap_get_page(bitmap, j);
 
@@ -1074,8 +1116,12 @@ void bitmap_daemon_work(struct bitmap *bitmap)
                                        clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
 
                                spin_unlock_irqrestore(&bitmap->lock, flags);
-                               if (need_write)
+                               if (need_write) {
                                        write_page(bitmap, page, 0);
+                                       bitmap->allclean = 0;
+                               }
+                               spin_lock_irqsave(&bitmap->lock, flags);
+                               j |= (PAGE_BITS - 1);
                                continue;
                        }
 
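The j |= (PAGE_BITS - 1) added above, like the j |= PAGE_COUNTER_MASK in a later hunk, is a skip-ahead idiom: OR-ing the loop counter with a power-of-two-minus-one mask moves it to the last index covered by the current page (bitmap file page in one case, counter page in the other), so the for loop's j++ resumes at the first index of the next page. A minimal user-space sketch of the same idiom, with an invented 8-chunks-per-page geometry standing in for PAGE_BITS:

#include <stdio.h>

#define CHUNKS_PER_PAGE 8	/* invented stand-in for PAGE_BITS; must be a power of two */

int main(void)
{
	unsigned long j;

	for (j = 0; j < 32; j++) {
		unsigned long page = j / CHUNKS_PER_PAGE;

		/* pretend even-numbered pages need no work at all */
		if ((page & 1) == 0) {
			/* jump to the last chunk of this page; the loop's
			 * j++ then lands on the first chunk of the next page */
			j |= (CHUNKS_PER_PAGE - 1);
			continue;
		}
		printf("visiting chunk %lu on page %lu\n", j, page);
	}
	return 0;
}
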
@@ -1092,25 +1138,40 @@ void bitmap_daemon_work(struct bitmap *bitmap)
                        } else
                                spin_unlock_irqrestore(&bitmap->lock, flags);
                        lastpage = page;
-/*
-                       printk("bitmap clean at page %lu\n", j);
-*/
+
+                       /* We are possibly going to clear some bits, so make
+                        * sure that events_cleared is up-to-date.
+                        */
+                       if (bitmap->need_sync) {
+                               bitmap_super_t *sb;
+                               bitmap->need_sync = 0;
+                               sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+                               sb->events_cleared =
+                                       cpu_to_le64(bitmap->events_cleared);
+                               kunmap_atomic(sb, KM_USER0);
+                               write_page(bitmap, bitmap->sb_page, 1);
+                       }
                        spin_lock_irqsave(&bitmap->lock, flags);
                        clear_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
                }
-               bmc = bitmap_get_counter(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
-                                       &blocks, 0);
+               bmc = bitmap_get_counter(bitmap,
+                                        (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
+                                        &blocks, 0);
                if (bmc) {
 /*
   if (j < 100) printk("bitmap: j=%lu, *bmc = 0x%x\n", j, *bmc);
 */
+                       if (*bmc)
+                               bitmap->allclean = 0;
+
                        if (*bmc == 2) {
                                *bmc=1; /* maybe clear the bit next time */
                                set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
                        } else if (*bmc == 1) {
                                /* we can clear the bit */
                                *bmc = 0;
-                               bitmap_count_page(bitmap, j << CHUNK_BLOCK_SHIFT(bitmap),
+                               bitmap_count_page(bitmap,
+                                                 (sector_t)j << CHUNK_BLOCK_SHIFT(bitmap),
                                                  -1);
 
                                /* clear the bit */
@@ -1121,9 +1182,10 @@ void bitmap_daemon_work(struct bitmap *bitmap)
                                        ext2_clear_bit(file_page_offset(j), paddr);
                                kunmap_atomic(paddr, KM_USER0);
                        }
-               }
-               spin_unlock_irqrestore(&bitmap->lock, flags);
+               } else
+                       j |= PAGE_COUNTER_MASK;
        }
+       spin_unlock_irqrestore(&bitmap->lock, flags);
 
        /* now sync the final page */
        if (lastpage != NULL) {
@@ -1138,11 +1200,16 @@ void bitmap_daemon_work(struct bitmap *bitmap)
                }
        }
 
+ done:
+       if (bitmap->allclean == 0)
+               bitmap->mddev->thread->timeout = bitmap->daemon_sleep * HZ;
 }
 
 static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
                                            sector_t offset, int *blocks,
                                            int create)
+__releases(bitmap->lock)
+__acquires(bitmap->lock)
 {
        /* If 'create', we might release the lock and reclaim it.
         * The lock must have been taken with interrupts enabled.
@@ -1217,7 +1284,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
                case 0:
                        bitmap_file_set_bit(bitmap, offset);
                        bitmap_count_page(bitmap,offset, 1);
-                       blk_plug_device(bitmap->mddev->queue);
+                       blk_plug_device_unlocked(bitmap->mddev->queue);
                        /* fall through */
                case 1:
                        *bmc = 2;
@@ -1232,6 +1299,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
                        sectors -= blocks;
                else sectors = 0;
        }
+       bitmap->allclean = 0;
        return 0;
 }
 
@@ -1244,6 +1312,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                PRINTK(KERN_DEBUG "dec write-behind count %d/%d\n",
                  atomic_read(&bitmap->behind_writes), bitmap->max_write_behind);
        }
+       if (bitmap->mddev->degraded)
+               /* Never clear bits or update events_cleared when degraded */
+               success = 0;
 
        while (sectors) {
                int blocks;
@@ -1257,6 +1328,12 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        return;
                }
 
+               if (success &&
+                   bitmap->events_cleared < bitmap->mddev->events) {
+                       bitmap->events_cleared = bitmap->mddev->events;
+                       bitmap->need_sync = 1;
+               }
+
                if (!success && ! (*bmc & NEEDED_MASK))
                        *bmc |= NEEDED_MASK;
 
@@ -1277,8 +1354,8 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
        }
 }
 
-int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
-                       int degraded)
+static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
+                              int degraded)
 {
        bitmap_counter_t *bmc;
        int rv;
@@ -1302,6 +1379,30 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
                }
        }
        spin_unlock_irq(&bitmap->lock);
+       bitmap->allclean = 0;
+       return rv;
+}
+
+int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks,
+                     int degraded)
+{
+       /* bitmap_start_sync must always report on multiples of whole
+        * pages, otherwise resync (which is very PAGE_SIZE based) will
+        * get confused.
+        * So call __bitmap_start_sync repeatedly (if needed) until
+        * at least PAGE_SIZE>>9 blocks are covered.
+        * Return the 'or' of the result.
+        */
+       int rv = 0;
+       int blocks1;
+
+       *blocks = 0;
+       while (*blocks < (PAGE_SIZE>>9)) {
+               rv |= __bitmap_start_sync(bitmap, offset,
+                                         &blocks1, degraded);
+               offset += blocks1;
+               *blocks += blocks1;
+       }
        return rv;
 }
 
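The comment in the new wrapper spells out the contract: resync operates in PAGE_SIZE units, so a single call has to report on at least PAGE_SIZE>>9 sectors and OR together the per-chunk answers. A stand-alone sketch of that accumulation loop; one_chunk_sync, the 2-sector chunk size and the "every third chunk needs syncing" pattern are invented stand-ins for __bitmap_start_sync and the real bitmap state:

#include <stdio.h>

#define PAGE_SECTORS	(4096 >> 9)	/* PAGE_SIZE >> 9 on a 4K-page machine */
#define CHUNK_SECTORS	2		/* invented: 2 sectors per bitmap chunk */

/* stand-in for __bitmap_start_sync: answers for a single chunk only */
static int one_chunk_sync(unsigned long long offset, int *blocks)
{
	*blocks = CHUNK_SECTORS - (offset % CHUNK_SECTORS);	/* sectors left in this chunk */
	return (offset / CHUNK_SECTORS) % 3 == 0;		/* pretend every third chunk needs syncing */
}

static int start_sync(unsigned long long offset, int *blocks)
{
	int rv = 0, blocks1;

	*blocks = 0;
	while (*blocks < PAGE_SECTORS) {	/* keep going until a whole page is covered */
		rv |= one_chunk_sync(offset, &blocks1);
		offset += blocks1;
		*blocks += blocks1;
	}
	return rv;
}

int main(void)
{
	int blocks;
	int rv = start_sync(5, &blocks);

	printf("needed=%d, covered %d sectors\n", rv, blocks);
	return 0;
}
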
@@ -1338,6 +1439,7 @@ void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int ab
        }
  unlock:
        spin_unlock_irqrestore(&bitmap->lock, flags);
+       bitmap->allclean = 0;
 }
 
 void bitmap_close_sync(struct bitmap *bitmap)
@@ -1348,16 +1450,43 @@ void bitmap_close_sync(struct bitmap *bitmap)
         */
        sector_t sector = 0;
        int blocks;
-       if (!bitmap) return;
+       if (!bitmap)
+               return;
        while (sector < bitmap->mddev->resync_max_sectors) {
                bitmap_end_sync(bitmap, sector, &blocks, 0);
-/*
-               if (sector < 500) printk("bitmap_close_sync: sec %llu blks %d\n",
-                                        (unsigned long long)sector, blocks);
-*/             sector += blocks;
+               sector += blocks;
        }
 }
 
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
+{
+       sector_t s = 0;
+       int blocks;
+
+       if (!bitmap)
+               return;
+       if (sector == 0) {
+               bitmap->last_end_sync = jiffies;
+               return;
+       }
+       if (time_before(jiffies, (bitmap->last_end_sync
+                                 + bitmap->daemon_sleep * HZ)))
+               return;
+       wait_event(bitmap->mddev->recovery_wait,
+                  atomic_read(&bitmap->mddev->recovery_active) == 0);
+
+       bitmap->mddev->curr_resync_completed = bitmap->mddev->curr_resync;
+       set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+       sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
+       s = 0;
+       while (s < sector && s < bitmap->mddev->resync_max_sectors) {
+               bitmap_end_sync(bitmap, s, &blocks, 0);
+               s += blocks;
+       }
+       bitmap->last_end_sync = jiffies;
+       sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
+}
+
 static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
 {
        /* For each chunk covered by any of these sectors, set the
@@ -1381,7 +1510,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
                set_page_attr(bitmap, page, BITMAP_PAGE_CLEAN);
        }
        spin_unlock_irq(&bitmap->lock);
-
+       bitmap->allclean = 0;
 }
 
 /* dirty the memory and file bits for bitmap chunks "s" to "e" */
@@ -1390,7 +1519,7 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
        unsigned long chunk;
 
        for (chunk = s; chunk <= e; chunk++) {
-               sector_t sec = chunk << CHUNK_BLOCK_SHIFT(bitmap);
+               sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
                bitmap_set_memory_bits(bitmap, sec, 1);
                bitmap_file_set_bit(bitmap, sec);
        }
@@ -1466,7 +1595,7 @@ void bitmap_destroy(mddev_t *mddev)
 int bitmap_create(mddev_t *mddev)
 {
        struct bitmap *bitmap;
-       unsigned long blocks = mddev->resync_max_sectors;
+       sector_t blocks = mddev->resync_max_sectors;
        unsigned long chunks;
        unsigned long pages;
        struct file *file = mddev->bitmap_file;
@@ -1495,10 +1624,11 @@ int bitmap_create(mddev_t *mddev)
        bitmap->offset = mddev->bitmap_offset;
        if (file) {
                get_file(file);
-               do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX,
-                                     SYNC_FILE_RANGE_WAIT_BEFORE |
-                                     SYNC_FILE_RANGE_WRITE |
-                                     SYNC_FILE_RANGE_WAIT_AFTER);
+               /* As future accesses to this file will use bmap,
+                * and bypass the page cache, we must sync the file
+                * first.
+                */
+               vfs_fsync(file, file->f_dentry, 1);
        }
        /* read superblock from bitmap file (this sets bitmap->chunksize) */
        err = bitmap_read_sb(bitmap);
@@ -1508,8 +1638,8 @@ int bitmap_create(mddev_t *mddev)
        bitmap->chunkshift = ffz(~bitmap->chunksize);
 
        /* now that chunksize and chunkshift are set, we can use these macros */
-       chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
-                       CHUNK_BLOCK_RATIO(bitmap);
+       chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
+                       CHUNK_BLOCK_SHIFT(bitmap);
        pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
 
        BUG_ON(!pages);
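
blocks is now a sector_t, which can be 64-bit even on 32-bit builds, and CHUNK_BLOCK_RATIO is a power of two only known at run time, so the ceiling division is expressed as a right shift by CHUNK_BLOCK_SHIFT; a plain '/' on a 64-bit dividend would otherwise have to go through do_div() on 32-bit kernels. A quick stand-alone check that the two forms agree, using an invented 128-blocks-per-chunk geometry:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented geometry: 64 KiB chunks => 128 512-byte blocks per chunk */
	const uint64_t ratio = 128;
	const unsigned shift = 7;		/* log2(ratio) */
	uint64_t blocks;

	for (blocks = 0; blocks < 100000; blocks++) {
		uint64_t by_div   = (blocks + ratio - 1) / ratio;	/* old form */
		uint64_t by_shift = (blocks + ratio - 1) >> shift;	/* new form */

		assert(by_div == by_shift);
	}
	printf("ceil division and the shift form agree\n");
	return 0;
}
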
@@ -1565,3 +1695,4 @@ EXPORT_SYMBOL(bitmap_start_sync);
 EXPORT_SYMBOL(bitmap_end_sync);
 EXPORT_SYMBOL(bitmap_unplug);
 EXPORT_SYMBOL(bitmap_close_sync);
+EXPORT_SYMBOL(bitmap_cond_end_sync);