diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 936b34e..c097d8a 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -6,7 +6,6 @@
  */
 
 #include "dm-exception-store.h"
-#include "dm-snap.h"
 
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -56,6 +55,8 @@
  */
 #define SNAPSHOT_DISK_VERSION 1
 
+#define NUM_SNAPSHOT_HDR_CHUNKS 1
+
 struct disk_header {
        uint32_t magic;
 
@@ -89,7 +90,7 @@ struct commit_callback {
  * The top level structure for a persistent exception store.
  */
 struct pstore {
-       struct dm_snapshot *snap;       /* up pointer to my snapshot */
+       struct dm_exception_store *store;
        int version;
        int valid;
        uint32_t exceptions_per_area;
@@ -107,6 +108,13 @@ struct pstore {
        void *zero_area;
 
        /*
+        * An area used for header. The header can be written
+        * concurrently with metadata (when invalidating the snapshot),
+        * so it needs a separate buffer.
+        */
+       void *header_area;
+
+       /*
         * Used to keep track of which metadata area the data in
         * 'chunk' refers to.
         */
@@ -114,7 +122,22 @@ struct pstore {
 
        /*
         * The next free chunk for an exception.
+        *
+        * When creating exceptions, all the chunks here and above are
+        * free.  It holds the next chunk to be allocated.  On rare
+        * occasions (e.g. after a system crash) holes can be left in
+        * the exception store because chunks can be committed out of
+        * order.
+        *
+        * When merging exceptions, it does not necessarily mean all the
+        * chunks here and above are free.  It holds the value it would
+        * have held if all chunks had been committed in order of
+        * allocation.  Consequently the value may occasionally be
+        * slightly too low, but since it's only used for 'status' and
+        * it can never reach its minimum value too early this doesn't
+        * matter.
         */
+
        chunk_t next_free;
 
        /*
@@ -141,7 +164,7 @@ static int alloc_area(struct pstore *ps)
        int r = -ENOMEM;
        size_t len;
 
-       len = ps->snap->chunk_size << SECTOR_SHIFT;
+       len = ps->store->chunk_size << SECTOR_SHIFT;
 
        /*
         * Allocate the chunk_size block of memory that will hold
@@ -149,24 +172,42 @@ static int alloc_area(struct pstore *ps)
         */
        ps->area = vmalloc(len);
        if (!ps->area)
-               return r;
+               goto err_area;
 
        ps->zero_area = vmalloc(len);
-       if (!ps->zero_area) {
-               vfree(ps->area);
-               return r;
-       }
+       if (!ps->zero_area)
+               goto err_zero_area;
        memset(ps->zero_area, 0, len);
 
+       ps->header_area = vmalloc(len);
+       if (!ps->header_area)
+               goto err_header_area;
+
        return 0;
+
+err_header_area:
+       vfree(ps->zero_area);
+
+err_zero_area:
+       vfree(ps->area);
+
+err_area:
+       return r;
 }
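
The rewritten alloc_area() above uses the usual kernel unwind pattern: each failure label releases only what was allocated before it, in reverse order. A minimal userspace sketch of the same cascading-goto idiom, with malloc()/free() standing in for vmalloc()/vfree() and three buffers mirroring area, zero_area and header_area (alloc_three() is a hypothetical stand-in, not code from this file):

#include <stdlib.h>
#include <string.h>

/* Userspace model of alloc_area(): each failure label frees only what
 * was successfully allocated before it, in reverse order. */
static int alloc_three(void **a, void **b, void **c, size_t len)
{
        int r = -1;     /* standing in for -ENOMEM */

        *a = malloc(len);
        if (!*a)
                goto err_a;

        *b = malloc(len);
        if (!*b)
                goto err_b;
        memset(*b, 0, len);

        *c = malloc(len);
        if (!*c)
                goto err_c;

        return 0;

err_c:
        free(*b);
err_b:
        free(*a);
err_a:
        return r;
}

int main(void)
{
        void *a, *b, *c;

        if (!alloc_three(&a, &b, &c, 4096)) {
                free(a);
                free(b);
                free(c);
        }
        return 0;
}
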
 
 static void free_area(struct pstore *ps)
 {
-       vfree(ps->area);
+       if (ps->area)
+               vfree(ps->area);
        ps->area = NULL;
-       vfree(ps->zero_area);
+
+       if (ps->zero_area)
+               vfree(ps->zero_area);
        ps->zero_area = NULL;
+
+       if (ps->header_area)
+               vfree(ps->header_area);
+       ps->header_area = NULL;
 }
 
 struct mdata_req {
@@ -186,17 +227,18 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
+                   int metadata)
 {
        struct dm_io_region where = {
-               .bdev = ps->snap->cow->bdev,
-               .sector = ps->snap->chunk_size * chunk,
-               .count = ps->snap->chunk_size,
+               .bdev = dm_snap_cow(ps->store->snap)->bdev,
+               .sector = ps->store->chunk_size * chunk,
+               .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .mem.type = DM_IO_VMA,
-               .mem.ptr.vma = ps->area,
+               .mem.ptr.vma = area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };
@@ -212,7 +254,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
-       INIT_WORK(&req.work, do_metadata);
+       INIT_WORK_ON_STACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);
 
@@ -238,7 +280,7 @@ static int area_io(struct pstore *ps, int rw)
 
        chunk = area_location(ps, ps->current_area);
 
-       r = chunk_io(ps, chunk, rw, 0);
+       r = chunk_io(ps, ps->area, chunk, rw, 0);
        if (r)
                return r;
 
@@ -247,46 +289,36 @@ static int area_io(struct pstore *ps, int rw)
 
 static void zero_memory_area(struct pstore *ps)
 {
-       memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
+       memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
 }
 
 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
-       struct dm_io_region where = {
-               .bdev = ps->snap->cow->bdev,
-               .sector = ps->snap->chunk_size * area_location(ps, area),
-               .count = ps->snap->chunk_size,
-       };
-       struct dm_io_request io_req = {
-               .bi_rw = WRITE,
-               .mem.type = DM_IO_VMA,
-               .mem.ptr.vma = ps->zero_area,
-               .client = ps->io_client,
-               .notify.fn = NULL,
-       };
-
-       return dm_io(&io_req, 1, &where, NULL);
+       return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
 {
        int r;
        struct disk_header *dh;
-       chunk_t chunk_size;
+       unsigned chunk_size;
        int chunk_size_supplied = 1;
+       char *chunk_err;
 
        /*
-        * Use default chunk size (or hardsect_size, if larger) if none supplied
+        * Use default chunk size (or logical_block_size, if larger)
+        * if none supplied
         */
-       if (!ps->snap->chunk_size) {
-               ps->snap->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
-                   bdev_hardsect_size(ps->snap->cow->bdev) >> 9);
-               ps->snap->chunk_mask = ps->snap->chunk_size - 1;
-               ps->snap->chunk_shift = ffs(ps->snap->chunk_size) - 1;
+       if (!ps->store->chunk_size) {
+               ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
+                   bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
+                                           bdev) >> 9);
+               ps->store->chunk_mask = ps->store->chunk_size - 1;
+               ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
                chunk_size_supplied = 0;
        }
 
-       ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
+       ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
                                                             chunk_size));
        if (IS_ERR(ps->io_client))
                return PTR_ERR(ps->io_client);
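
Note the unit conversion in the chunk-size defaulting above: bdev_logical_block_size() returns bytes while chunk_size is kept in 512-byte sectors, hence the >> 9. A hedged userspace illustration of that calculation; the 32-sector default is an assumed value standing in for DM_CHUNK_SIZE_DEFAULT_SECTORS, which is defined elsewhere:

#include <stdio.h>

#define SECTOR_SHIFT 9
/* Assumed value for illustration; the real constant lives in
 * dm-exception-store.h. */
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32

/* Pick a default chunk size in sectors: at least the default, and at
 * least one logical block of the COW device. */
static unsigned default_chunk_sectors(unsigned logical_block_bytes)
{
        unsigned lb_sectors = logical_block_bytes >> SECTOR_SHIFT;

        return lb_sectors > DM_CHUNK_SIZE_DEFAULT_SECTORS ?
               lb_sectors : DM_CHUNK_SIZE_DEFAULT_SECTORS;
}

int main(void)
{
        /* 512B and 4096B logical blocks both stay within the 32-sector default */
        printf("512B  blocks -> %u sectors\n", default_chunk_sectors(512));
        printf("4096B blocks -> %u sectors\n", default_chunk_sectors(4096));
        return 0;
}
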
@@ -295,11 +327,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
        if (r)
                return r;
 
-       r = chunk_io(ps, 0, READ, 1);
+       r = chunk_io(ps, ps->header_area, 0, READ, 1);
        if (r)
                goto bad;
 
-       dh = (struct disk_header *) ps->area;
+       dh = ps->header_area;
 
        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;
@@ -317,22 +349,26 @@ static int read_header(struct pstore *ps, int *new_snapshot)
        ps->version = le32_to_cpu(dh->version);
        chunk_size = le32_to_cpu(dh->chunk_size);
 
-       if (!chunk_size_supplied || ps->snap->chunk_size == chunk_size)
+       if (ps->store->chunk_size == chunk_size)
                return 0;
 
-       DMWARN("chunk size %llu in device metadata overrides "
-              "table chunk size of %llu.",
-              (unsigned long long)chunk_size,
-              (unsigned long long)ps->snap->chunk_size);
+       if (chunk_size_supplied)
+               DMWARN("chunk size %u in device metadata overrides "
+                      "table chunk size of %u.",
+                      chunk_size, ps->store->chunk_size);
 
        /* We had a bogus chunk_size. Fix stuff up. */
        free_area(ps);
 
-       ps->snap->chunk_size = chunk_size;
-       ps->snap->chunk_mask = chunk_size - 1;
-       ps->snap->chunk_shift = ffs(chunk_size) - 1;
+       r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
+                                             &chunk_err);
+       if (r) {
+               DMERR("invalid on-disk chunk size %u: %s.",
+                     chunk_size, chunk_err);
+               return r;
+       }
 
-       r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
+       r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
                                ps->io_client);
        if (r)
                return r;
@@ -349,15 +385,15 @@ static int write_header(struct pstore *ps)
 {
        struct disk_header *dh;
 
-       memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
+       memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
 
-       dh = (struct disk_header *) ps->area;
+       dh = ps->header_area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
-       dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
+       dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
 
-       return chunk_io(ps, 0, WRITE, 1);
+       return chunk_io(ps, ps->header_area, 0, WRITE, 1);
 }
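
write_header() stores each header field in little-endian byte order regardless of host endianness, which is what makes the on-disk format portable. A small userspace sketch of that serialization into a header chunk; the struct layout is assumed from the fields used here, and FAKE_SNAP_MAGIC is a placeholder rather than the real SNAP_MAGIC:

#include <endian.h>     /* htole32()/le32toh(), glibc */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* The four 32-bit fields read_header()/write_header() manipulate; the
 * exact layout of the real struct disk_header is assumed here. */
struct disk_header {
        uint32_t magic;
        uint32_t valid;
        uint32_t version;
        uint32_t chunk_size;
};

#define FAKE_SNAP_MAGIC 0x70736e73u     /* placeholder, not the real SNAP_MAGIC */

static void fill_header(void *header_area, uint32_t valid,
                        uint32_t version, uint32_t chunk_size)
{
        struct disk_header *dh = header_area;

        /* Everything on disk is little-endian, as in write_header(). */
        dh->magic = htole32(FAKE_SNAP_MAGIC);
        dh->valid = htole32(valid);
        dh->version = htole32(version);
        dh->chunk_size = htole32(chunk_size);
}

int main(void)
{
        void *header_area = calloc(1, 4096);    /* stands in for ps->header_area */

        if (!header_area)
                return 1;

        fill_header(header_area, 1, 1, 32);

        struct disk_header *dh = header_area;
        printf("valid=%u version=%u chunk_size=%u sectors\n",
               le32toh(dh->valid), le32toh(dh->version),
               le32toh(dh->chunk_size));

        free(header_area);
        return 0;
}
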
 
 /*
@@ -390,6 +426,15 @@ static void write_exception(struct pstore *ps,
        e->new_chunk = cpu_to_le64(de->new_chunk);
 }
 
+static void clear_exception(struct pstore *ps, uint32_t index)
+{
+       struct disk_exception *e = get_exception(ps, index);
+
+       /* clear it */
+       e->old_chunk = 0;
+       e->new_chunk = 0;
+}
+
 /*
  * Registers the exceptions that are present in the current area.
  * 'full' is filled in to indicate if the area has been
@@ -471,21 +516,40 @@ static struct pstore *get_info(struct dm_exception_store *store)
        return (struct pstore *) store->context;
 }
 
-static void persistent_fraction_full(struct dm_exception_store *store,
-                                    sector_t *numerator, sector_t *denominator)
+static void persistent_usage(struct dm_exception_store *store,
+                            sector_t *total_sectors,
+                            sector_t *sectors_allocated,
+                            sector_t *metadata_sectors)
 {
-       *numerator = get_info(store)->next_free * store->snap->chunk_size;
-       *denominator = get_dev_size(store->snap->cow->bdev);
+       struct pstore *ps = get_info(store);
+
+       *sectors_allocated = ps->next_free * store->chunk_size;
+       *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);
+
+       /*
+        * First chunk is the fixed header.
+        * Then there are (ps->current_area + 1) metadata chunks, each one
+        * separated from the next by ps->exceptions_per_area data chunks.
+        */
+       *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
+                           store->chunk_size;
 }
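
The comment in persistent_usage() fixes the on-disk geometry: one header chunk, then each metadata chunk followed by exceptions_per_area data chunks. A hedged userspace sketch of that arithmetic; the area_location() formula below is inferred from the comment (it is not part of this hunk), and the example figures are made up:

#include <stdio.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1

/*
 * Chunk index of the metadata chunk for a given area, assuming the layout
 * described in persistent_usage(): one header chunk, then each metadata
 * chunk is followed by exceptions_per_area data chunks.
 */
static unsigned long area_location(unsigned long area,
                                   unsigned long exceptions_per_area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + (exceptions_per_area + 1) * area;
}

int main(void)
{
        unsigned long chunk_size = 32;          /* sectors per chunk */
        unsigned long exceptions_per_area = 1024; /* e.g. 16KiB chunk / 16-byte exception */
        unsigned long current_area = 2;         /* third metadata area in use */
        unsigned long next_free = area_location(current_area,
                                                exceptions_per_area) + 10;

        unsigned long sectors_allocated = next_free * chunk_size;
        unsigned long metadata_sectors =
                (current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) * chunk_size;

        printf("metadata chunk of area %lu is chunk %lu\n", current_area,
               area_location(current_area, exceptions_per_area));
        printf("sectors_allocated=%lu metadata_sectors=%lu\n",
               sectors_allocated, metadata_sectors);
        return 0;
}
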
 
-static void persistent_destroy(struct dm_exception_store *store)
+static void persistent_dtr(struct dm_exception_store *store)
 {
        struct pstore *ps = get_info(store);
 
        destroy_workqueue(ps->metadata_wq);
-       dm_io_client_destroy(ps->io_client);
-       vfree(ps->callbacks);
+
+       /* Created in read_header */
+       if (ps->io_client)
+               dm_io_client_destroy(ps->io_client);
        free_area(ps);
+
+       /* Allocated in persistent_read_metadata */
+       if (ps->callbacks)
+               vfree(ps->callbacks);
+
        kfree(ps);
 }
 
@@ -507,7 +571,7 @@ static int persistent_read_metadata(struct dm_exception_store *store,
        /*
         * Now we know correct chunk_size, complete the initialisation.
         */
-       ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
+       ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
                                  sizeof(struct disk_exception);
        ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
                        sizeof(*ps->callbacks));
@@ -527,47 +591,43 @@ static int persistent_read_metadata(struct dm_exception_store *store,
                ps->current_area = 0;
                zero_memory_area(ps);
                r = zero_disk_area(ps, 0);
-               if (r) {
+               if (r)
                        DMWARN("zero_disk_area(0) failed");
-                       return r;
-               }
-       } else {
-               /*
-                * Sanity checks.
-                */
-               if (ps->version != SNAPSHOT_DISK_VERSION) {
-                       DMWARN("unable to handle snapshot disk version %d",
-                              ps->version);
-                       return -EINVAL;
-               }
+               return r;
+       }
+       /*
+        * Sanity checks.
+        */
+       if (ps->version != SNAPSHOT_DISK_VERSION) {
+               DMWARN("unable to handle snapshot disk version %d",
+                      ps->version);
+               return -EINVAL;
+       }
 
-               /*
-                * Metadata are valid, but snapshot is invalidated
-                */
-               if (!ps->valid)
-                       return 1;
+       /*
+        * Metadata are valid, but snapshot is invalidated
+        */
+       if (!ps->valid)
+               return 1;
 
-               /*
-                * Read the metadata.
-                */
-               r = read_exceptions(ps, callback, callback_context);
-               if (r)
-                       return r;
-       }
+       /*
+        * Read the metadata.
+        */
+       r = read_exceptions(ps, callback, callback_context);
 
-       return 0;
+       return r;
 }
 
 static int persistent_prepare_exception(struct dm_exception_store *store,
-                                       struct dm_snap_exception *e)
+                                       struct dm_exception *e)
 {
        struct pstore *ps = get_info(store);
        uint32_t stride;
        chunk_t next_free;
-       sector_t size = get_dev_size(store->snap->cow->bdev);
+       sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
 
        /* Is there enough room ? */
-       if (size < ((ps->next_free + 1) * store->snap->chunk_size))
+       if (size < ((ps->next_free + 1) * store->chunk_size))
                return -ENOSPC;
 
        e->new_chunk = ps->next_free;
@@ -586,7 +646,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
 }
 
 static void persistent_commit_exception(struct dm_exception_store *store,
-                                       struct dm_snap_exception *e,
+                                       struct dm_exception *e,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
 {
@@ -627,7 +687,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        /*
         * Commit exceptions to disk.
         */
-       if (ps->valid && area_io(ps, WRITE))
+       if (ps->valid && area_io(ps, WRITE_BARRIER))
                ps->valid = 0;
 
        /*
@@ -647,6 +707,85 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        ps->callback_count = 0;
 }
 
+static int persistent_prepare_merge(struct dm_exception_store *store,
+                                   chunk_t *last_old_chunk,
+                                   chunk_t *last_new_chunk)
+{
+       struct pstore *ps = get_info(store);
+       struct disk_exception de;
+       int nr_consecutive;
+       int r;
+
+       /*
+        * When current area is empty, move back to preceding area.
+        */
+       if (!ps->current_committed) {
+               /*
+                * Have we finished?
+                */
+               if (!ps->current_area)
+                       return 0;
+
+               ps->current_area--;
+               r = area_io(ps, READ);
+               if (r < 0)
+                       return r;
+               ps->current_committed = ps->exceptions_per_area;
+       }
+
+       read_exception(ps, ps->current_committed - 1, &de);
+       *last_old_chunk = de.old_chunk;
+       *last_new_chunk = de.new_chunk;
+
+       /*
+        * Find number of consecutive chunks within the current area,
+        * working backwards.
+        */
+       for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
+            nr_consecutive++) {
+               read_exception(ps, ps->current_committed - 1 - nr_consecutive,
+                              &de);
+               if (de.old_chunk != *last_old_chunk - nr_consecutive ||
+                   de.new_chunk != *last_new_chunk - nr_consecutive)
+                       break;
+       }
+
+       return nr_consecutive;
+}
+
+static int persistent_commit_merge(struct dm_exception_store *store,
+                                  int nr_merged)
+{
+       int r, i;
+       struct pstore *ps = get_info(store);
+
+       BUG_ON(nr_merged > ps->current_committed);
+
+       for (i = 0; i < nr_merged; i++)
+               clear_exception(ps, ps->current_committed - 1 - i);
+
+       r = area_io(ps, WRITE);
+       if (r < 0)
+               return r;
+
+       ps->current_committed -= nr_merged;
+
+       /*
+        * At this stage, only persistent_usage() uses ps->next_free, so
+        * we make no attempt to keep ps->next_free strictly accurate
+        * as exceptions may have been committed out-of-order originally.
+        * Once a snapshot has become merging, we set it to the value it
+        * would have held had all the exceptions been committed in order.
+        *
+        * ps->current_area does not get reduced by prepare_merge() until
+        * after commit_merge() has removed the nr_merged previous exceptions.
+        */
+       ps->next_free = (area_location(ps, ps->current_area) - 1) +
+                       (ps->current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;
+
+       return 0;
+}
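
persistent_prepare_merge() scans the committed exceptions backwards for a run whose old and new chunk numbers both step down by one, and persistent_commit_merge() then rewinds current_committed and recomputes next_free from the geometry. A self-contained userspace sketch of that backward scan and the next_free formula, using a plain array in place of the metadata area; the exception values are invented and area_location is computed with the same inferred layout as above:

#include <stdio.h>

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_exception {
        unsigned long long old_chunk;
        unsigned long long new_chunk;
};

/*
 * Count how many exceptions at the tail of the committed range form a
 * consecutive run (old and new chunk both step down by one), mirroring
 * the loop in persistent_prepare_merge().
 */
static int count_consecutive(const struct disk_exception *area,
                             unsigned current_committed)
{
        const struct disk_exception *last = &area[current_committed - 1];
        int nr_consecutive;

        for (nr_consecutive = 1; nr_consecutive < (int)current_committed;
             nr_consecutive++) {
                const struct disk_exception *de =
                        &area[current_committed - 1 - nr_consecutive];

                if (de->old_chunk != last->old_chunk - nr_consecutive ||
                    de->new_chunk != last->new_chunk - nr_consecutive)
                        break;
        }

        return nr_consecutive;
}

int main(void)
{
        /* Made-up committed exceptions: the last three are consecutive. */
        struct disk_exception area[] = {
                { 10, 100 }, { 40, 200 }, { 41, 201 }, { 42, 202 },
        };
        unsigned current_committed = 4;
        unsigned exceptions_per_area = 1024;
        unsigned long current_area = 0;

        int nr_merged = count_consecutive(area, current_committed);

        /* What persistent_commit_merge() does after clearing the entries: */
        current_committed -= nr_merged;
        unsigned long area_location = NUM_SNAPSHOT_HDR_CHUNKS +
                (exceptions_per_area + 1) * current_area;
        unsigned long next_free = (area_location - 1) +
                (current_committed + 1) + NUM_SNAPSHOT_HDR_CHUNKS;

        printf("nr_merged=%d current_committed=%u next_free=%lu\n",
               nr_merged, current_committed, next_free);
        return 0;
}
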
+
 static void persistent_drop_snapshot(struct dm_exception_store *store)
 {
        struct pstore *ps = get_info(store);
@@ -656,20 +795,23 @@ static void persistent_drop_snapshot(struct dm_exception_store *store)
                DMWARN("write header failed");
 }
 
-int dm_create_persistent(struct dm_exception_store *store)
+static int persistent_ctr(struct dm_exception_store *store,
+                         unsigned argc, char **argv)
 {
        struct pstore *ps;
 
        /* allocate the pstore */
-       ps = kmalloc(sizeof(*ps), GFP_KERNEL);
+       ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                return -ENOMEM;
 
-       ps->snap = store->snap;
+       ps->store = store;
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->area = NULL;
-       ps->next_free = 2;      /* skipping the header and first area */
+       ps->zero_area = NULL;
+       ps->header_area = NULL;
+       ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
        ps->current_committed = 0;
 
        ps->callback_count = 0;
@@ -683,22 +825,80 @@ int dm_create_persistent(struct dm_exception_store *store)
                return -ENOMEM;
        }
 
-       store->destroy = persistent_destroy;
-       store->read_metadata = persistent_read_metadata;
-       store->prepare_exception = persistent_prepare_exception;
-       store->commit_exception = persistent_commit_exception;
-       store->drop_snapshot = persistent_drop_snapshot;
-       store->fraction_full = persistent_fraction_full;
        store->context = ps;
 
        return 0;
 }
 
+static unsigned persistent_status(struct dm_exception_store *store,
+                                 status_type_t status, char *result,
+                                 unsigned maxlen)
+{
+       unsigned sz = 0;
+
+       switch (status) {
+       case STATUSTYPE_INFO:
+               break;
+       case STATUSTYPE_TABLE:
+               DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
+       }
+
+       return sz;
+}
+
+static struct dm_exception_store_type _persistent_type = {
+       .name = "persistent",
+       .module = THIS_MODULE,
+       .ctr = persistent_ctr,
+       .dtr = persistent_dtr,
+       .read_metadata = persistent_read_metadata,
+       .prepare_exception = persistent_prepare_exception,
+       .commit_exception = persistent_commit_exception,
+       .prepare_merge = persistent_prepare_merge,
+       .commit_merge = persistent_commit_merge,
+       .drop_snapshot = persistent_drop_snapshot,
+       .usage = persistent_usage,
+       .status = persistent_status,
+};
+
+static struct dm_exception_store_type _persistent_compat_type = {
+       .name = "P",
+       .module = THIS_MODULE,
+       .ctr = persistent_ctr,
+       .dtr = persistent_dtr,
+       .read_metadata = persistent_read_metadata,
+       .prepare_exception = persistent_prepare_exception,
+       .commit_exception = persistent_commit_exception,
+       .prepare_merge = persistent_prepare_merge,
+       .commit_merge = persistent_commit_merge,
+       .drop_snapshot = persistent_drop_snapshot,
+       .usage = persistent_usage,
+       .status = persistent_status,
+};
+
 int dm_persistent_snapshot_init(void)
 {
-       return 0;
+       int r;
+
+       r = dm_exception_store_type_register(&_persistent_type);
+       if (r) {
+               DMERR("Unable to register persistent exception store type");
+               return r;
+       }
+
+       r = dm_exception_store_type_register(&_persistent_compat_type);
+       if (r) {
+               DMERR("Unable to register old-style persistent exception "
+                     "store type");
+               dm_exception_store_type_unregister(&_persistent_type);
+               return r;
+       }
+
+       return r;
 }
 
 void dm_persistent_snapshot_exit(void)
 {
+       dm_exception_store_type_unregister(&_persistent_type);
+       dm_exception_store_type_unregister(&_persistent_compat_type);
 }