*/
#include <linux/blkdev.h>
-#include <linux/config.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
+#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <linux/log2.h>
+#include <linux/dm-kcopyd.h>
+#include "dm-exception-store.h"
#include "dm-snap.h"
#include "dm-bio-list.h"
-#include "kcopyd.h"
+
+#define DM_MSG_PREFIX "snapshots"
/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2
/*
- * Each snapshot reserves this many pages for io
+ * Reserve 1MB for each snapshot initially (with minimum of 1 page).
*/
-#define SNAPSHOT_PAGES 256
+#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
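+/* (x ? : y) is a GCC extension: evaluates to x unless x is zero, else y. */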
+
+/*
+ * The size of the mempool used to track chunks in use.
+ */
+#define MIN_IOS 256
+
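+/*
+ * Single-threaded workqueue on which queued bios are resubmitted;
+ * flush_queued_bios() runs on it via each snapshot's queued_bios_work.
+ */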
+static struct workqueue_struct *ksnapd;
+static void flush_queued_bios(struct work_struct *work);
-struct pending_exception {
- struct exception e;
+struct dm_snap_pending_exception {
+ struct dm_snap_exception e;
	/*
	 * Origin buffers waiting for this to complete are held
	 * on a list.
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;
/*
- * Other pending_exceptions that are processing this
- * chunk. When this list is empty, we know we can
- * complete the origins.
+ * Short-term queue of pending exceptions prior to submission.
*/
- struct list_head siblings;
+ struct list_head list;
+
+ /*
+ * The primary pending_exception is the one that holds
+ * the ref_count and the list of origin_bios for a
+ * group of pending_exceptions. It is always last to get freed.
+ * These fields get set up when writing to the origin.
+ */
+ struct dm_snap_pending_exception *primary_pe;
+
+ /*
+ * Number of pending_exceptions processing this chunk.
+ * When this drops to zero we must complete the origin bios.
+ * If incrementing or decrementing this, hold pe->snap->lock for
+ * the sibling concerned and not pe->primary_pe->snap->lock unless
+ * they are the same.
+ */
+ atomic_t ref_count;
/* Pointer back to snapshot context */
struct dm_snapshot *snap;
	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
-static kmem_cache_t *exception_cache;
-static kmem_cache_t *pending_cache;
-static mempool_t *pending_pool;
+static struct kmem_cache *exception_cache;
+static struct kmem_cache *pending_cache;
+
+struct dm_snap_tracked_chunk {
+ struct hlist_node node;
+ chunk_t chunk;
+};
+
+static struct kmem_cache *tracked_chunk_cache;
+
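+/*
+ * Reads directed to the origin through the snapshot are recorded here
+ * while in flight, so pending_complete() can wait for any conflicting
+ * read to drain before committing an exception for the same chunk.
+ */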
+static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
+ chunk_t chunk)
+{
+ struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
+ GFP_NOIO);
+ unsigned long flags;
+
+ c->chunk = chunk;
+
+ spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+ hlist_add_head(&c->node,
+ &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
+ spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+ return c;
+}
+
+static void stop_tracking_chunk(struct dm_snapshot *s,
+ struct dm_snap_tracked_chunk *c)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->tracked_chunk_lock, flags);
+ hlist_del(&c->node);
+ spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
+
+ mempool_free(c, s->tracked_chunk_pool);
+}
+
+static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
+{
+ struct dm_snap_tracked_chunk *c;
+ struct hlist_node *hn;
+ int found = 0;
+
+ spin_lock_irq(&s->tracked_chunk_lock);
+
+ hlist_for_each_entry(c, hn,
+ &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
+ if (c->chunk == chunk) {
+ found = 1;
+ break;
+ }
+ }
+
+ spin_unlock_irq(&s->tracked_chunk_lock);
+
+ return found;
+}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
GFP_KERNEL);
if (!_origins) {
- DMERR("Device mapper: Snapshot: unable to allocate memory");
+ DMERR("unable to allocate memory");
return -ENOMEM;
}
kfree(_origins);
}
-static inline unsigned int origin_hash(struct block_device *bdev)
+static unsigned origin_hash(struct block_device *bdev)
{
return bdev->bd_dev & ORIGIN_MASK;
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
- struct origin *o;
+ struct origin *o, *new_o;
struct block_device *bdev = snap->origin->bdev;
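+
+	/*
+	 * Allocate the origin struct before taking _origins_lock so we
+	 * never allocate (and potentially recurse into writeback) while
+	 * holding the lock.
+	 */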
+ new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
+ if (!new_o)
+ return -ENOMEM;
+
down_write(&_origins_lock);
o = __lookup_origin(bdev);
- if (!o) {
+ if (o)
+ kfree(new_o);
+ else {
/* New origin */
- o = kmalloc(sizeof(*o), GFP_KERNEL);
- if (!o) {
- up_write(&_origins_lock);
- return -ENOMEM;
- }
+ o = new_o;
/* Initialise the struct */
INIT_LIST_HEAD(&o->snapshots);
/*
* Implementation of the exception hash tables.
+ * The lowest hash_shift bits of the chunk number are ignored, allowing
+ * some consecutive chunks to be grouped together.
*/
-static int init_exception_table(struct exception_table *et, uint32_t size)
+static int init_exception_table(struct exception_table *et, uint32_t size,
+ unsigned hash_shift)
{
unsigned int i;
+ et->hash_shift = hash_shift;
et->hash_mask = size - 1;
et->table = dm_vcalloc(size, sizeof(struct list_head));
if (!et->table)
return 0;
}
-static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
struct list_head *slot;
- struct exception *ex, *next;
+ struct dm_snap_exception *ex, *next;
int i, size;
size = et->hash_mask + 1;
vfree(et->table);
}
-static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
- return chunk & et->hash_mask;
+ return (chunk >> et->hash_shift) & et->hash_mask;
}
-static void insert_exception(struct exception_table *eh, struct exception *e)
+static void insert_exception(struct exception_table *eh,
+ struct dm_snap_exception *e)
{
struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
list_add(&e->hash_list, l);
}
-static inline void remove_exception(struct exception *e)
+static void remove_exception(struct dm_snap_exception *e)
{
list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
* remapped.
*/
-static struct exception *lookup_exception(struct exception_table *et,
- chunk_t chunk)
+static struct dm_snap_exception *lookup_exception(struct exception_table *et,
+ chunk_t chunk)
{
struct list_head *slot;
- struct exception *e;
+ struct dm_snap_exception *e;
slot = &et->table[exception_hash(et, chunk)];
list_for_each_entry (e, slot, hash_list)
- if (e->old_chunk == chunk)
+ if (chunk >= e->old_chunk &&
+ chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
return e;
return NULL;
}
-static inline struct exception *alloc_exception(void)
+static struct dm_snap_exception *alloc_exception(void)
{
- struct exception *e;
+ struct dm_snap_exception *e;
e = kmem_cache_alloc(exception_cache, GFP_NOIO);
if (!e)
return e;
}
-static inline void free_exception(struct exception *e)
+static void free_exception(struct dm_snap_exception *e)
{
kmem_cache_free(exception_cache, e);
}
-static inline struct pending_exception *alloc_pending_exception(void)
+static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
+{
+ struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
+ GFP_NOIO);
+
+ atomic_inc(&s->pending_exceptions_count);
+ pe->snap = s;
+
+ return pe;
+}
+
+static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
- return mempool_alloc(pending_pool, GFP_NOIO);
+ struct dm_snapshot *s = pe->snap;
+
+ mempool_free(pe, s->pending_pool);
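+	/*
+	 * Pairs with the smp_mb() in snapshot_dtr(): the free must be
+	 * visible before pending_exceptions_count can reach zero.
+	 */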
+ smp_mb__before_atomic_dec();
+ atomic_dec(&s->pending_exceptions_count);
}
-static inline void free_pending_exception(struct pending_exception *pe)
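+/*
+ * Insert a completed exception into the hash table, extending an existing
+ * entry when this table supports runs of consecutive chunks (hash_shift
+ * non-zero) and both the old and new chunk numbers directly adjoin it.
+ */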
+static void insert_completed_exception(struct dm_snapshot *s,
+ struct dm_snap_exception *new_e)
{
- mempool_free(pe, pending_pool);
+ struct exception_table *eh = &s->complete;
+ struct list_head *l;
+ struct dm_snap_exception *e = NULL;
+
+ l = &eh->table[exception_hash(eh, new_e->old_chunk)];
+
+ /* Add immediately if this table doesn't support consecutive chunks */
+ if (!eh->hash_shift)
+ goto out;
+
+ /* List is ordered by old_chunk */
+ list_for_each_entry_reverse(e, l, hash_list) {
+ /* Insert after an existing chunk? */
+ if (new_e->old_chunk == (e->old_chunk +
+ dm_consecutive_chunk_count(e) + 1) &&
+ new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
+ dm_consecutive_chunk_count(e) + 1)) {
+ dm_consecutive_chunk_count_inc(e);
+ free_exception(new_e);
+ return;
+ }
+
+ /* Insert before an existing chunk? */
+ if (new_e->old_chunk == (e->old_chunk - 1) &&
+ new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
+ dm_consecutive_chunk_count_inc(e);
+ e->old_chunk--;
+ e->new_chunk--;
+ free_exception(new_e);
+ return;
+ }
+
+ if (new_e->old_chunk > e->old_chunk)
+ break;
+ }
+
+out:
+ list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
-int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
+/*
+ * Callback used by the exception stores to load exceptions when
+ * initialising.
+ */
+static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
- struct exception *e;
+ struct dm_snapshot *s = context;
+ struct dm_snap_exception *e;
e = alloc_exception();
if (!e)
return -ENOMEM;
e->old_chunk = old;
+
+ /* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
- insert_exception(&s->complete, e);
+
+ insert_completed_exception(s, e);
+
return 0;
}
}
/*
- * Rounds a number down to a power of 2.
- */
-static inline uint32_t round_down(uint32_t n)
-{
- while (n & (n - 1))
- n &= (n - 1);
- return n;
-}
-
-/*
* Allocate room for a suitable hash table.
*/
static int init_hash_tables(struct dm_snapshot *s)
hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
hash_size = min(hash_size, max_buckets);
- /* Round it down to a power of 2 */
- hash_size = round_down(hash_size);
- if (init_exception_table(&s->complete, hash_size))
+ hash_size = rounddown_pow_of_two(hash_size);
+ if (init_exception_table(&s->complete, hash_size,
+ DM_CHUNK_CONSECUTIVE_BITS))
return -ENOMEM;
/*
if (hash_size < 64)
hash_size = 64;
- if (init_exception_table(&s->pending, hash_size)) {
+ if (init_exception_table(&s->pending, hash_size, 0)) {
exit_exception_table(&s->complete, exception_cache);
return -ENOMEM;
}
* Round a number up to the nearest 'size' boundary. size must
* be a power of 2.
*/
-static inline ulong round_up(ulong n, ulong size)
+static ulong round_up(ulong n, ulong size)
{
size--;
return (n + size) & ~size;
}
+static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
+ char **error)
+{
+ unsigned long chunk_size;
+ char *value;
+
+ chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
+ if (*chunk_size_arg == '\0' || *value != '\0') {
+ *error = "Invalid chunk size";
+ return -EINVAL;
+ }
+
+ if (!chunk_size) {
+ s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
+ return 0;
+ }
+
+ /*
+ * Chunk size must be multiple of page size. Silently
+ * round up if it's not.
+ */
+ chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
+
+ /* Check chunk_size is a power of 2 */
+ if (!is_power_of_2(chunk_size)) {
+ *error = "Chunk size is not a power of 2";
+ return -EINVAL;
+ }
+
+ /* Validate the chunk size against the device block size */
+ if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
+ *error = "Chunk size is not a multiple of device blocksize";
+ return -EINVAL;
+ }
+
+ s->chunk_size = chunk_size;
+ s->chunk_mask = chunk_size - 1;
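+	/* chunk_size is a power of 2, so ffs() - 1 gives log2(chunk_size) */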
+ s->chunk_shift = ffs(chunk_size) - 1;
+
+ return 0;
+}
+
/*
* Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
*/
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_snapshot *s;
- unsigned long chunk_size;
+ int i;
int r = -EINVAL;
char persistent;
char *origin_path;
char *cow_path;
- char *value;
- int blocksize;
- if (argc < 4) {
- ti->error = "dm-snapshot: requires exactly 4 arguments";
+ if (argc != 4) {
+ ti->error = "requires exactly 4 arguments";
r = -EINVAL;
goto bad1;
}
goto bad1;
}
- chunk_size = simple_strtoul(argv[3], &value, 10);
- if (chunk_size == 0 || value == NULL) {
- ti->error = "Invalid chunk size";
- r = -EINVAL;
- goto bad1;
- }
-
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL) {
ti->error = "Cannot allocate snapshot context private "
goto bad2;
}
- /*
- * Chunk size must be multiple of page size. Silently
- * round up if it's not.
- */
- chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
-
- /* Validate the chunk size against the device block size */
- blocksize = s->cow->bdev->bd_disk->queue->hardsect_size;
- if (chunk_size % (blocksize >> 9)) {
- ti->error = "Chunk size is not a multiple of device blocksize";
- r = -EINVAL;
- goto bad3;
- }
-
- /* Check chunk_size is a power of 2 */
- if (chunk_size & (chunk_size - 1)) {
- ti->error = "Chunk size is not a power of 2";
- r = -EINVAL;
+ r = set_chunk_size(s, argv[3], &ti->error);
+ if (r)
goto bad3;
- }
- s->chunk_size = chunk_size;
- s->chunk_mask = chunk_size - 1;
s->type = persistent;
- s->chunk_shift = ffs(chunk_size) - 1;
s->valid = 1;
- s->have_metadata = 0;
- s->last_percent = 0;
+ s->active = 0;
+ atomic_set(&s->pending_exceptions_count, 0);
init_rwsem(&s->lock);
- s->table = ti->table;
+ spin_lock_init(&s->pe_lock);
+ s->ti = ti;
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
goto bad3;
}
- /*
- * Check the persistent flag - done here because we need the iobuf
- * to check the LV header
- */
s->store.snap = s;
if (persistent == 'P')
- r = dm_create_persistent(&s->store, chunk_size);
+ r = dm_create_persistent(&s->store);
else
- r = dm_create_transient(&s->store, s, blocksize);
+ r = dm_create_transient(&s->store);
if (r) {
ti->error = "Couldn't create exception store";
goto bad4;
}
- r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
+ r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
if (r) {
ti->error = "Could not create kcopyd client";
goto bad5;
}
+ s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+ if (!s->pending_pool) {
+ ti->error = "Could not allocate mempool for pending exceptions";
+ goto bad6;
+ }
+
+ s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
+ tracked_chunk_cache);
+ if (!s->tracked_chunk_pool) {
+ ti->error = "Could not allocate tracked_chunk mempool for "
+ "tracking reads";
+ goto bad_tracked_chunk_pool;
+ }
+
+ for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
+
+ spin_lock_init(&s->tracked_chunk_lock);
+
+ /* Metadata must only be loaded into one table at once */
+ r = s->store.read_metadata(&s->store, dm_add_exception, (void *)s);
+ if (r < 0) {
+ ti->error = "Failed to read snapshot metadata";
+ goto bad_load_and_register;
+ } else if (r > 0) {
+ s->valid = 0;
+ DMWARN("Snapshot is marked invalid.");
+ }
+
+ bio_list_init(&s->queued_bios);
+ INIT_WORK(&s->queued_bios_work, flush_queued_bios);
+
/* Add snapshot to the list of snapshots for this origin */
+ /* Exceptions aren't triggered till snapshot_resume() is called */
if (register_snapshot(s)) {
r = -EINVAL;
ti->error = "Cannot register snapshot origin";
- goto bad6;
+ goto bad_load_and_register;
}
ti->private = s;
- ti->split_io = chunk_size;
+ ti->split_io = s->chunk_size;
return 0;
+ bad_load_and_register:
+ mempool_destroy(s->tracked_chunk_pool);
+
+ bad_tracked_chunk_pool:
+ mempool_destroy(s->pending_pool);
+
bad6:
- kcopyd_client_destroy(s->kcopyd_client);
+ dm_kcopyd_client_destroy(s->kcopyd_client);
bad5:
s->store.destroy(&s->store);
return r;
}
-static void snapshot_dtr(struct dm_target *ti)
+static void __free_exceptions(struct dm_snapshot *s)
{
- struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
-
- unregister_snapshot(s);
+ dm_kcopyd_client_destroy(s->kcopyd_client);
+ s->kcopyd_client = NULL;
exit_exception_table(&s->pending, pending_cache);
exit_exception_table(&s->complete, exception_cache);
- /* Deallocate memory used */
s->store.destroy(&s->store);
+}
+
+static void snapshot_dtr(struct dm_target *ti)
+{
+#ifdef CONFIG_DM_DEBUG
+ int i;
+#endif
+ struct dm_snapshot *s = ti->private;
+
+ flush_workqueue(ksnapd);
+
+ /* Prevent further origin writes from using this snapshot. */
+ /* After this returns there can be no new kcopyd jobs. */
+ unregister_snapshot(s);
+
+ while (atomic_read(&s->pending_exceptions_count))
+ msleep(1);
+ /*
+ * Ensure instructions in mempool_destroy aren't reordered
+ * before atomic_read.
+ */
+ smp_mb();
+
+#ifdef CONFIG_DM_DEBUG
+ for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
+ BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
+#endif
+
+ mempool_destroy(s->tracked_chunk_pool);
+
+ __free_exceptions(s);
+
+ mempool_destroy(s->pending_pool);
dm_put_device(ti, s->origin);
dm_put_device(ti, s->cow);
- kcopyd_client_destroy(s->kcopyd_client);
+
kfree(s);
}
}
}
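+/*
+ * Runs on the ksnapd workqueue: detach the queued bios under pe_lock,
+ * then resubmit them outside the spinlock.
+ */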
+static void flush_queued_bios(struct work_struct *work)
+{
+ struct dm_snapshot *s =
+ container_of(work, struct dm_snapshot, queued_bios_work);
+ struct bio *queued_bios;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->pe_lock, flags);
+ queued_bios = bio_list_get(&s->queued_bios);
+ spin_unlock_irqrestore(&s->pe_lock, flags);
+
+ flush_bios(queued_bios);
+}
+
/*
* Error a list of buffers.
*/
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- bio_io_error(bio, bio->bi_size);
+ bio_io_error(bio);
bio = n;
}
}
-static struct bio *__flush_bios(struct pending_exception *pe)
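+/*
+ * Mark the snapshot unusable after an I/O or allocation failure and
+ * raise a table event so userspace can notice.  Caller must hold
+ * s->lock for write.
+ */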
+static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
- struct pending_exception *sibling;
+ if (!s->valid)
+ return;
- if (list_empty(&pe->siblings))
- return bio_list_get(&pe->origin_bios);
+ if (err == -EIO)
+ DMERR("Invalidating snapshot: Error reading/writing.");
+ else if (err == -ENOMEM)
+ DMERR("Invalidating snapshot: Unable to allocate exception.");
- sibling = list_entry(pe->siblings.next,
- struct pending_exception, siblings);
+ if (s->store.drop_snapshot)
+ s->store.drop_snapshot(&s->store);
- list_del(&pe->siblings);
+ s->valid = 0;
- /* This is fine as long as kcopyd is single-threaded. If kcopyd
- * becomes multi-threaded, we'll need some locking here.
- */
- bio_list_merge(&sibling->origin_bios, &pe->origin_bios);
+ dm_table_event(s->ti->table);
+}
- return NULL;
+static void get_pending_exception(struct dm_snap_pending_exception *pe)
+{
+ atomic_inc(&pe->ref_count);
}
-static void pending_complete(struct pending_exception *pe, int success)
+static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
- struct exception *e;
- struct dm_snapshot *s = pe->snap;
- struct bio *flush = NULL;
-
- if (success) {
- e = alloc_exception();
- if (!e) {
- DMWARN("Unable to allocate exception.");
- down_write(&s->lock);
- s->store.drop_snapshot(&s->store);
- s->valid = 0;
- flush = __flush_bios(pe);
- up_write(&s->lock);
+ struct dm_snap_pending_exception *primary_pe;
+ struct bio *origin_bios = NULL;
- error_bios(bio_list_get(&pe->snapshot_bios));
- goto out;
- }
- *e = pe->e;
+ primary_pe = pe->primary_pe;
- /*
- * Add a proper exception, and remove the
- * in-flight exception from the list.
- */
- down_write(&s->lock);
- insert_exception(&s->complete, e);
- remove_exception(&pe->e);
- flush = __flush_bios(pe);
+ /*
+ * If this pe is involved in a write to the origin and
+ * it is the last sibling to complete then release
+ * the bios for the original write to the origin.
+ */
+ if (primary_pe &&
+ atomic_dec_and_test(&primary_pe->ref_count)) {
+ origin_bios = bio_list_get(&primary_pe->origin_bios);
+ free_pending_exception(primary_pe);
+ }
- /* Submit any pending write bios */
- up_write(&s->lock);
+ /*
+ * Free the pe if it's not linked to an origin write or if
+ * it's not itself a primary pe.
+ */
+ if (!primary_pe || primary_pe != pe)
+ free_pending_exception(pe);
- flush_bios(bio_list_get(&pe->snapshot_bios));
- } else {
+ return origin_bios;
+}
+
+static void pending_complete(struct dm_snap_pending_exception *pe, int success)
+{
+ struct dm_snap_exception *e;
+ struct dm_snapshot *s = pe->snap;
+ struct bio *origin_bios = NULL;
+ struct bio *snapshot_bios = NULL;
+ int error = 0;
+
+ if (!success) {
/* Read/write error - snapshot is unusable */
down_write(&s->lock);
- if (s->valid)
- DMERR("Error reading/writing snapshot");
- s->store.drop_snapshot(&s->store);
- s->valid = 0;
- remove_exception(&pe->e);
- flush = __flush_bios(pe);
- up_write(&s->lock);
+ __invalidate_snapshot(s, -EIO);
+ error = 1;
+ goto out;
+ }
- error_bios(bio_list_get(&pe->snapshot_bios));
+ e = alloc_exception();
+ if (!e) {
+ down_write(&s->lock);
+ __invalidate_snapshot(s, -ENOMEM);
+ error = 1;
+ goto out;
+ }
+ *e = pe->e;
- dm_table_event(s->table);
+ down_write(&s->lock);
+ if (!s->valid) {
+ free_exception(e);
+ error = 1;
+ goto out;
}
+ /*
+ * Check for conflicting reads. This is extremely improbable,
+ * so msleep(1) is sufficient and there is no need for a wait queue.
+ */
+ while (__chunk_is_tracked(s, pe->e.old_chunk))
+ msleep(1);
+
+ /*
+ * Add a proper exception, and remove the
+ * in-flight exception from the list.
+ */
+ insert_completed_exception(s, e);
+
out:
- free_pending_exception(pe);
+ remove_exception(&pe->e);
+ snapshot_bios = bio_list_get(&pe->snapshot_bios);
+ origin_bios = put_pending_exception(pe);
+
+ up_write(&s->lock);
+
+ /* Submit any pending write bios */
+ if (error)
+ error_bios(snapshot_bios);
+ else
+ flush_bios(snapshot_bios);
- if (flush)
- flush_bios(flush);
+ flush_bios(origin_bios);
}
static void commit_callback(void *context, int success)
{
- struct pending_exception *pe = (struct pending_exception *) context;
+ struct dm_snap_pending_exception *pe = context;
+
pending_complete(pe, success);
}
* Called when the copy I/O has finished. kcopyd actually runs
* this code so don't block.
*/
-static void copy_callback(int read_err, unsigned int write_err, void *context)
+static void copy_callback(int read_err, unsigned long write_err, void *context)
{
- struct pending_exception *pe = (struct pending_exception *) context;
+ struct dm_snap_pending_exception *pe = context;
struct dm_snapshot *s = pe->snap;
if (read_err || write_err)
/*
* Dispatches the copy operation to kcopyd.
*/
-static inline void start_copy(struct pending_exception *pe)
+static void start_copy(struct dm_snap_pending_exception *pe)
{
struct dm_snapshot *s = pe->snap;
- struct io_region src, dest;
+ struct dm_io_region src, dest;
struct block_device *bdev = s->origin->bdev;
sector_t dev_size;
dest.count = src.count;
/* Hand over to kcopyd */
- kcopyd_copy(s->kcopyd_client,
+ dm_kcopyd_copy(s->kcopyd_client,
&src, 1, &dest, 0, copy_callback, pe);
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
* this.
*/
-static struct pending_exception *
+static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
- struct exception *e;
- struct pending_exception *pe;
+ struct dm_snap_exception *e;
+ struct dm_snap_pending_exception *pe;
chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
	/*
	 * Is there a pending exception for this already ?
	 */
e = lookup_exception(&s->pending, chunk);
if (e) {
/* cast the exception to a pending exception */
- pe = container_of(e, struct pending_exception, e);
+ pe = container_of(e, struct dm_snap_pending_exception, e);
+ goto out;
+ }
- } else {
- /*
- * Create a new pending exception, we don't want
- * to hold the lock while we do this.
- */
- up_write(&s->lock);
- pe = alloc_pending_exception();
- down_write(&s->lock);
+ /*
+ * Create a new pending exception, we don't want
+ * to hold the lock while we do this.
+ */
+ up_write(&s->lock);
+ pe = alloc_pending_exception(s);
+ down_write(&s->lock);
- e = lookup_exception(&s->pending, chunk);
- if (e) {
- free_pending_exception(pe);
- pe = container_of(e, struct pending_exception, e);
- } else {
- pe->e.old_chunk = chunk;
- bio_list_init(&pe->origin_bios);
- bio_list_init(&pe->snapshot_bios);
- INIT_LIST_HEAD(&pe->siblings);
- pe->snap = s;
- pe->started = 0;
-
- if (s->store.prepare_exception(&s->store, &pe->e)) {
- free_pending_exception(pe);
- s->valid = 0;
- return NULL;
- }
+ if (!s->valid) {
+ free_pending_exception(pe);
+ return NULL;
+ }
- insert_exception(&s->pending, &pe->e);
- }
+ e = lookup_exception(&s->pending, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ pe = container_of(e, struct dm_snap_pending_exception, e);
+ goto out;
}
+ pe->e.old_chunk = chunk;
+ bio_list_init(&pe->origin_bios);
+ bio_list_init(&pe->snapshot_bios);
+ pe->primary_pe = NULL;
+ atomic_set(&pe->ref_count, 0);
+ pe->started = 0;
+
+ if (s->store.prepare_exception(&s->store, &pe->e)) {
+ free_pending_exception(pe);
+ return NULL;
+ }
+
+ get_pending_exception(pe);
+ insert_exception(&s->pending, &pe->e);
+
+ out:
return pe;
}
-static inline void remap_exception(struct dm_snapshot *s, struct exception *e,
- struct bio *bio)
+static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
+ struct bio *bio, chunk_t chunk)
{
bio->bi_bdev = s->cow->bdev;
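+	/*
+	 * e may describe a run of consecutive chunks: dm_chunk_number()
+	 * strips the run length stored in the top bits of new_chunk, and
+	 * (chunk - e->old_chunk) is this chunk's offset within the run.
+	 */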
- bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
- (bio->bi_sector & s->chunk_mask);
+ bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
+ (chunk - e->old_chunk)) +
+ (bio->bi_sector & s->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct exception *e;
- struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
- int r = 1;
+ struct dm_snap_exception *e;
+ struct dm_snapshot *s = ti->private;
+ int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
- struct pending_exception *pe;
+ struct dm_snap_pending_exception *pe = NULL;
chunk = sector_to_chunk(s, bio->bi_sector);
/* Full snapshots are not usable */
+ /* To get here the table must be live so s->active is always set. */
if (!s->valid)
return -EIO;
+ /* FIXME: should only take write lock if we need
+ * to copy an exception */
+ down_write(&s->lock);
+
+ if (!s->valid) {
+ r = -EIO;
+ goto out_unlock;
+ }
+
+ /* If the block is already remapped - use that, else remap it */
+ e = lookup_exception(&s->complete, chunk);
+ if (e) {
+ remap_exception(s, e, bio, chunk);
+ goto out_unlock;
+ }
+
/*
* Write to snapshot - higher level takes care of RW/RO
* flags so we should only get this if we are
* writeable.
*/
if (bio_rw(bio) == WRITE) {
+ pe = __find_pending_exception(s, bio);
+ if (!pe) {
+ __invalidate_snapshot(s, -ENOMEM);
+ r = -EIO;
+ goto out_unlock;
+ }
- /* FIXME: should only take write lock if we need
- * to copy an exception */
- down_write(&s->lock);
+ remap_exception(s, &pe->e, bio, chunk);
+ bio_list_add(&pe->snapshot_bios, bio);
- /* If the block is already remapped - use that, else remap it */
- e = lookup_exception(&s->complete, chunk);
- if (e) {
- remap_exception(s, e, bio);
- up_write(&s->lock);
+ r = DM_MAPIO_SUBMITTED;
- } else {
- pe = __find_pending_exception(s, bio);
-
- if (!pe) {
- if (s->store.drop_snapshot)
- s->store.drop_snapshot(&s->store);
- s->valid = 0;
- r = -EIO;
- up_write(&s->lock);
- } else {
- remap_exception(s, &pe->e, bio);
- bio_list_add(&pe->snapshot_bios, bio);
-
- if (!pe->started) {
- /* this is protected by snap->lock */
- pe->started = 1;
- up_write(&s->lock);
- start_copy(pe);
- } else
- up_write(&s->lock);
- r = 0;
- }
+ if (!pe->started) {
+ /* this is protected by snap->lock */
+ pe->started = 1;
+ up_write(&s->lock);
+ start_copy(pe);
+ goto out;
}
-
} else {
- /*
- * FIXME: this read path scares me because we
- * always use the origin when we have a pending
- * exception. However I can't think of a
- * situation where this is wrong - ejt.
- */
-
- /* Do reads */
- down_read(&s->lock);
-
- /* See if it it has been remapped */
- e = lookup_exception(&s->complete, chunk);
- if (e)
- remap_exception(s, e, bio);
- else
- bio->bi_bdev = s->origin->bdev;
-
- up_read(&s->lock);
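+		/*
+		 * Track this read so pending_complete() will wait for it
+		 * before committing an exception for the same chunk.
+		 */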
+ bio->bi_bdev = s->origin->bdev;
+ map_context->ptr = track_chunk(s, chunk);
}
+ out_unlock:
+ up_write(&s->lock);
+ out:
return r;
}
-static void snapshot_resume(struct dm_target *ti)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+ int error, union map_info *map_context)
{
- struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
+ struct dm_snapshot *s = ti->private;
+ struct dm_snap_tracked_chunk *c = map_context->ptr;
- if (s->have_metadata)
- return;
+ if (c)
+ stop_tracking_chunk(s, c);
- if (s->store.read_metadata(&s->store)) {
- down_write(&s->lock);
- s->valid = 0;
- up_write(&s->lock);
- }
+ return 0;
+}
+
+static void snapshot_resume(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
- s->have_metadata = 1;
+ down_write(&s->lock);
+ s->active = 1;
+ up_write(&s->lock);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
- struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;
+ struct dm_snapshot *snap = ti->private;
switch (type) {
case STATUSTYPE_INFO:
snap->store.fraction_full(&snap->store,
&numerator,
&denominator);
- snprintf(result, maxlen,
- SECTOR_FORMAT "/" SECTOR_FORMAT,
- numerator, denominator);
+ snprintf(result, maxlen, "%llu/%llu",
+ (unsigned long long)numerator,
+ (unsigned long long)denominator);
}
else
snprintf(result, maxlen, "Unknown");
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
* make sense.
*/
- snprintf(result, maxlen, "%s %s %c " SECTOR_FORMAT,
+ snprintf(result, maxlen, "%s %s %c %llu",
snap->origin->name, snap->cow->name,
- snap->type, snap->chunk_size);
+ snap->type,
+ (unsigned long long)snap->chunk_size);
break;
}
/*-----------------------------------------------------------------
* Origin methods
*---------------------------------------------------------------*/
-static void list_merge(struct list_head *l1, struct list_head *l2)
-{
- struct list_head *l1_n, *l2_p;
-
- l1_n = l1->next;
- l2_p = l2->prev;
-
- l1->next = l2;
- l2->prev = l1;
-
- l2_p->next = l1_n;
- l1_n->prev = l2_p;
-}
-
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
- int r = 1, first = 1;
+ int r = DM_MAPIO_REMAPPED, first = 0;
struct dm_snapshot *snap;
- struct exception *e;
- struct pending_exception *pe, *last = NULL;
+ struct dm_snap_exception *e;
+ struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
chunk_t chunk;
+ LIST_HEAD(pe_queue);
/* Do all the snapshots on this origin */
list_for_each_entry (snap, snapshots, list) {
- /* Only deal with valid snapshots */
- if (!snap->valid)
- continue;
-
down_write(&snap->lock);
+ /* Only deal with valid and active snapshots */
+ if (!snap->valid || !snap->active)
+ goto next_snapshot;
+
+ /* Nothing to do if writing beyond end of snapshot */
+ if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
+ goto next_snapshot;
+
/*
* Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
* Check exception table to see if block
* is already remapped in this snapshot
* and trigger an exception if not.
+ *
+ * ref_count is initialised to 1 so pending_complete()
+ * won't destroy the primary_pe while we're inside this loop.
*/
e = lookup_exception(&snap->complete, chunk);
- if (!e) {
- pe = __find_pending_exception(snap, bio);
- if (!pe) {
- snap->store.drop_snapshot(&snap->store);
- snap->valid = 0;
-
- } else {
- if (last)
- list_merge(&pe->siblings,
- &last->siblings);
-
- last = pe;
- r = 0;
+ if (e)
+ goto next_snapshot;
+
+ pe = __find_pending_exception(snap, bio);
+ if (!pe) {
+ __invalidate_snapshot(snap, -ENOMEM);
+ goto next_snapshot;
+ }
+
+ if (!primary_pe) {
+ /*
+ * Either every pe here has same
+ * primary_pe or none has one yet.
+ */
+ if (pe->primary_pe)
+ primary_pe = pe->primary_pe;
+ else {
+ primary_pe = pe;
+ first = 1;
}
+
+ bio_list_add(&primary_pe->origin_bios, bio);
+
+ r = DM_MAPIO_SUBMITTED;
}
+ if (!pe->primary_pe) {
+ pe->primary_pe = primary_pe;
+ get_pending_exception(primary_pe);
+ }
+
+ if (!pe->started) {
+ pe->started = 1;
+ list_add_tail(&pe->list, &pe_queue);
+ }
+
+ next_snapshot:
up_write(&snap->lock);
}
+ if (!primary_pe)
+ return r;
+
/*
- * Now that we have a complete pe list we can start the copying.
+ * If this is the first time we're processing this chunk and
+ * ref_count is now 1 it means all the pending exceptions
+ * got completed while we were in the loop above, so it falls to
+ * us here to remove the primary_pe and submit any origin_bios.
*/
- if (last) {
- pe = last;
- do {
- down_write(&pe->snap->lock);
- if (first)
- bio_list_add(&pe->origin_bios, bio);
- if (!pe->started) {
- pe->started = 1;
- up_write(&pe->snap->lock);
- start_copy(pe);
- } else
- up_write(&pe->snap->lock);
- first = 0;
- pe = list_entry(pe->siblings.next,
- struct pending_exception, siblings);
-
- } while (pe != last);
+
+ if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
+ flush_bios(bio_list_get(&primary_pe->origin_bios));
+ free_pending_exception(primary_pe);
+ /* If we got here, pe_queue is necessarily empty. */
+ return r;
}
+ /*
+ * Now that we have a complete pe list we can start the copying.
+ */
+ list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
+ start_copy(pe);
+
return r;
}
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
struct origin *o;
- int r = 1;
+ int r = DM_MAPIO_REMAPPED;
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
struct dm_dev *dev;
if (argc != 1) {
- ti->error = "dm-origin: incorrect number of arguments";
+ ti->error = "origin: incorrect number of arguments";
return -EINVAL;
}
static void origin_dtr(struct dm_target *ti)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
dm_put_device(ti, dev);
}
static int origin_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
/* Only tell snapshots if this is a write */
- return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : 1;
+ return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
struct dm_snapshot *snap;
struct origin *o;
chunk_t chunk_size = 0;
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
unsigned int maxlen)
{
- struct dm_dev *dev = (struct dm_dev *) ti->private;
+ struct dm_dev *dev = ti->private;
switch (type) {
case STATUSTYPE_INFO:
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 0, 1},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 0, 1},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
.map = snapshot_map,
+ .end_io = snapshot_end_io,
.resume = snapshot_resume,
.status = snapshot_status,
};
static int __init dm_snapshot_init(void)
{
int r;
+ r = dm_exception_store_init();
+ if (r) {
+ DMERR("Failed to initialize exception stores");
+ return r;
+ }
+
r = dm_register_target(&snapshot_target);
if (r) {
DMERR("snapshot target register failed %d", r);
r = dm_register_target(&origin_target);
if (r < 0) {
- DMERR("Device mapper: Origin: register failed %d\n", r);
+ DMERR("Origin target register failed %d", r);
goto bad1;
}
goto bad2;
}
- exception_cache = kmem_cache_create("dm-snapshot-ex",
- sizeof(struct exception),
- __alignof__(struct exception),
- 0, NULL, NULL);
+ exception_cache = KMEM_CACHE(dm_snap_exception, 0);
if (!exception_cache) {
DMERR("Couldn't create exception cache.");
r = -ENOMEM;
goto bad3;
}
- pending_cache =
- kmem_cache_create("dm-snapshot-in",
- sizeof(struct pending_exception),
- __alignof__(struct pending_exception),
- 0, NULL, NULL);
+ pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
if (!pending_cache) {
DMERR("Couldn't create pending cache.");
r = -ENOMEM;
goto bad4;
}
- pending_pool = mempool_create(128, mempool_alloc_slab,
- mempool_free_slab, pending_cache);
- if (!pending_pool) {
- DMERR("Couldn't create pending pool.");
+ tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
+ if (!tracked_chunk_cache) {
+ DMERR("Couldn't create cache to track chunks in use.");
r = -ENOMEM;
goto bad5;
}
+ ksnapd = create_singlethread_workqueue("ksnapd");
+ if (!ksnapd) {
+ DMERR("Failed to create ksnapd workqueue.");
+ r = -ENOMEM;
+ goto bad_pending_pool;
+ }
+
return 0;
- bad5:
+bad_pending_pool:
+ kmem_cache_destroy(tracked_chunk_cache);
+bad5:
kmem_cache_destroy(pending_cache);
- bad4:
+bad4:
kmem_cache_destroy(exception_cache);
- bad3:
+bad3:
exit_origin_hash();
- bad2:
+bad2:
dm_unregister_target(&origin_target);
- bad1:
+bad1:
dm_unregister_target(&snapshot_target);
return r;
}
static void __exit dm_snapshot_exit(void)
{
- int r;
-
- r = dm_unregister_target(&snapshot_target);
- if (r)
- DMERR("snapshot unregister failed %d", r);
+ destroy_workqueue(ksnapd);
- r = dm_unregister_target(&origin_target);
- if (r)
- DMERR("origin unregister failed %d", r);
+ dm_unregister_target(&snapshot_target);
+ dm_unregister_target(&origin_target);
exit_origin_hash();
- mempool_destroy(pending_pool);
kmem_cache_destroy(pending_cache);
kmem_cache_destroy(exception_cache);
+ kmem_cache_destroy(tracked_chunk_cache);
+
+ dm_exception_store_exit();
}
/* Module hooks */