*/
#include <linux/blkdev.h>
-#include <linux/ctype.h>
#include <linux/device-mapper.h>
+#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
+#include <linux/workqueue.h>
-#include "dm-snap.h"
-#include "dm-bio-list.h"
+#include "dm-exception-store.h"
#define DM_MSG_PREFIX "snapshots"
*/
#define MIN_IOS 256
+#define DM_TRACKED_CHUNK_HASH_SIZE 16
+#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
+ (DM_TRACKED_CHUNK_HASH_SIZE - 1))
+
+struct dm_exception_table {
+ uint32_t hash_mask;
+ unsigned hash_shift;
+ struct list_head *table;
+};
+
+struct dm_snapshot {
+ struct rw_semaphore lock;
+
+ struct dm_dev *origin;
+ struct dm_dev *cow;
+
+ struct dm_target *ti;
+
+ /* List of snapshots per Origin */
+ struct list_head list;
+
+ /* You can't use a snapshot if this is 0 (e.g. if full) */
+ int valid;
+
+ /* Origin writes don't trigger exceptions until this is set */
+ int active;
+
+ /* Whether or not owning mapped_device is suspended */
+ int suspended;
+
+ mempool_t *pending_pool;
+
+ atomic_t pending_exceptions_count;
+
+ struct dm_exception_table pending;
+ struct dm_exception_table complete;
+
+ /*
+ * pe_lock protects all pending_exception operations and access
+ * as well as the snapshot_bios list.
+ */
+ spinlock_t pe_lock;
+
+ /* The on disk metadata handler */
+ struct dm_exception_store *store;
+
+ struct dm_kcopyd_client *kcopyd_client;
+
+ /* Queue of snapshot writes for ksnapd to flush */
+ struct bio_list queued_bios;
+ struct work_struct queued_bios_work;
+
+ /* Chunks with outstanding reads */
+ mempool_t *tracked_chunk_pool;
+ spinlock_t tracked_chunk_lock;
+ struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+};
+
+struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
+{
+ return s->cow;
+}
+EXPORT_SYMBOL(dm_snap_cow);
+
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
+static sector_t chunk_to_sector(struct dm_exception_store *store,
+ chunk_t chunk)
+{
+ return chunk << store->chunk_shift;
+}
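
chunk_to_sector() is pure shift arithmetic: chunk_shift is log2 of the chunk size in 512-byte sectors, and the matching chunk_mask (chunk_size - 1) extracts the intra-chunk offset that remap_exception() adds back later. A minimal userspace sketch of the arithmetic, with a made-up 8KiB chunk size:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;
typedef uint64_t sector_t;

int main(void)
{
        /* Hypothetical store geometry: 8KiB chunks = 16 x 512-byte sectors */
        unsigned chunk_size = 16;               /* in sectors */
        unsigned chunk_shift = 4;               /* log2(16) */
        unsigned chunk_mask = chunk_size - 1;

        sector_t sector = 373;

        chunk_t chunk = sector >> chunk_shift;  /* sector_to_chunk */
        sector_t base = chunk << chunk_shift;   /* chunk_to_sector */
        sector_t offset = sector & chunk_mask;  /* intra-chunk offset */

        /* Prints: sector 373 -> chunk 23, base 368, offset 5 */
        printf("sector %llu -> chunk %llu, base %llu, offset %llu\n",
               (unsigned long long)sector, (unsigned long long)chunk,
               (unsigned long long)base, (unsigned long long)offset);
        return 0;
}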
+
+static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
+{
+ /*
+ * There is only ever one instance of a particular block
+ * device so we can compare pointers safely.
+ */
+ return lhs == rhs;
+}
+
struct dm_snap_pending_exception {
- struct dm_snap_exception e;
+ struct dm_exception e;
/*
* Origin buffers waiting for this to complete are held
}
/*
+ * This conflicting I/O is extremely improbable in the caller,
+ * so msleep(1) is sufficient and there is no need for a wait queue.
+ */
+static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
+{
+ while (__chunk_is_tracked(s, chunk))
+ msleep(1);
+}
+
+/*
* One of these per registered origin, held in the snapshot_origins hash
*/
struct origin {
}
/*
+ * _origins_lock must be held when calling this function.
+ * Returns number of snapshots registered using the supplied cow device, plus:
+ * snap_src - a snapshot suitable for use as a source of exception handover
+ * snap_dest - a snapshot capable of receiving exception handover.
+ *
+ * Possible return values and states:
+ * 0: NULL, NULL - first new snapshot
+ * 1: snap_src, NULL - normal snapshot
+ * 2: snap_src, snap_dest - waiting for handover
+ * 2: snap_src, NULL - handed over, waiting for old to be deleted
+ * 1: NULL, snap_dest - source got destroyed without handover
+ */
+static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
+ struct dm_snapshot **snap_src,
+ struct dm_snapshot **snap_dest)
+{
+ struct dm_snapshot *s;
+ struct origin *o;
+ int count = 0;
+ int active;
+
+ o = __lookup_origin(snap->origin->bdev);
+ if (!o)
+ goto out;
+
+ list_for_each_entry(s, &o->snapshots, list) {
+ if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
+ continue;
+
+ down_read(&s->lock);
+ active = s->active;
+ up_read(&s->lock);
+
+ if (active) {
+ if (snap_src)
+ *snap_src = s;
+ } else if (snap_dest)
+ *snap_dest = s;
+
+ count++;
+ }
+
+out:
+ return count;
+}
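
As a hedged illustration of the state table above, this standalone decoder maps the (count, snap_src, snap_dest) triple back to the documented states; the strings are descriptive only, not kernel identifiers:

#include <stdio.h>

/* Illustrative decoder for the return states of
 * __find_snapshots_sharing_cow(); src/dest stand in for
 * snap_src/snap_dest being non-NULL. */
static const char *cow_sharing_state(int count, int src, int dest)
{
        if (count == 0 && !src && !dest)
                return "first new snapshot";
        if (count == 1 && src && !dest)
                return "normal snapshot";
        if (count == 2 && src && dest)
                return "waiting for handover";
        if (count == 2 && src && !dest)
                return "handed over, waiting for old to be deleted";
        if (count == 1 && !src && dest)
                return "source got destroyed without handover";
        return "unexpected";
}

int main(void)
{
        printf("%s\n", cow_sharing_state(2, 1, 1)); /* waiting for handover */
        return 0;
}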
+
+/*
+ * On success, returns 1 if this snapshot is a handover destination,
+ * otherwise returns 0.
+ */
+static int __validate_exception_handover(struct dm_snapshot *snap)
+{
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ /* Does snapshot need exceptions handed over to it? */
+ if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest) == 2) ||
+ snap_dest) {
+ snap->ti->error = "Snapshot cow pairing for exception "
+ "table handover failed";
+ return -EINVAL;
+ }
+
+ /*
+ * If no snap_src was found, snap cannot become a handover
+ * destination.
+ */
+ if (!snap_src)
+ return 0;
+
+ return 1;
+}
+
+static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
+{
+ struct dm_snapshot *l;
+
+ /* Sort the list according to chunk size, largest-first smallest-last */
+ list_for_each_entry(l, &o->snapshots, list)
+ if (l->store->chunk_size < s->store->chunk_size)
+ break;
+ list_add_tail(&s->list, &l->list);
+}
+
+/*
* Make a note of the snapshot and its origin so we can look it
* up when the origin has a write on it.
+ *
+ * Also validate snapshot exception store handovers.
+ * On success, returns 1 if this registration is a handover destination,
+ * otherwise returns 0.
*/
static int register_snapshot(struct dm_snapshot *snap)
{
- struct origin *o, *new_o;
+ struct origin *o, *new_o = NULL;
struct block_device *bdev = snap->origin->bdev;
+ int r = 0;
new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
if (!new_o)
return -ENOMEM;
down_write(&_origins_lock);
- o = __lookup_origin(bdev);
+ r = __validate_exception_handover(snap);
+ if (r < 0) {
+ kfree(new_o);
+ goto out;
+ }
+
+ o = __lookup_origin(bdev);
if (o)
kfree(new_o);
else {
__insert_origin(o);
}
- list_add_tail(&snap->list, &o->snapshots);
+ __insert_snapshot(o, snap);
+
+out:
+ up_write(&_origins_lock);
+
+ return r;
+}
+
+/*
+ * Move snapshot to correct place in list according to chunk size.
+ */
+static void reregister_snapshot(struct dm_snapshot *s)
+{
+ struct block_device *bdev = s->origin->bdev;
+
+ down_write(&_origins_lock);
+
+ list_del(&s->list);
+ __insert_snapshot(__lookup_origin(bdev), s);
up_write(&_origins_lock);
- return 0;
}
static void unregister_snapshot(struct dm_snapshot *s)
o = __lookup_origin(s->origin->bdev);
list_del(&s->list);
- if (list_empty(&o->snapshots)) {
+ if (o && list_empty(&o->snapshots)) {
list_del(&o->hash_list);
kfree(o);
}
* The lowest hash_shift bits of the chunk number are ignored, allowing
* some consecutive chunks to be grouped together.
*/
-static int init_exception_table(struct exception_table *et, uint32_t size,
- unsigned hash_shift)
+static int dm_exception_table_init(struct dm_exception_table *et,
+ uint32_t size, unsigned hash_shift)
{
unsigned int i;
return 0;
}
-static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
+static void dm_exception_table_exit(struct dm_exception_table *et,
+ struct kmem_cache *mem)
{
struct list_head *slot;
- struct dm_snap_exception *ex, *next;
+ struct dm_exception *ex, *next;
int i, size;
size = et->hash_mask + 1;
vfree(et->table);
}
-static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
+static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
return (chunk >> et->hash_shift) & et->hash_mask;
}
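
Because the low hash_shift bits are discarded before masking, runs of 2^hash_shift consecutive chunks land in the same bucket, which is what lets dm_insert_exception() find and extend a neighbouring entry cheaply (the complete table passes DM_CHUNK_CONSECUTIVE_BITS as the shift). A small sketch with illustrative parameters:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;

/* Same hashing as exception_hash(), with made-up parameters:
 * hash_shift groups 2^shift consecutive chunks into one bucket. */
static uint32_t bucket_of(uint32_t hash_mask, unsigned hash_shift,
                          chunk_t chunk)
{
        return (chunk >> hash_shift) & hash_mask;
}

int main(void)
{
        uint32_t hash_mask = 255;       /* 256 buckets */
        unsigned hash_shift = 3;        /* chunks 0-7 share bucket 0, ... */
        chunk_t c;

        for (c = 0; c < 10; c++)
                printf("chunk %llu -> bucket %u\n", (unsigned long long)c,
                       bucket_of(hash_mask, hash_shift, c));
        return 0;
}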
-static void insert_exception(struct exception_table *eh,
- struct dm_snap_exception *e)
-{
- struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
- list_add(&e->hash_list, l);
-}
-
-static void remove_exception(struct dm_snap_exception *e)
+static void dm_remove_exception(struct dm_exception *e)
{
list_del(&e->hash_list);
}
* Return the exception data for a sector, or NULL if not
* remapped.
*/
-static struct dm_snap_exception *lookup_exception(struct exception_table *et,
- chunk_t chunk)
+static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
+ chunk_t chunk)
{
struct list_head *slot;
- struct dm_snap_exception *e;
+ struct dm_exception *e;
slot = &et->table[exception_hash(et, chunk)];
list_for_each_entry (e, slot, hash_list)
return NULL;
}
-static struct dm_snap_exception *alloc_exception(void)
+static struct dm_exception *alloc_completed_exception(void)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
e = kmem_cache_alloc(exception_cache, GFP_NOIO);
if (!e)
return e;
}
-static void free_exception(struct dm_snap_exception *e)
+static void free_completed_exception(struct dm_exception *e)
{
kmem_cache_free(exception_cache, e);
}
struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
GFP_NOIO);
+ atomic_inc(&s->pending_exceptions_count);
pe->snap = s;
return pe;
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
- mempool_free(pe, pe->snap->pending_pool);
+ struct dm_snapshot *s = pe->snap;
+
+ mempool_free(pe, s->pending_pool);
+ smp_mb__before_atomic_dec();
+ atomic_dec(&s->pending_exceptions_count);
}
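
The barrier pairs with the drain loop in snapshot_dtr(), which waits for pending_exceptions_count to reach zero (and then issues smp_mb()) before destroying the mempool: the element must be back in the pool before the decrement becomes visible. A userspace analogue of that pattern, using C11 release/acquire atomics in place of the kernel primitives; a sketch of the ordering, not the driver code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_count = 2;

static void release_one(void)
{
        /* Release: everything done to the object (e.g. returning it to
         * the pool) is visible before the decrement is observed. */
        atomic_fetch_sub_explicit(&pending_count, 1, memory_order_release);
}

static void teardown(void)
{
        /* Acquire: once zero is observed, all prior releases are
         * visible, so destroying the pool afterwards is safe. */
        while (atomic_load_explicit(&pending_count, memory_order_acquire))
                ; /* the driver msleep(1)s here instead of spinning */
        printf("safe to destroy pool\n");
}

int main(void)
{
        release_one();
        release_one();
        teardown();
        return 0;
}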
-static void insert_completed_exception(struct dm_snapshot *s,
- struct dm_snap_exception *new_e)
+static void dm_insert_exception(struct dm_exception_table *eh,
+ struct dm_exception *new_e)
{
- struct exception_table *eh = &s->complete;
struct list_head *l;
- struct dm_snap_exception *e = NULL;
+ struct dm_exception *e = NULL;
l = &eh->table[exception_hash(eh, new_e->old_chunk)];
new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
dm_consecutive_chunk_count(e) + 1)) {
dm_consecutive_chunk_count_inc(e);
- free_exception(new_e);
+ free_completed_exception(new_e);
return;
}
dm_consecutive_chunk_count_inc(e);
e->old_chunk--;
e->new_chunk--;
- free_exception(new_e);
+ free_completed_exception(new_e);
return;
}
list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
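
The merge above works because one exception can represent a run: the consecutive-run count lives in the top bits of new_chunk, accessed via dm_chunk_number() and dm_consecutive_chunk_count_inc(). A hedged userspace model of that packing; the 56/8 bit split mirrors the exception store's layout but is restated here as an assumption:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;

/* Assumed split: low 56 bits = chunk number, high 8 bits = count of
 * extra consecutive chunks covered by this one entry. */
#define CHUNK_NUMBER_BITS 56

static chunk_t chunk_number(chunk_t c)
{
        return c & ((1ULL << CHUNK_NUMBER_BITS) - 1);
}

static unsigned consecutive_count(chunk_t c)
{
        return c >> CHUNK_NUMBER_BITS;
}

int main(void)
{
        chunk_t new_chunk = 12;

        /* Merging (old=6,new=13) into the existing entry (old=5,new=12): */
        new_chunk += 1ULL << CHUNK_NUMBER_BITS;

        /* Prints: chunk 12 covers 1 extra consecutive chunk(s) */
        printf("chunk %llu covers %u extra consecutive chunk(s)\n",
               (unsigned long long)chunk_number(new_chunk),
               consecutive_count(new_chunk));
        return 0;
}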
-int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
+/*
+ * Callback used by the exception stores to load exceptions when
+ * initialising.
+ */
+static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
- struct dm_snap_exception *e;
+ struct dm_snapshot *s = context;
+ struct dm_exception *e;
- e = alloc_exception();
+ e = alloc_completed_exception();
if (!e)
return -ENOMEM;
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
- insert_completed_exception(s, e);
+ dm_insert_exception(&s->complete, e);
return 0;
}
+#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
+
+/*
+ * Return a minimum chunk size of all snapshots that have the specified origin.
+ * Return zero if the origin has no snapshots.
+ */
+static sector_t __minimum_chunk_size(struct origin *o)
+{
+ struct dm_snapshot *snap;
+ unsigned chunk_size = 0;
+
+ if (o)
+ list_for_each_entry(snap, &o->snapshots, list)
+ chunk_size = min_not_zero(chunk_size,
+ snap->store->chunk_size);
+
+ return chunk_size;
+}
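
min_not_zero() treats zero as "unset" rather than as a candidate minimum, which matters because a handover destination temporarily has chunk_size 0 (see snapshot_ctr below) and must not drag the origin's split_io down to zero. A quick edge-case check:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

int main(void)
{
        printf("%d\n", min_not_zero(0, 16));    /* 16: zero means unset */
        printf("%d\n", min_not_zero(8, 0));     /* 8 */
        printf("%d\n", min_not_zero(8, 16));    /* 8: ordinary minimum */
        return 0;
}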
+
/*
* Hard coded magic.
*/
origin_dev_size = get_dev_size(s->origin->bdev);
max_buckets = calc_max_buckets();
- hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
+ hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
hash_size = min(hash_size, max_buckets);
+ if (hash_size < 64)
+ hash_size = 64;
hash_size = rounddown_pow_of_two(hash_size);
- if (init_exception_table(&s->complete, hash_size,
- DM_CHUNK_CONSECUTIVE_BITS))
+ if (dm_exception_table_init(&s->complete, hash_size,
+ DM_CHUNK_CONSECUTIVE_BITS))
return -ENOMEM;
/*
if (hash_size < 64)
hash_size = 64;
- if (init_exception_table(&s->pending, hash_size, 0)) {
- exit_exception_table(&s->complete, exception_cache);
+ if (dm_exception_table_init(&s->pending, hash_size, 0)) {
+ dm_exception_table_exit(&s->complete, exception_cache);
return -ENOMEM;
}
}
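
Worked through with made-up numbers: the sizing above takes the smaller of origin and COW in sectors, converts to chunks, caps at max_buckets, clamps to at least 64 and rounds down to a power of two. A standalone sketch, in which rounddown_pow2() stands in for the kernel's rounddown_pow_of_two():

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Userspace stand-in for rounddown_pow_of_two(). */
static sector_t rounddown_pow2(sector_t n)
{
        sector_t p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        sector_t origin = 2097152000ULL; /* ~1TB origin, in 512B sectors */
        sector_t cow = 16777216ULL;      /* 8GiB COW, in 512B sectors */
        unsigned chunk_shift = 4;        /* 8KiB chunks = 16 sectors */
        sector_t max_buckets = 1 << 20;  /* illustrative cap */

        sector_t hash_size = (origin < cow ? origin : cow) >> chunk_shift;
        if (hash_size > max_buckets)
                hash_size = max_buckets;
        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow2(hash_size);

        /* 16777216 >> 4 = 1048576, already a power of two */
        printf("hash_size = %llu\n", (unsigned long long)hash_size);
        return 0;
}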
/*
- * Round a number up to the nearest 'size' boundary. size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
- size--;
- return (n + size) & ~size;
-}
-
-static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
- char **error)
-{
- unsigned long chunk_size;
- char *value;
-
- chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
- if (*chunk_size_arg == '\0' || *value != '\0') {
- *error = "Invalid chunk size";
- return -EINVAL;
- }
-
- if (!chunk_size) {
- s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
- return 0;
- }
-
- /*
- * Chunk size must be multiple of page size. Silently
- * round up if it's not.
- */
- chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
-
- /* Check chunk_size is a power of 2 */
- if (!is_power_of_2(chunk_size)) {
- *error = "Chunk size is not a power of 2";
- return -EINVAL;
- }
-
- /* Validate the chunk size against the device block size */
- if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
- *error = "Chunk size is not a multiple of device blocksize";
- return -EINVAL;
- }
-
- s->chunk_size = chunk_size;
- s->chunk_mask = chunk_size - 1;
- s->chunk_shift = ffs(chunk_size) - 1;
-
- return 0;
-}
-
-/*
* Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
*/
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
struct dm_snapshot *s;
int i;
int r = -EINVAL;
- char persistent;
- char *origin_path;
- char *cow_path;
+ char *origin_path, *cow_path;
+ unsigned args_used;
if (argc != 4) {
ti->error = "requires exactly 4 arguments";
r = -EINVAL;
- goto bad1;
+ goto bad;
}
origin_path = argv[0];
- cow_path = argv[1];
- persistent = toupper(*argv[2]);
-
- if (persistent != 'P' && persistent != 'N') {
- ti->error = "Persistent flag is not P or N";
- r = -EINVAL;
- goto bad1;
- }
+ argv++;
+ argc--;
s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s == NULL) {
+ if (!s) {
ti->error = "Cannot allocate snapshot context private "
"structure";
r = -ENOMEM;
- goto bad1;
+ goto bad;
}
- r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
- if (r) {
- ti->error = "Cannot get origin device";
- goto bad2;
- }
+ cow_path = argv[0];
+ argv++;
+ argc--;
r = dm_get_device(ti, cow_path, 0, 0,
FMODE_READ | FMODE_WRITE, &s->cow);
if (r) {
- dm_put_device(ti, s->origin);
ti->error = "Cannot get COW device";
- goto bad2;
+ goto bad_cow;
}
- r = set_chunk_size(s, argv[3], &ti->error);
- if (r)
- goto bad3;
+ r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
+ if (r) {
+ ti->error = "Couldn't create exception store";
+ r = -EINVAL;
+ goto bad_store;
+ }
- s->type = persistent;
+ argv += args_used;
+ argc -= args_used;
+ r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
+ if (r) {
+ ti->error = "Cannot get origin device";
+ goto bad_origin;
+ }
+
+ s->ti = ti;
s->valid = 1;
s->active = 0;
+ s->suspended = 0;
+ atomic_set(&s->pending_exceptions_count, 0);
init_rwsem(&s->lock);
+ INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
- s->ti = ti;
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
ti->error = "Unable to allocate hash table space";
r = -ENOMEM;
- goto bad3;
- }
-
- s->store.snap = s;
-
- if (persistent == 'P')
- r = dm_create_persistent(&s->store);
- else
- r = dm_create_transient(&s->store);
-
- if (r) {
- ti->error = "Couldn't create exception store";
- r = -EINVAL;
- goto bad4;
+ goto bad_hash_tables;
}
r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
if (r) {
ti->error = "Could not create kcopyd client";
- goto bad5;
+ goto bad_kcopyd;
}
s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
if (!s->pending_pool) {
ti->error = "Could not allocate mempool for pending exceptions";
- goto bad6;
+ goto bad_pending_pool;
}
s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
spin_lock_init(&s->tracked_chunk_lock);
- /* Metadata must only be loaded into one table at once */
- r = s->store.read_metadata(&s->store);
- if (r < 0) {
- ti->error = "Failed to read snapshot metadata";
- goto bad_load_and_register;
- } else if (r > 0) {
- s->valid = 0;
- DMWARN("Snapshot is marked invalid.");
- }
-
bio_list_init(&s->queued_bios);
INIT_WORK(&s->queued_bios_work, flush_queued_bios);
+ ti->private = s;
+ ti->num_flush_requests = 1;
+
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
- if (register_snapshot(s)) {
- r = -EINVAL;
- ti->error = "Cannot register snapshot origin";
+ r = register_snapshot(s);
+ if (r == -ENOMEM) {
+ ti->error = "Snapshot origin struct allocation failed";
+ goto bad_load_and_register;
+ } else if (r < 0) {
+ /* invalid handover, register_snapshot has set ti->error */
goto bad_load_and_register;
}
- ti->private = s;
- ti->split_io = s->chunk_size;
+ /*
+ * Metadata must only be loaded into one table at once, so skip this
+ * if metadata will be handed over during resume.
+ * Chunk size will be set during the handover - set it to zero to
+ * ensure it's ignored.
+ */
+ if (r > 0) {
+ s->store->chunk_size = 0;
+ return 0;
+ }
+
+ r = s->store->type->read_metadata(s->store, dm_add_exception,
+ (void *)s);
+ if (r < 0) {
+ ti->error = "Failed to read snapshot metadata";
+ goto bad_read_metadata;
+ } else if (r > 0) {
+ s->valid = 0;
+ DMWARN("Snapshot is marked invalid.");
+ }
+
+ if (!s->store->chunk_size) {
+ ti->error = "Chunk size not set";
+ goto bad_read_metadata;
+ }
+ ti->split_io = s->store->chunk_size;
return 0;
- bad_load_and_register:
+bad_read_metadata:
+ unregister_snapshot(s);
+
+bad_load_and_register:
mempool_destroy(s->tracked_chunk_pool);
- bad_tracked_chunk_pool:
+bad_tracked_chunk_pool:
mempool_destroy(s->pending_pool);
- bad6:
+bad_pending_pool:
dm_kcopyd_client_destroy(s->kcopyd_client);
- bad5:
- s->store.destroy(&s->store);
+bad_kcopyd:
+ dm_exception_table_exit(&s->pending, pending_cache);
+ dm_exception_table_exit(&s->complete, exception_cache);
+
+bad_hash_tables:
+ dm_put_device(ti, s->origin);
- bad4:
- exit_exception_table(&s->pending, pending_cache);
- exit_exception_table(&s->complete, exception_cache);
+bad_origin:
+ dm_exception_store_destroy(s->store);
- bad3:
+bad_store:
dm_put_device(ti, s->cow);
- dm_put_device(ti, s->origin);
- bad2:
+bad_cow:
kfree(s);
- bad1:
+bad:
return r;
}
dm_kcopyd_client_destroy(s->kcopyd_client);
s->kcopyd_client = NULL;
- exit_exception_table(&s->pending, pending_cache);
- exit_exception_table(&s->complete, exception_cache);
+ dm_exception_table_exit(&s->pending, pending_cache);
+ dm_exception_table_exit(&s->complete, exception_cache);
+}
- s->store.destroy(&s->store);
+static void __handover_exceptions(struct dm_snapshot *snap_src,
+ struct dm_snapshot *snap_dest)
+{
+ union {
+ struct dm_exception_table table_swap;
+ struct dm_exception_store *store_swap;
+ } u;
+
+ /*
+ * Swap all snapshot context information between the two instances.
+ */
+ u.table_swap = snap_dest->complete;
+ snap_dest->complete = snap_src->complete;
+ snap_src->complete = u.table_swap;
+
+ u.store_swap = snap_dest->store;
+ snap_dest->store = snap_src->store;
+ snap_src->store = u.store_swap;
+
+ snap_dest->store->snap = snap_dest;
+ snap_src->store->snap = snap_src;
+
+ snap_dest->ti->split_io = snap_dest->store->chunk_size;
+ snap_dest->valid = snap_src->valid;
+
+ /*
+ * Set source invalid to ensure it receives no further I/O.
+ */
+ snap_src->valid = 0;
}
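
The union here is just a scratch variable wide enough for either member being swapped, so both swaps share one temporary. A trivial standalone illustration of the idiom:

#include <stdio.h>

int main(void)
{
        int a = 1, b = 2;
        const char *p = "src", *q = "dest";

        union {
                int i;
                const char *s;
        } u;

        u.i = a; a = b; b = u.i;        /* swap the ints */
        u.s = p; p = q; q = u.s;        /* swap the pointers, same scratch */

        printf("%d %d %s %s\n", a, b, p, q); /* 2 1 dest src */
        return 0;
}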
static void snapshot_dtr(struct dm_target *ti)
int i;
#endif
struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
flush_workqueue(ksnapd);
+ down_read(&_origins_lock);
+ /* Check whether exception handover must be cancelled */
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
+ if (snap_src && snap_dest && (s == snap_src)) {
+ down_write(&snap_dest->lock);
+ snap_dest->valid = 0;
+ up_write(&snap_dest->lock);
+ DMERR("Cancelling snapshot handover.");
+ }
+ up_read(&_origins_lock);
+
/* Prevent further origin writes from using this snapshot. */
/* After this returns there can be no new kcopyd jobs. */
unregister_snapshot(s);
+ while (atomic_read(&s->pending_exceptions_count))
+ msleep(1);
+ /*
+ * Ensure instructions in mempool_destroy aren't reordered
+ * before atomic_read.
+ */
+ smp_mb();
+
#ifdef CONFIG_DM_DEBUG
for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
mempool_destroy(s->pending_pool);
dm_put_device(ti, s->origin);
+
+ dm_exception_store_destroy(s->store);
+
dm_put_device(ti, s->cow);
kfree(s);
else if (err == -ENOMEM)
DMERR("Invalidating snapshot: Unable to allocate exception.");
- if (s->store.drop_snapshot)
- s->store.drop_snapshot(&s->store);
+ if (s->store->type->drop_snapshot)
+ s->store->type->drop_snapshot(s->store);
s->valid = 0;
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
struct dm_snapshot *s = pe->snap;
struct bio *origin_bios = NULL;
struct bio *snapshot_bios = NULL;
goto out;
}
- e = alloc_exception();
+ e = alloc_completed_exception();
if (!e) {
down_write(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
down_write(&s->lock);
if (!s->valid) {
- free_exception(e);
+ free_completed_exception(e);
error = 1;
goto out;
}
- /*
- * Check for conflicting reads. This is extremely improbable,
- * so yield() is sufficient and there is no need for a wait queue.
- */
- while (__chunk_is_tracked(s, pe->e.old_chunk))
- yield();
+ /* Check for conflicting reads */
+ __check_for_conflicting_io(s, pe->e.old_chunk);
/*
* Add a proper exception, and remove the
* in-flight exception from the list.
*/
- insert_completed_exception(s, e);
+ dm_insert_exception(&s->complete, e);
out:
- remove_exception(&pe->e);
+ dm_remove_exception(&pe->e);
snapshot_bios = bio_list_get(&pe->snapshot_bios);
origin_bios = put_pending_exception(pe);
else
/* Update the metadata if we are persistent */
- s->store.commit_exception(&s->store, &pe->e, commit_callback,
- pe);
+ s->store->type->commit_exception(s->store, &pe->e,
+ commit_callback, pe);
}
/*
dev_size = get_dev_size(bdev);
src.bdev = bdev;
- src.sector = chunk_to_sector(s, pe->e.old_chunk);
- src.count = min(s->chunk_size, dev_size - src.sector);
+ src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
+ src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
dest.bdev = s->cow->bdev;
- dest.sector = chunk_to_sector(s, pe->e.new_chunk);
+ dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
dest.count = src.count;
/* Hand over to kcopyd */
&src, 1, &dest, 0, copy_callback, pe);
}
+static struct dm_snap_pending_exception *
+__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
+{
+ struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);
+
+ if (!e)
+ return NULL;
+
+ return container_of(e, struct dm_snap_pending_exception, e);
+}
+
/*
* Looks to see if this snapshot already has a pending exception
* for this chunk, otherwise it allocates a new one and inserts
* this.
*/
static struct dm_snap_pending_exception *
-__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
+__find_pending_exception(struct dm_snapshot *s,
+ struct dm_snap_pending_exception *pe, chunk_t chunk)
{
- struct dm_snap_exception *e;
- struct dm_snap_pending_exception *pe;
- chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
-
- /*
- * Is there a pending exception for this already ?
- */
- e = lookup_exception(&s->pending, chunk);
- if (e) {
- /* cast the exception to a pending exception */
- pe = container_of(e, struct dm_snap_pending_exception, e);
- goto out;
- }
+ struct dm_snap_pending_exception *pe2;
- /*
- * Create a new pending exception, we don't want
- * to hold the lock while we do this.
- */
- up_write(&s->lock);
- pe = alloc_pending_exception(s);
- down_write(&s->lock);
-
- if (!s->valid) {
+ pe2 = __lookup_pending_exception(s, chunk);
+ if (pe2) {
free_pending_exception(pe);
- return NULL;
- }
-
- e = lookup_exception(&s->pending, chunk);
- if (e) {
- free_pending_exception(pe);
- pe = container_of(e, struct dm_snap_pending_exception, e);
- goto out;
+ return pe2;
}
pe->e.old_chunk = chunk;
atomic_set(&pe->ref_count, 0);
pe->started = 0;
- if (s->store.prepare_exception(&s->store, &pe->e)) {
+ if (s->store->type->prepare_exception(s->store, &pe->e)) {
free_pending_exception(pe);
return NULL;
}
get_pending_exception(pe);
- insert_exception(&s->pending, &pe->e);
+ dm_insert_exception(&s->pending, &pe->e);
- out:
return pe;
}
-static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
+static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk)
{
bio->bi_bdev = s->cow->bdev;
- bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
- (chunk - e->old_chunk)) +
- (bio->bi_sector & s->chunk_mask);
+ bio->bi_sector = chunk_to_sector(s->store,
+ dm_chunk_number(e->new_chunk) +
+ (chunk - e->old_chunk)) +
+ (bio->bi_sector &
+ s->store->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context)
{
- struct dm_snap_exception *e;
+ struct dm_exception *e;
struct dm_snapshot *s = ti->private;
int r = DM_MAPIO_REMAPPED;
chunk_t chunk;
struct dm_snap_pending_exception *pe = NULL;
- chunk = sector_to_chunk(s, bio->bi_sector);
+ if (unlikely(bio_empty_barrier(bio))) {
+ bio->bi_bdev = s->cow->bdev;
+ return DM_MAPIO_REMAPPED;
+ }
+
+ chunk = sector_to_chunk(s->store, bio->bi_sector);
/* Full snapshots are not usable */
/* To get here the table must be live so s->active is always set. */
}
/* If the block is already remapped - use that, else remap it */
- e = lookup_exception(&s->complete, chunk);
+ e = dm_lookup_exception(&s->complete, chunk);
if (e) {
remap_exception(s, e, bio, chunk);
goto out_unlock;
* writeable.
*/
if (bio_rw(bio) == WRITE) {
- pe = __find_pending_exception(s, bio);
+ pe = __lookup_pending_exception(s, chunk);
if (!pe) {
- __invalidate_snapshot(s, -ENOMEM);
- r = -EIO;
- goto out_unlock;
+ up_write(&s->lock);
+ pe = alloc_pending_exception(s);
+ down_write(&s->lock);
+
+ if (!s->valid) {
+ free_pending_exception(pe);
+ r = -EIO;
+ goto out_unlock;
+ }
+
+ e = dm_lookup_exception(&s->complete, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ remap_exception(s, e, bio, chunk);
+ goto out_unlock;
+ }
+
+ pe = __find_pending_exception(s, pe, chunk);
+ if (!pe) {
+ __invalidate_snapshot(s, -ENOMEM);
+ r = -EIO;
+ goto out_unlock;
+ }
}
remap_exception(s, &pe->e, bio, chunk);
return 0;
}
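
The write path above drops s->lock around alloc_pending_exception() because mempool allocation may block, then must recheck both the complete and pending tables after retaking the lock, since a racing thread may have installed the same exception meanwhile. A minimal pthread sketch of that drop-allocate-recheck shape (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;      /* stands in for the pending-exception table */

/* Called with lock held; returns the entry for this chunk. */
static void *find_or_create(void)
{
        void *pe;

        if (slot)                       /* fast path: already present */
                return slot;

        pthread_mutex_unlock(&lock);    /* don't hold the lock while... */
        pe = malloc(64);                /* ...a blocking allocation runs */
        pthread_mutex_lock(&lock);

        if (slot) {                     /* recheck: we raced and lost */
                free(pe);
                return slot;
        }
        slot = pe;                      /* we won: publish our entry */
        return pe;
}

int main(void)
{
        pthread_mutex_lock(&lock);
        printf("entry %p\n", find_or_create());
        pthread_mutex_unlock(&lock);
        return 0;
}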
+static void snapshot_postsuspend(struct dm_target *ti)
+{
+ struct dm_snapshot *s = ti->private;
+
+ down_write(&s->lock);
+ s->suspended = 1;
+ up_write(&s->lock);
+}
+
+static int snapshot_preresume(struct dm_target *ti)
+{
+ int r = 0;
+ struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
+ if (snap_src && snap_dest) {
+ down_read(&snap_src->lock);
+ if (s == snap_src) {
+ DMERR("Unable to resume snapshot source until "
+ "handover completes.");
+ r = -EINVAL;
+ } else if (!snap_src->suspended) {
+ DMERR("Unable to perform snapshot handover until "
+ "source is suspended.");
+ r = -EINVAL;
+ }
+ up_read(&snap_src->lock);
+ }
+ up_read(&_origins_lock);
+
+ return r;
+}
+
static void snapshot_resume(struct dm_target *ti)
{
struct dm_snapshot *s = ti->private;
+ struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
+
+ down_read(&_origins_lock);
+ (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
+ if (snap_src && snap_dest) {
+ down_write(&snap_src->lock);
+ down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
+ __handover_exceptions(snap_src, snap_dest);
+ up_write(&snap_dest->lock);
+ up_write(&snap_src->lock);
+ }
+ up_read(&_origins_lock);
+
+ /* Now we have correct chunk size, reregister */
+ reregister_snapshot(s);
down_write(&s->lock);
s->active = 1;
+ s->suspended = 0;
up_write(&s->lock);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
char *result, unsigned int maxlen)
{
+ unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
switch (type) {
case STATUSTYPE_INFO:
+
+ down_write(&snap->lock);
+
if (!snap->valid)
- snprintf(result, maxlen, "Invalid");
+ DMEMIT("Invalid");
else {
- if (snap->store.fraction_full) {
- sector_t numerator, denominator;
- snap->store.fraction_full(&snap->store,
- &numerator,
- &denominator);
- snprintf(result, maxlen, "%llu/%llu",
- (unsigned long long)numerator,
- (unsigned long long)denominator);
+ if (snap->store->type->usage) {
+ sector_t total_sectors, sectors_allocated,
+ metadata_sectors;
+ snap->store->type->usage(snap->store,
+ &total_sectors,
+ &sectors_allocated,
+ &metadata_sectors);
+ DMEMIT("%llu/%llu %llu",
+ (unsigned long long)sectors_allocated,
+ (unsigned long long)total_sectors,
+ (unsigned long long)metadata_sectors);
}
else
- snprintf(result, maxlen, "Unknown");
+ DMEMIT("Unknown");
}
+
+ up_write(&snap->lock);
+
break;
case STATUSTYPE_TABLE:
* to make private copies if the output is to
* make sense.
*/
- snprintf(result, maxlen, "%s %s %c %llu",
- snap->origin->name, snap->cow->name,
- snap->type,
- (unsigned long long)snap->chunk_size);
+ DMEMIT("%s %s", snap->origin->name, snap->cow->name);
+ snap->store->type->status(snap->store, type, result + sz,
+ maxlen - sz);
break;
}
return 0;
}
+static int snapshot_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct dm_snapshot *snap = ti->private;
+
+ return fn(ti, snap->origin, 0, ti->len, data);
+}
+
/*-----------------------------------------------------------------
* Origin methods
*---------------------------------------------------------------*/
-static int __origin_write(struct list_head *snapshots, struct bio *bio)
+
+/*
+ * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
+ * supplied bio was ignored. The caller may submit it immediately.
+ * (No remapping actually occurs as the origin is always a direct linear
+ * map.)
+ *
+ * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
+ * and any supplied bio is added to a list to be submitted once all
+ * the necessary exceptions exist.
+ */
+static int __origin_write(struct list_head *snapshots, sector_t sector,
+ struct bio *bio)
{
int r = DM_MAPIO_REMAPPED, first = 0;
struct dm_snapshot *snap;
- struct dm_snap_exception *e;
+ struct dm_exception *e;
struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
chunk_t chunk;
LIST_HEAD(pe_queue);
goto next_snapshot;
/* Nothing to do if writing beyond end of snapshot */
- if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
+ if (sector >= dm_table_get_size(snap->ti->table))
goto next_snapshot;
/*
* Remember, different snapshots can have
* different chunk sizes.
*/
- chunk = sector_to_chunk(snap, bio->bi_sector);
+ chunk = sector_to_chunk(snap->store, sector);
/*
* Check exception table to see if block
* ref_count is initialised to 1 so pending_complete()
* won't destroy the primary_pe while we're inside this loop.
*/
- e = lookup_exception(&snap->complete, chunk);
+ e = dm_lookup_exception(&snap->complete, chunk);
if (e)
goto next_snapshot;
- pe = __find_pending_exception(snap, bio);
+ pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
- __invalidate_snapshot(snap, -ENOMEM);
- goto next_snapshot;
+ up_write(&snap->lock);
+ pe = alloc_pending_exception(snap);
+ down_write(&snap->lock);
+
+ if (!snap->valid) {
+ free_pending_exception(pe);
+ goto next_snapshot;
+ }
+
+ e = dm_lookup_exception(&snap->complete, chunk);
+ if (e) {
+ free_pending_exception(pe);
+ goto next_snapshot;
+ }
+
+ pe = __find_pending_exception(snap, pe, chunk);
+ if (!pe) {
+ __invalidate_snapshot(snap, -ENOMEM);
+ goto next_snapshot;
+ }
}
if (!primary_pe) {
first = 1;
}
- bio_list_add(&primary_pe->origin_bios, bio);
+ if (bio)
+ bio_list_add(&primary_pe->origin_bios, bio);
r = DM_MAPIO_SUBMITTED;
}
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
- r = __origin_write(&o->snapshots, bio);
+ r = __origin_write(&o->snapshots, bio->bi_sector, bio);
up_read(&_origins_lock);
return r;
}
ti->private = dev;
+ ti->num_flush_requests = 1;
+
return 0;
}
struct dm_dev *dev = ti->private;
bio->bi_bdev = dev->bdev;
+ if (unlikely(bio_empty_barrier(bio)))
+ return DM_MAPIO_REMAPPED;
+
/* Only tell snapshots if this is a write */
return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
-
/*
* Set the target "split_io" field to the minimum of all the snapshots'
* chunk sizes.
static void origin_resume(struct dm_target *ti)
{
struct dm_dev *dev = ti->private;
- struct dm_snapshot *snap;
- struct origin *o;
- chunk_t chunk_size = 0;
down_read(&_origins_lock);
- o = __lookup_origin(dev->bdev);
- if (o)
- list_for_each_entry (snap, &o->snapshots, list)
- chunk_size = min_not_zero(chunk_size, snap->chunk_size);
- up_read(&_origins_lock);
- ti->split_io = chunk_size;
+ ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));
+
+ up_read(&_origins_lock);
}
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
return 0;
}
+static int origin_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct dm_dev *dev = ti->private;
+
+ return fn(ti, dev, 0, ti->len, data);
+}
+
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 6, 0},
+ .version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
.map = origin_map,
.resume = origin_resume,
.status = origin_status,
+ .iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 6, 0},
+ .version = {1, 9, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
.map = snapshot_map,
.end_io = snapshot_end_io,
+ .postsuspend = snapshot_postsuspend,
+ .preresume = snapshot_preresume,
.resume = snapshot_resume,
.status = snapshot_status,
+ .iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
int r;
+ r = dm_exception_store_init();
+ if (r) {
+ DMERR("Failed to initialize exception stores");
+ return r;
+ }
+
r = dm_register_target(&snapshot_target);
if (r) {
DMERR("snapshot target register failed %d", r);
- return r;
+ goto bad_register_snapshot_target;
}
r = dm_register_target(&origin_target);
goto bad2;
}
- exception_cache = KMEM_CACHE(dm_snap_exception, 0);
+ exception_cache = KMEM_CACHE(dm_exception, 0);
if (!exception_cache) {
DMERR("Couldn't create exception cache.");
r = -ENOMEM;
return 0;
- bad_pending_pool:
+bad_pending_pool:
kmem_cache_destroy(tracked_chunk_cache);
- bad5:
+bad5:
kmem_cache_destroy(pending_cache);
- bad4:
+bad4:
kmem_cache_destroy(exception_cache);
- bad3:
+bad3:
exit_origin_hash();
- bad2:
+bad2:
dm_unregister_target(&origin_target);
- bad1:
+bad1:
dm_unregister_target(&snapshot_target);
+
+bad_register_snapshot_target:
+ dm_exception_store_exit();
return r;
}
static void __exit dm_snapshot_exit(void)
{
- int r;
-
destroy_workqueue(ksnapd);
- r = dm_unregister_target(&snapshot_target);
- if (r)
- DMERR("snapshot unregister failed %d", r);
-
- r = dm_unregister_target(&origin_target);
- if (r)
- DMERR("origin unregister failed %d", r);
+ dm_unregister_target(&snapshot_target);
+ dm_unregister_target(&origin_target);
exit_origin_hash();
kmem_cache_destroy(pending_cache);
kmem_cache_destroy(exception_cache);
kmem_cache_destroy(tracked_chunk_cache);
+
+ dm_exception_store_exit();
}
/* Module hooks */