#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
+#include <linux/log2.h>
#define DM_MSG_PREFIX "raid1"
+#define DM_IO_PAGES 64
+
+#define DM_RAID1_HANDLE_ERRORS 0x01
+#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
struct list_head clean_regions;
struct list_head quiesced_regions;
struct list_head recovered_regions;
+ struct list_head failed_recovered_regions;
};
enum {
/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
+ struct mirror_set *ms;
atomic_t error_count;
struct dm_dev *dev;
sector_t offset;
};

struct mirror_set {
struct dm_target *ti;
struct list_head list;
struct region_hash rh;
struct kcopyd_client *kcopyd_client;
+ uint64_t features;
spinlock_t lock; /* protects the next two lists */
struct bio_list reads;
struct bio_list writes;
+ struct dm_io_client *io_client;
+
/* recovery */
region_t nr_regions;
int in_sync;
+ int log_failure;
struct mirror *default_mirror; /* Default mirror */
INIT_LIST_HEAD(&rh->clean_regions);
INIT_LIST_HEAD(&rh->quiesced_regions);
INIT_LIST_HEAD(&rh->recovered_regions);
+ INIT_LIST_HEAD(&rh->failed_recovered_regions);
rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
sizeof(struct region));
LIST_HEAD(clean);
LIST_HEAD(recovered);
+ LIST_HEAD(failed_recovered);
/*
 * Quickly grab the lists.
 */
list_splice(&rh->clean_regions, &clean);
INIT_LIST_HEAD(&rh->clean_regions);
- list_for_each_entry (reg, &clean, list) {
- rh->log->type->clear_region(rh->log, reg->key);
+ list_for_each_entry(reg, &clean, list)
list_del(&reg->hash_list);
- }
}
if (!list_empty(&rh->recovered_regions)) {
list_for_each_entry (reg, &recovered, list)
list_del(&reg->hash_list);
}
+
+ if (!list_empty(&rh->failed_recovered_regions)) {
+ list_splice(&rh->failed_recovered_regions, &failed_recovered);
+ INIT_LIST_HEAD(&rh->failed_recovered_regions);
+
+ list_for_each_entry(reg, &failed_recovered, list)
+ list_del(&reg->hash_list);
+ }
+
spin_unlock(&rh->region_lock);
write_unlock_irq(&rh->hash_lock);
mempool_free(reg, rh->region_pool);
}
- if (!list_empty(&recovered))
- rh->log->type->flush(rh->log);
+ list_for_each_entry_safe(reg, next, &failed_recovered, list) {
+ complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
+ mempool_free(reg, rh->region_pool);
+ }
- list_for_each_entry_safe (reg, next, &clean, list)
+ list_for_each_entry_safe(reg, next, &clean, list) {
+ rh->log->type->clear_region(rh->log, reg->key);
mempool_free(reg, rh->region_pool);
+ }
+
+ rh->log->type->flush(rh->log);
}
static void rh_inc(struct region_hash *rh, region_t region)
return reg;
}
-/* FIXME: success ignored for now */
static void rh_recovery_end(struct region *reg, int success)
{
struct region_hash *rh = reg->rh;
spin_lock_irq(&rh->region_lock);
- list_add(&reg->list, &reg->rh->recovered_regions);
+ if (success)
+ list_add(&reg->list, &reg->rh->recovered_regions);
+ else {
+ reg->state = RH_NOSYNC;
+ list_add(&reg->list, &reg->rh->failed_recovered_regions);
+ }
spin_unlock_irq(&rh->region_lock);
wake(rh->ms);
}
-static void rh_flush(struct region_hash *rh)
+static int rh_flush(struct region_hash *rh)
{
- rh->log->type->flush(rh->log);
+ return rh->log->type->flush(rh->log);
}
static void rh_delay(struct region_hash *rh, struct bio *bio)
{
static void recovery_complete(int read_err, unsigned int write_err,
void *context)
{
struct region *reg = (struct region *) context;
- /* FIXME: better error handling */
+ if (read_err)
+ /* A read error means the default mirror has failed. */
+ DMERR_LIMIT("Unable to read primary mirror during recovery");
+
+ if (write_err)
+ DMERR_LIMIT("Write error during recovery (error = 0x%x)",
+ write_err);
+
rh_recovery_end(reg, !(read_err || write_err));
}
/*
* We can only read balance if the region is in sync.
*/
- if (rh_in_sync(&ms->rh, region, 0))
+ if (rh_in_sync(&ms->rh, region, 1))
m = choose_mirror(ms, bio->bi_sector);
else
m = ms->default_mirror;
break;
}
}
- bio_endio(bio, bio->bi_size, 0);
+ bio_endio(bio, 0);
}
static void do_write(struct mirror_set *ms, struct bio *bio)
{
unsigned int i;
struct io_region io[KCOPYD_MAX_REGIONS+1];
struct mirror *m;
+ struct dm_io_request io_req = {
+ .bi_rw = WRITE,
+ .mem.type = DM_IO_BVEC,
+ .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+ .notify.fn = write_callback,
+ .notify.context = bio,
+ .client = ms->io_client,
+ };
for (i = 0; i < ms->nr_mirrors; i++) {
m = ms->mirror + i;
io[i].bdev = m->dev->bdev;
io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
io[i].count = bio->bi_size >> 9;
}
bio_set_ms(bio, ms);
- dm_io_async_bvec(ms->nr_mirrors, io, WRITE,
- bio->bi_io_vec + bio->bi_idx,
- write_callback, bio);
+
+ (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
*/
rh_inc_pending(&ms->rh, &sync);
rh_inc_pending(&ms->rh, &nosync);
- rh_flush(&ms->rh);
+ ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;
/*
* Dispatch io.
*/
- while ((bio = bio_list_pop(&sync)))
+ if (unlikely(ms->log_failure))
+ while ((bio = bio_list_pop(&sync)))
+ bio_endio(bio, -EIO);
+ else while ((bio = bio_list_pop(&sync)))
do_write(ms, bio);
while ((bio = bio_list_pop(&recover)))
rh_delay(&ms->rh, bio);
len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);
- ms = kmalloc(len, GFP_KERNEL);
+ ms = kzalloc(len, GFP_KERNEL);
if (!ms) {
ti->error = "Cannot allocate mirror context";
return NULL;
}
- memset(ms, 0, len);
spin_lock_init(&ms->lock);
ms->ti = ti;
ms->in_sync = 0;
ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];
+ ms->io_client = dm_io_client_create(DM_IO_PAGES);
+ if (IS_ERR(ms->io_client)) {
+ ti->error = "Error creating dm_io client";
+ kfree(ms);
+ return NULL;
+ }
+
if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
ti->error = "Error creating dirty region hash";
+ dm_io_client_destroy(ms->io_client);
kfree(ms);
return NULL;
}
while (m--)
dm_put_device(ti, ms->mirror[m].dev);
+ dm_io_client_destroy(ms->io_client);
rh_exit(&ms->rh);
kfree(ms);
}
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
- return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
+ return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
size > ti->len);
}
return -ENXIO;
}
+ ms->mirror[mirror].ms = ms;
ms->mirror[mirror].offset = offset;
return 0;
return dl;
}
+static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
+ unsigned *args_used)
+{
+ unsigned num_features;
+ struct dm_target *ti = ms->ti;
+
+ *args_used = 0;
+
+ if (!argc)
+ return 0;
+
+ if (sscanf(argv[0], "%u", &num_features) != 1) {
+ ti->error = "Invalid number of features";
+ return -EINVAL;
+ }
+
+ argc--;
+ argv++;
+ (*args_used)++;
+
+ if (num_features > argc) {
+ ti->error = "Not enough arguments to support feature count";
+ return -EINVAL;
+ }
+
+ if (!strcmp("handle_errors", argv[0]))
+ ms->features |= DM_RAID1_HANDLE_ERRORS;
+ else {
+ ti->error = "Unrecognised feature requested";
+ return -EINVAL;
+ }
+
+ (*args_used)++;
+
+ return 0;
+}
+
/*
* Construct a mirror mapping:
*
* log_type #log_params <log_params>
* #mirrors [mirror_path offset]{2,}
+ * [#features <features>]
*
* log_type is "core" or "disk"
* #log_params is between 1 and 3
+ *
+ * If present, features must be "handle_errors".
*/
-#define DM_IO_PAGES 64
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
argv++, argc--;
- if (argc != nr_mirrors * 2) {
- ti->error = "Wrong number of mirror arguments";
+ if (argc < nr_mirrors * 2) {
+ ti->error = "Too few mirror arguments";
dm_destroy_dirty_log(dl);
return -EINVAL;
}
ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
if (!ms->kmirrord_wq) {
DMERR("couldn't start kmirrord");
- free_context(ms, ti, m);
- return -ENOMEM;
+ r = -ENOMEM;
+ goto err_free_context;
}
INIT_WORK(&ms->kmirrord_work, do_mirror);
- r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
- if (r) {
- destroy_workqueue(ms->kmirrord_wq);
- free_context(ms, ti, ms->nr_mirrors);
- return r;
+ r = parse_features(ms, argc, argv, &args_used);
+ if (r)
+ goto err_destroy_wq;
+
+ argv += args_used;
+ argc -= args_used;
+
+ /*
+ * Any read-balancing addition depends on the
+ * DM_RAID1_HANDLE_ERRORS flag being present.
+ * This is because the decision to balance depends
+ * on the sync state of a region. If the above
+ * flag is not present, we ignore errors; and
+ * the sync state may be inaccurate.
+ */
+
+ if (argc) {
+ ti->error = "Too many mirror arguments";
+ r = -EINVAL;
+ goto err_destroy_wq;
}
+ r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+ if (r)
+ goto err_destroy_wq;
+
wake(ms);
return 0;
+
+err_destroy_wq:
+ destroy_workqueue(ms->kmirrord_wq);
+err_free_context:
+ free_context(ms, ti, ms->nr_mirrors);
+ return r;
}
static void mirror_dtr(struct dm_target *ti)
wait_event(_kmirrord_recovery_stopped,
!atomic_read(&ms->rh.recovery_in_flight));
- if (log->type->suspend && log->type->suspend(log))
+ if (log->type->postsuspend && log->type->postsuspend(log))
/* FIXME: need better error handling */
DMWARN("log suspend failed");
}
for (m = 0; m < ms->nr_mirrors; m++)
DMEMIT("%s ", ms->mirror[m].dev->name);
- DMEMIT("%llu/%llu",
+ DMEMIT("%llu/%llu 0 ",
(unsigned long long)ms->rh.log->type->
get_sync_count(ms->rh.log),
(unsigned long long)ms->nr_regions);
- sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);
+ sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);
break;
for (m = 0; m < ms->nr_mirrors; m++)
DMEMIT(" %s %llu", ms->mirror[m].dev->name,
(unsigned long long)ms->mirror[m].offset);
+
+ if (ms->features & DM_RAID1_HANDLE_ERRORS)
+ DMEMIT(" 1 handle_errors");
}
return 0;
r = dm_register_target(&mirror_target);
if (r < 0) {
- DMERR("%s: Failed to register mirror target",
- mirror_target.name);
+ DMERR("Failed to register mirror target");
dm_dirty_log_exit();
}
r = dm_unregister_target(&mirror_target);
if (r < 0)
- DMERR("%s: unregister failed %d", mirror_target.name, r);
+ DMERR("unregister failed %d", r);
dm_dirty_log_exit();
}