block: move bio list helpers into bio.h
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fea966d..6a386ab 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -5,9 +5,9 @@
  * This file is released under the GPL.
  */
 
-#include "dm.h"
+#include <linux/device-mapper.h>
+
 #include "dm-path-selector.h"
-#include "dm-bio-list.h"
 #include "dm-bio-record.h"
 #include "dm-uevent.h"
 
@@ -30,9 +30,11 @@ struct pgpath {
        struct list_head list;
 
        struct priority_group *pg;      /* Owning PG */
+       unsigned is_active;             /* Path status */
        unsigned fail_count;            /* Cumulative failure count */
 
        struct dm_path path;
+       struct work_struct deactivate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -63,6 +65,7 @@ struct multipath {
 
        const char *hw_handler_name;
        struct work_struct activate_path;
+       struct pgpath *pgpath_to_activate;
        unsigned nr_priority_groups;
        struct list_head priority_groups;
        unsigned pg_init_required;      /* pg_init needs calling? */
@@ -111,6 +114,7 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void process_queued_ios(struct work_struct *work);
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
+static void deactivate_path(struct work_struct *work);
 
 
 /*-----------------------------------------------
@@ -121,8 +125,10 @@ static struct pgpath *alloc_pgpath(void)
 {
        struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
 
-       if (pgpath)
-               pgpath->path.is_active = 1;
+       if (pgpath) {
+               pgpath->is_active = 1;
+               INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+       }
 
        return pgpath;
 }
@@ -132,6 +138,14 @@ static void free_pgpath(struct pgpath *pgpath)
        kfree(pgpath);
 }
 
+static void deactivate_path(struct work_struct *work)
+{
+       struct pgpath *pgpath =
+               container_of(work, struct pgpath, deactivate_path);
+
+       blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+}
+
 static struct priority_group *alloc_priority_group(void)
 {
        struct priority_group *pg;
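
Annotation: the preceding hunks add a per-path work item. alloc_pgpath() arms it with INIT_WORK(), and deactivate_path() recovers the owning struct pgpath via container_of() before calling blk_abort_queue() on the path's request queue. A minimal, self-contained sketch of that embedded-work pattern, using invented names rather than anything from this patch:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_path {
        int id;
        struct work_struct deactivate_work;     /* deferred per-path work */
};

/* Handler: recover the owning object from the embedded work_struct. */
static void demo_deactivate(struct work_struct *work)
{
        struct demo_path *p =
                container_of(work, struct demo_path, deactivate_work);

        pr_info("deactivating path %d\n", p->id);
}

static struct demo_path *demo_alloc_path(void)
{
        struct demo_path *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p)
                INIT_WORK(&p->deactivate_work, demo_deactivate);
        return p;
}

/* From a context that must not block (e.g. under a spinlock):
 *         schedule_work(&p->deactivate_work);
 */

Queuing the work is cheap and non-sleeping, while the handler itself runs later in process context.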
@@ -146,11 +160,19 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
+       unsigned long flags;
        struct pgpath *pgpath, *tmp;
+       struct multipath *m = ti->private;
 
        list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
                list_del(&pgpath->list);
+               if (m->hw_handler_name)
+                       scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
                dm_put_device(ti, pgpath->path.dev);
+               spin_lock_irqsave(&m->lock, flags);
+               if (m->pgpath_to_activate == pgpath)
+                       m->pgpath_to_activate = NULL;
+               spin_unlock_irqrestore(&m->lock, flags);
                free_pgpath(pgpath);
        }
 }
@@ -423,7 +445,8 @@ static void process_queued_ios(struct work_struct *work)
            (!pgpath && !m->queue_if_no_path))
                must_queue = 0;
 
-       if (m->pg_init_required && !m->pg_init_in_progress) {
+       if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
+               m->pgpath_to_activate = pgpath;
                m->pg_init_count++;
                m->pg_init_required = 0;
                m->pg_init_in_progress = 1;
@@ -548,16 +571,17 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 {
        int r;
        struct pgpath *p;
+       struct multipath *m = ti->private;
 
        /* we need at least a path arg */
        if (as->argc < 1) {
                ti->error = "no device given";
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        p = alloc_pgpath();
        if (!p)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        r = dm_get_device(ti, shift(as), ti->begin, ti->len,
                          dm_table_get_mode(ti->table), &p->path.dev);
@@ -566,6 +590,15 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
                goto bad;
        }
 
+       if (m->hw_handler_name) {
+               r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
+                                  m->hw_handler_name);
+               if (r < 0) {
+                       dm_put_device(ti, p->path.dev);
+                       goto bad;
+               }
+       }
+
        r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
        if (r) {
                dm_put_device(ti, p->path.dev);
@@ -576,7 +609,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
 
  bad:
        free_pgpath(p);
-       return NULL;
+       return ERR_PTR(r);
 }
 
 static struct priority_group *parse_priority_group(struct arg_set *as,
@@ -594,14 +627,14 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
        if (as->argc < 2) {
                as->argc = 0;
-               ti->error = "not enough priority group aruments";
-               return NULL;
+               ti->error = "not enough priority group arguments";
+               return ERR_PTR(-EINVAL);
        }
 
        pg = alloc_priority_group();
        if (!pg) {
                ti->error = "couldn't allocate priority group";
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
        pg->m = m;
 
@@ -634,8 +667,10 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
                path_args.argv = as->argv;
 
                pgpath = parse_path(&path_args, &pg->ps, ti);
-               if (!pgpath)
+               if (IS_ERR(pgpath)) {
+                       r = PTR_ERR(pgpath);
                        goto bad;
+               }
 
                pgpath->pg = pg;
                list_add_tail(&pgpath->list, &pg->pgpaths);
@@ -646,7 +681,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as,
 
  bad:
        free_priority_group(pg, ti);
-       return NULL;
+       return ERR_PTR(r);
 }
 
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
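
Annotation: the parse_path() and parse_priority_group() hunks above stop returning bare NULL on failure and instead encode the reason with ERR_PTR(-EINVAL) or ERR_PTR(-ENOMEM); the multipath_ctr() hunk further down unwraps the value with IS_ERR()/PTR_ERR() so the real errno reaches the caller. A small sketch of that <linux/err.h> idiom, with hypothetical names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_ctx {
        int setting;
};

/* Producer: encode the reason for failure in the returned pointer. */
static struct demo_ctx *demo_parse(const char *arg)
{
        struct demo_ctx *ctx;

        if (!arg)
                return ERR_PTR(-EINVAL);        /* bad arguments */

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);        /* allocation failure */

        return ctx;
}

/* Consumer: distinguish the failure modes instead of assuming -EINVAL. */
static int demo_ctr(const char *arg)
{
        struct demo_ctx *ctx = demo_parse(arg);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        kfree(ctx);
        return 0;
}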
@@ -672,6 +707,10 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m)
                m->hw_handler_name = NULL;
                return -EINVAL;
        }
+
+       if (hw_argc > 1)
+               DMWARN("Ignoring user-specified arguments for "
+                      "hardware handler \"%s\"", m->hw_handler_name);
        consume(as, hw_argc - 1);
 
        return 0;
@@ -765,8 +804,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
                struct priority_group *pg;
 
                pg = parse_priority_group(&as, m);
-               if (!pg) {
-                       r = -EINVAL;
+               if (IS_ERR(pg)) {
+                       r = PTR_ERR(pg);
                        goto bad;
                }
 
@@ -814,7 +853,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
        dm_bio_record(&mpio->details, bio);
 
        map_context->ptr = mpio;
-       bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+       bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
        r = map_io(m, bio, mpio, 0);
        if (r < 0 || r == DM_MAPIO_REQUEUE)
                mempool_free(mpio, m->mpio_pool);
@@ -832,13 +871,13 @@ static int fail_path(struct pgpath *pgpath)
 
        spin_lock_irqsave(&m->lock, flags);
 
-       if (!pgpath->path.is_active)
+       if (!pgpath->is_active)
                goto out;
 
        DMWARN("Failing path %s.", pgpath->path.dev->name);
 
        pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
-       pgpath->path.is_active = 0;
+       pgpath->is_active = 0;
        pgpath->fail_count++;
 
        m->nr_valid_paths--;
@@ -849,7 +888,8 @@ static int fail_path(struct pgpath *pgpath)
        dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
                      pgpath->path.dev->name, m->nr_valid_paths);
 
-       queue_work(kmultipathd, &m->trigger_event);
+       schedule_work(&m->trigger_event);
+       queue_work(kmultipathd, &pgpath->deactivate_path);
 
 out:
        spin_unlock_irqrestore(&m->lock, flags);
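
Annotation: fail_path() above changes only cheap state (is_active, fail_count, nr_valid_paths) under m->lock with spin_lock_irqsave(); the heavier follow-up is merely queued there (trigger_event via schedule_work(), the per-path deactivate_path work via queue_work()), which is legal under a spinlock because queuing never sleeps. A rough sketch of that shape, names invented:

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_dev {
        spinlock_t lock;
        unsigned active;
        struct work_struct notify;      /* heavier follow-up, runs later */
};

static int demo_fail(struct demo_dev *d)
{
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);

        if (!d->active)                 /* already failed: nothing to do */
                goto out;

        d->active = 0;                  /* cheap bookkeeping under the lock */
        schedule_work(&d->notify);      /* does not sleep, so legal here */

out:
        spin_unlock_irqrestore(&d->lock, flags);
        return 0;
}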
@@ -868,7 +908,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
        spin_lock_irqsave(&m->lock, flags);
 
-       if (pgpath->path.is_active)
+       if (pgpath->is_active)
                goto out;
 
        if (!pgpath->pg->ps.type->reinstate_path) {
@@ -882,7 +922,7 @@ static int reinstate_path(struct pgpath *pgpath)
        if (r)
                goto out;
 
-       pgpath->path.is_active = 1;
+       pgpath->is_active = 1;
 
        m->current_pgpath = NULL;
        if (!m->nr_valid_paths++ && m->queue_size)
@@ -891,7 +931,7 @@ static int reinstate_path(struct pgpath *pgpath)
        dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
                      pgpath->path.dev->name, m->nr_valid_paths);
 
-       queue_work(kmultipathd, &m->trigger_event);
+       schedule_work(&m->trigger_event);
 
 out:
        spin_unlock_irqrestore(&m->lock, flags);
@@ -935,7 +975,7 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
 
        spin_unlock_irqrestore(&m->lock, flags);
 
-       queue_work(kmultipathd, &m->trigger_event);
+       schedule_work(&m->trigger_event);
 }
 
 /*
@@ -965,7 +1005,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
        }
        spin_unlock_irqrestore(&m->lock, flags);
 
-       queue_work(kmultipathd, &m->trigger_event);
+       schedule_work(&m->trigger_event);
        return 0;
 }
 
@@ -1080,8 +1120,15 @@ static void activate_path(struct work_struct *work)
        int ret;
        struct multipath *m =
                container_of(work, struct multipath, activate_path);
-       struct dm_path *path = &m->current_pgpath->path;
+       struct dm_path *path;
+       unsigned long flags;
 
+       spin_lock_irqsave(&m->lock, flags);
+       path = &m->pgpath_to_activate->path;
+       m->pgpath_to_activate = NULL;
+       spin_unlock_irqrestore(&m->lock, flags);
+       if (!path)
+               return;
        ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
        pg_init_done(path, ret);
 }
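
Annotation: the activate_path() hunk, together with the earlier changes to process_queued_ios() and free_pgpaths(), treats m->pgpath_to_activate as a hand-off slot guarded by m->lock: the queuing side stores the chosen path, the teardown side clears it if the path is being freed, and the worker claims and clears it before touching the device. A simplified sketch of that claim-under-lock hand-off, with hypothetical names (this version tests the slot itself before dereferencing it):

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_path;

struct demo_mpath {
        spinlock_t lock;
        struct demo_path *to_activate;  /* hand-off slot, guarded by lock */
        struct work_struct activate;
};

static void demo_do_activate(struct demo_path *p)
{
        /* stand-in for the real activation step, e.g. scsi_dh_activate() */
        (void)p;
}

static void demo_activate_worker(struct work_struct *work)
{
        struct demo_mpath *m =
                container_of(work, struct demo_mpath, activate);
        struct demo_path *p;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        p = m->to_activate;             /* claim the slot ...             */
        m->to_activate = NULL;          /* ... and clear it for next time */
        spin_unlock_irqrestore(&m->lock, flags);

        if (!p)                         /* slot was cleared: path is gone */
                return;

        demo_do_activate(p);            /* heavy work done outside the lock */
}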
@@ -1263,7 +1310,7 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
 
                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s %s %u ", p->path.dev->name,
-                                      p->path.is_active ? "A" : "F",
+                                      p->is_active ? "A" : "F",
                                       p->fail_count);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
@@ -1352,19 +1399,15 @@ error:
        return -EINVAL;
 }
 
-static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
-                          struct file *filp, unsigned int cmd,
+static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                           unsigned long arg)
 {
        struct multipath *m = (struct multipath *) ti->private;
        struct block_device *bdev = NULL;
+       fmode_t mode = 0;
        unsigned long flags;
-       struct file fake_file = {};
-       struct dentry fake_dentry = {};
        int r = 0;
 
-       fake_file.f_path.dentry = &fake_dentry;
-
        spin_lock_irqsave(&m->lock, flags);
 
        if (!m->current_pgpath)
@@ -1372,8 +1415,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
 
        if (m->current_pgpath) {
                bdev = m->current_pgpath->path.dev->bdev;
-               fake_dentry.d_inode = bdev->bd_inode;
-               fake_file.f_mode = m->current_pgpath->path.dev->mode;
+               mode = m->current_pgpath->path.dev->mode;
        }
 
        if (m->queue_io)
@@ -1383,8 +1425,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode,
 
        spin_unlock_irqrestore(&m->lock, flags);
 
-       return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file,
-                                        bdev->bd_disk, cmd, arg);
+       return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 }
 
 /*-----------------------------------------------------------------
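
Annotation: the ioctl hunks above drop the fabricated struct file/struct dentry pair and hand the chosen path's block device and open mode straight to __blkdev_driver_ioctl(), mirroring the new target ioctl signature. A stripped-down sketch of that delegation, with an invented wrapper name:

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/fs.h>

/* Forward a device ioctl to whatever block device currently backs us. */
static int demo_forward_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned int cmd, unsigned long arg)
{
        if (!bdev)
                return -EAGAIN;         /* no usable path at the moment */

        return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}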
@@ -1453,14 +1494,10 @@ static int __init dm_multipath_init(void)
 
 static void __exit dm_multipath_exit(void)
 {
-       int r;
-
        destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);
 
-       r = dm_unregister_target(&multipath_target);
-       if (r < 0)
-               DMERR("target unregister failed %d", r);
+       dm_unregister_target(&multipath_target);
        kmem_cache_destroy(_mpio_cache);
 }