diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 9b1e2f5..94116ea 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/mutex.h>
 #include <asm/atomic.h>
 
+#define DM_MSG_PREFIX "table"
+
 #define MAX_DEPTH 16
 #define NODE_SIZE L1_CACHE_BYTES
 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
 #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
 
 struct dm_table {
+       struct mapped_device *md;
        atomic_t holders;
 
        /* btree table */
@@ -95,8 +99,15 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
        lhs->max_segment_size =
                min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
 
+       lhs->max_hw_sectors =
+               min_not_zero(lhs->max_hw_sectors, rhs->max_hw_sectors);
+
        lhs->seg_boundary_mask =
                min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
+
+       lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
+
+       lhs->no_cluster |= rhs->no_cluster;
 }
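
Throughout this file, a limit of zero means "unrestricted", so combining limits only tightens them when both sides actually set one; no_cluster, by contrast, is accumulated with |= because clustering must stay disabled if any underlying device forbids it. For reference, the helper macro defined near the top of this file in this era of the tree:

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))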
 
 /*
@@ -181,8 +192,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 
        /*
         * Allocate both the target array and offset array at once.
+        * Append an empty entry to catch sectors beyond the end of
+        * the device.
         */
-       n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+       n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;
@@ -204,14 +217,14 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
        return 0;
 }
 
-int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
+int dm_table_create(struct dm_table **result, int mode,
+                   unsigned num_targets, struct mapped_device *md)
 {
-       struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+       struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
 
        if (!t)
                return -ENOMEM;
 
-       memset(t, 0, sizeof(*t));
        INIT_LIST_HEAD(&t->devices);
        atomic_set(&t->holders, 1);
 
@@ -227,6 +240,7 @@ int dm_table_create(struct dm_table **result, int mode, unsigned num_targets)
        }
 
        t->mode = mode;
+       t->md = md;
        *result = t;
        return 0;
 }
@@ -235,9 +249,8 @@ static void free_devices(struct list_head *devices)
 {
        struct list_head *tmp, *next;
 
-       for (tmp = devices->next; tmp != devices; tmp = next) {
+       list_for_each_safe(tmp, next, devices) {
                struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
-               next = tmp->next;
                kfree(dd);
        }
 }
@@ -310,7 +323,7 @@ static int lookup_device(const char *path, dev_t *dev)
        if ((r = path_lookup(path, LOOKUP_FOLLOW, &nd)))
                return r;
 
-       inode = nd.dentry->d_inode;
+       inode = nd.path.dentry->d_inode;
        if (!inode) {
                r = -ENOENT;
                goto out;
@@ -324,7 +337,7 @@ static int lookup_device(const char *path, dev_t *dev)
        *dev = inode->i_rdev;
 
  out:
-       path_release(&nd);
+       path_put(&nd.path);
        return r;
 }
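
The two changes above track the VFS struct-path refactor: struct nameidata now embeds a struct path, and path_put() replaces path_release() as the call that drops both the vfsmount and dentry references it carries:

/* include/linux/path.h */
struct path {
	struct vfsmount *mnt;
	struct dentry *dentry;
};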
 
@@ -345,20 +358,19 @@ static struct dm_dev *find_device(struct list_head *l, dev_t dev)
 /*
  * Open a device so we can use it as a map destination.
  */
-static int open_dev(struct dm_dev *d, dev_t dev)
+static int open_dev(struct dm_dev *d, dev_t dev, struct mapped_device *md)
 {
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;
 
        int r;
 
-       if (d->bdev)
-               BUG();
+       BUG_ON(d->bdev);
 
        bdev = open_by_devnum(dev, d->mode);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
-       r = bd_claim(bdev, _claim_ptr);
+       r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
        if (r)
                blkdev_put(bdev);
        else
@@ -369,24 +381,26 @@ static int open_dev(struct dm_dev *d, dev_t dev)
 /*
  * Close a device that we've been using.
  */
-static void close_dev(struct dm_dev *d)
+static void close_dev(struct dm_dev *d, struct mapped_device *md)
 {
        if (!d->bdev)
                return;
 
-       bd_release(d->bdev);
+       bd_release_from_disk(d->bdev, dm_disk(md));
        blkdev_put(d->bdev);
        d->bdev = NULL;
 }
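
bd_claim_by_disk() and bd_release_from_disk() behave like bd_claim()/bd_release() but additionally maintain the holders/slaves symlinks in sysfs between the underlying device and the claiming disk; that is why the mapped_device must now be threaded into open_dev() and close_dev(). Their declarations, as found in this era's fs/block_dev.c:

int bd_claim_by_disk(struct block_device *bdev, void *holder,
		     struct gendisk *disk);
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk);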
 
 /*
- * If possible (ie. blk_size[major] is set), this checks an area
- * of a destination device is valid.
+ * If possible, this checks an area of a destination device is valid.
  */
 static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
 {
-       sector_t dev_size;
-       dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+       sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+
+       if (!dev_size)
+               return 1;
+
        return ((start < dev_size) && (len <= (dev_size - start)));
 }
 
@@ -395,7 +409,7 @@ static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
  * careful to leave things as they were if we fail to reopen the
  * device.
  */
-static int upgrade_mode(struct dm_dev *dd, int new_mode)
+static int upgrade_mode(struct dm_dev *dd, int new_mode, struct mapped_device *md)
 {
        int r;
        struct dm_dev dd_copy;
@@ -405,9 +419,9 @@ static int upgrade_mode(struct dm_dev *dd, int new_mode)
 
        dd->mode |= new_mode;
        dd->bdev = NULL;
-       r = open_dev(dd, dev);
+       r = open_dev(dd, dev, md);
        if (!r)
-               close_dev(&dd_copy);
+               close_dev(&dd_copy, md);
        else
                *dd = dd_copy;
 
@@ -423,12 +437,11 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                              int mode, struct dm_dev **result)
 {
        int r;
-       dev_t dev;
+       dev_t uninitialized_var(dev);
        struct dm_dev *dd;
        unsigned int major, minor;
 
-       if (!t)
-               BUG();
+       BUG_ON(!t);
 
        if (sscanf(path, "%u:%u", &major, &minor) == 2) {
                /* Extract the major/minor numbers */
@@ -450,7 +463,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                dd->mode = mode;
                dd->bdev = NULL;
 
-               if ((r = open_dev(dd, dev))) {
+               if ((r = open_dev(dd, dev, t->md))) {
                        kfree(dd);
                        return r;
                }
@@ -461,7 +474,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
                list_add(&dd->list, &t->devices);
 
        } else if (dd->mode != (mode | dd->mode)) {
-               r = upgrade_mode(dd, mode);
+               r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
@@ -478,54 +491,66 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
        return 0;
 }
 
+void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
+{
+       struct request_queue *q = bdev_get_queue(bdev);
+       struct io_restrictions *rs = &ti->limits;
+
+       /*
+        * Combine the device limits low.
+        *
+        * FIXME: if we move an io_restriction struct
+        *        into q this would just be a call to
+        *        combine_restrictions_low()
+        */
+       rs->max_sectors =
+               min_not_zero(rs->max_sectors, q->max_sectors);
+
+       /* FIXME: Device-Mapper on top of RAID-0 breaks because DM
+        *        currently doesn't honor MD's merge_bvec_fn routine.
+        *        In this case, we'll force DM to use PAGE_SIZE or
+        *        smaller I/O, just to be safe. A better fix is in the
+        *        works, but add this for the time being so it will at
+        *        least operate correctly.
+        */
+       if (q->merge_bvec_fn)
+               rs->max_sectors =
+                       min_not_zero(rs->max_sectors,
+                                    (unsigned int) (PAGE_SIZE >> 9));
+
+       rs->max_phys_segments =
+               min_not_zero(rs->max_phys_segments,
+                            q->max_phys_segments);
+
+       rs->max_hw_segments =
+               min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+
+       rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+
+       rs->max_segment_size =
+               min_not_zero(rs->max_segment_size, q->max_segment_size);
+
+       rs->max_hw_sectors =
+               min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+
+       rs->seg_boundary_mask =
+               min_not_zero(rs->seg_boundary_mask,
+                            q->seg_boundary_mask);
+
+       rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+
+       rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+}
+EXPORT_SYMBOL_GPL(dm_set_device_limits);
 
 int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
                  sector_t len, int mode, struct dm_dev **result)
 {
        int r = __table_get_device(ti->table, ti, path,
                                   start, len, mode, result);
-       if (!r) {
-               request_queue_t *q = bdev_get_queue((*result)->bdev);
-               struct io_restrictions *rs = &ti->limits;
-
-               /*
-                * Combine the device limits low.
-                *
-                * FIXME: if we move an io_restriction struct
-                *        into q this would just be a call to
-                *        combine_restrictions_low()
-                */
-               rs->max_sectors =
-                       min_not_zero(rs->max_sectors, q->max_sectors);
-
-               /* FIXME: Device-Mapper on top of RAID-0 breaks because DM
-                *        currently doesn't honor MD's merge_bvec_fn routine.
-                *        In this case, we'll force DM to use PAGE_SIZE or
-                *        smaller I/O, just to be safe. A better fix is in the
-                *        works, but add this for the time being so it will at
-                *        least operate correctly.
-                */
-               if (q->merge_bvec_fn)
-                       rs->max_sectors =
-                               min_not_zero(rs->max_sectors,
-                                            (unsigned int) (PAGE_SIZE >> 9));
-
-               rs->max_phys_segments =
-                       min_not_zero(rs->max_phys_segments,
-                                    q->max_phys_segments);
-
-               rs->max_hw_segments =
-                       min_not_zero(rs->max_hw_segments, q->max_hw_segments);
-
-               rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
-
-               rs->max_segment_size =
-                       min_not_zero(rs->max_segment_size, q->max_segment_size);
-
-               rs->seg_boundary_mask =
-                       min_not_zero(rs->seg_boundary_mask,
-                                    q->seg_boundary_mask);
-       }
+
+       if (!r)
+               dm_set_device_limits(ti, (*result)->bdev);
 
        return r;
 }
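
Nothing changes for target authors: a single dm_get_device() call still opens the device and now folds its queue limits into ti->limits via the newly exported dm_set_device_limits(). A minimal constructor sketch, modeled on dm-linear's ctr; the example_* names are hypothetical:

struct example_c {			/* hypothetical context, cf. linear_c */
	struct dm_dev *dev;
	sector_t start;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_c *ec;
	unsigned long long tmp;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = kmalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	if (sscanf(argv[1], "%llu", &tmp) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	ec->start = tmp;

	/* Opens the device; on success dm_set_device_limits() has run. */
	if (dm_get_device(ti, argv[0], ec->start, ti->len,
			  dm_table_get_mode(ti->table), &ec->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->private = ec;
	return 0;

 bad:
	kfree(ec);
	return -EINVAL;
}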
@@ -536,7 +561,7 @@ int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
 void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
 {
        if (atomic_dec_and_test(&dd->count)) {
-               close_dev(dd);
+               close_dev(dd, ti->table->md);
                list_del(&dd->list);
                kfree(dd);
        }
@@ -584,6 +609,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
        unsigned array_size = 0;
 
        *argc = 0;
+
+       if (!input) {
+               *argvp = NULL;
+               return 0;
+       }
+
        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;
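
The new guard presumably covers targets that take no parameters at all: a NULL input string now yields an empty argument vector instead of being dereferenced. A hedged usage sketch:

int argc;
char **argv;
int r;

r = dm_split_args(&argc, &argv, NULL);	/* no parameter string supplied */
/* r == 0, argc == 0, argv == NULL; the usual kfree(argv) stays a no-op */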
@@ -639,6 +670,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 {
        if (!rs->max_sectors)
                rs->max_sectors = SAFE_MAX_SECTORS;
+       if (!rs->max_hw_sectors)
+               rs->max_hw_sectors = SAFE_MAX_SECTORS;
        if (!rs->max_phys_segments)
                rs->max_phys_segments = MAX_PHYS_SEGMENTS;
        if (!rs->max_hw_segments)
@@ -649,6 +682,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
                rs->max_segment_size = MAX_SEGMENT_SIZE;
        if (!rs->seg_boundary_mask)
                rs->seg_boundary_mask = -1;
+       if (!rs->bounce_pfn)
+               rs->bounce_pfn = -1;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
@@ -665,15 +700,14 @@ int dm_table_add_target(struct dm_table *t, const char *type,
        memset(tgt, 0, sizeof(*tgt));
 
        if (!len) {
-               tgt->error = "zero-length target";
-               DMERR("%s", tgt->error);
+               DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }
 
        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
-               tgt->error = "unknown target type";
-               DMERR("%s", tgt->error);
+               DMERR("%s: %s: unknown target type", dm_device_name(t->md),
+                     type);
                return -EINVAL;
        }
 
@@ -710,7 +744,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
        return 0;
 
  bad:
-       DMERR("%s", tgt->error);
+       DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
 }
@@ -732,7 +766,7 @@ static int setup_indexes(struct dm_table *t)
                return -ENOMEM;
 
        /* set up internal nodes, bottom-up */
-       for (i = t->depth - 2, total = 0; i >= 0; i--) {
+       for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
@@ -765,14 +799,14 @@ int dm_table_complete(struct dm_table *t)
        return r;
 }
 
-static DECLARE_MUTEX(_event_lock);
+static DEFINE_MUTEX(_event_lock);
 void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
 {
-       down(&_event_lock);
+       mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
-       up(&_event_lock);
+       mutex_unlock(&_event_lock);
 }
 
 void dm_table_event(struct dm_table *t)
@@ -783,10 +817,10 @@ void dm_table_event(struct dm_table *t)
         */
        BUG_ON(in_interrupt());
 
-       down(&_event_lock);
+       mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
-       up(&_event_lock);
+       mutex_unlock(&_event_lock);
 }
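
DECLARE_MUTEX(), despite its name, declared a counting semaphore initialized to one; switching to DEFINE_MUTEX() gives a real struct mutex with stricter semantics and lockdep coverage, which is what the new <linux/mutex.h> include at the top of the file is for. The conversion pattern:

/* Before: a semaphore used as a mutex. */
static DECLARE_MUTEX(_event_lock);
down(&_event_lock);
up(&_event_lock);

/* After: a proper mutex, visible to mutex debugging and lockdep. */
static DEFINE_MUTEX(_event_lock);
mutex_lock(&_event_lock);
mutex_unlock(&_event_lock);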
 
 sector_t dm_table_get_size(struct dm_table *t)
@@ -796,7 +830,7 @@ sector_t dm_table_get_size(struct dm_table *t)
 
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 {
-       if (index > t->num_targets)
+       if (index >= t->num_targets)
                return NULL;
 
        return t->targets + index;
@@ -804,6 +838,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
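
The blank sentinel entry appended in alloc_targets() is what makes this trap work: a lookup beyond the end of the device lands on a zeroed dm_target instead of reading past the array. dm_target_is_valid() lives in dm.h; in this series it should simply be checking the table back-pointer, which the sentinel never gets:

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)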
@@ -833,7 +870,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
        q->max_hw_segments = t->limits.max_hw_segments;
        q->hardsect_size = t->limits.hardsect_size;
        q->max_segment_size = t->limits.max_segment_size;
+       q->max_hw_sectors = t->limits.max_hw_sectors;
        q->seg_boundary_mask = t->limits.seg_boundary_mask;
+       q->bounce_pfn = t->limits.bounce_pfn;
+
+       if (t->limits.no_cluster)
+               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+       else
+               queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
+
 }
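
The _unlocked queue-flag helpers are the non-atomic variants, adequate here because the queue flags are not under concurrent modification while a table is being bound. Per this era's include/linux/blkdev.h they reduce to plain bit ops:

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}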
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -872,7 +917,7 @@ void dm_table_presuspend_targets(struct dm_table *t)
        if (!t)
                return;
 
-       return suspend_targets(t, 0);
+       suspend_targets(t, 0);
 }
 
 void dm_table_postsuspend_targets(struct dm_table *t)
@@ -880,12 +925,23 @@ void dm_table_postsuspend_targets(struct dm_table *t)
        if (!t)
                return;
 
-       return suspend_targets(t, 1);
+       suspend_targets(t, 1);
 }
 
-void dm_table_resume_targets(struct dm_table *t)
+int dm_table_resume_targets(struct dm_table *t)
 {
-       int i;
+       int i, r = 0;
+
+       for (i = 0; i < t->num_targets; i++) {
+               struct dm_target *ti = t->targets + i;
+
+               if (!ti->type->preresume)
+                       continue;
+
+               r = ti->type->preresume(ti);
+               if (r)
+                       return r;
+       }
 
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;
@@ -893,17 +949,18 @@ void dm_table_resume_targets(struct dm_table *t)
                if (ti->type->resume)
                        ti->type->resume(ti);
        }
+
+       return 0;
 }
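
Resume is now two-phase: every target's ->preresume may fail and abort the whole resume, after which ->resume (which cannot fail) runs for each target. A hypothetical target wiring up the new hook; all example_* names are made up:

static int example_preresume(struct dm_target *ti)
{
	struct example_c *ec = ti->private;	/* hypothetical context */

	/* Returning non-zero here aborts the resume of the whole table. */
	return ec->ready ? 0 : -EBUSY;
}

static struct target_type example_target = {
	.name      = "example",
	.version   = {1, 0, 0},
	.module    = THIS_MODULE,
	.ctr       = example_ctr,
	.preresume = example_preresume,
	/* .dtr, .map, .resume, ... */
};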
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 {
-       struct list_head *d, *devices;
+       struct dm_dev *dd;
+       struct list_head *devices = dm_table_get_devices(t);
        int r = 0;
 
-       devices = dm_table_get_devices(t);
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
+       list_for_each_entry(dd, devices, list) {
+               struct request_queue *q = bdev_get_queue(dd->bdev);
                r |= bdi_congested(&q->backing_dev_info, bdi_bits);
        }
 
@@ -912,37 +969,21 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 
 void dm_table_unplug_all(struct dm_table *t)
 {
-       struct list_head *d, *devices = dm_table_get_devices(t);
+       struct dm_dev *dd;
+       struct list_head *devices = dm_table_get_devices(t);
 
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
+       list_for_each_entry(dd, devices, list) {
+               struct request_queue *q = bdev_get_queue(dd->bdev);
 
-               if (q->unplug_fn)
-                       q->unplug_fn(q);
+               blk_unplug(q);
        }
 }
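
blk_unplug() subsumes the open-coded ->unplug_fn test (and also emits a blktrace event); stripped of tracing it is roughly:

void blk_unplug(struct request_queue *q)
{
	/* Devices don't necessarily have an ->unplug_fn defined. */
	if (q->unplug_fn)
		q->unplug_fn(q);
}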
 
-int dm_table_flush_all(struct dm_table *t)
+struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
-       struct list_head *d, *devices = dm_table_get_devices(t);
-       int ret = 0;
-
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
-               int err;
-
-               if (!q->issue_flush_fn)
-                       err = -EOPNOTSUPP;
-               else
-                       err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
-               if (!ret)
-                       ret = err;
-       }
+       dm_get(t->md);
 
-       return ret;
+       return t->md;
 }
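
Note that dm_table_get_md() returns with a reference taken via dm_get(), so a caller must balance it:

struct mapped_device *md = dm_table_get_md(t);

/* ... use md ... */

dm_put(md);	/* drop the reference dm_table_get_md() took */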
 
 EXPORT_SYMBOL(dm_vcalloc);
@@ -951,7 +992,7 @@ EXPORT_SYMBOL(dm_put_device);
 EXPORT_SYMBOL(dm_table_event);
 EXPORT_SYMBOL(dm_table_get_size);
 EXPORT_SYMBOL(dm_table_get_mode);
+EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);