dm: use kzalloc
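
Use kzalloc() instead of kmalloc() followed by memset() in dm_table_create().

This diff also carries several related dm-table changes: a new
dm_create_error_table() helper, a zero-size escape in check_device_area(),
the queue-limit merging split out of dm_get_device() into an exported
dm_set_device_limits(), NULL-input handling in dm_split_args(), device
names in dm_table_add_target() error messages, a preresume pass in
dm_table_resume_targets() (which can now fail), struct request_queue in
place of the request_queue_t typedef, and the removal of
dm_table_flush_all().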
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b6183ed..8939e61 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -17,6 +17,8 @@
 #include <linux/mutex.h>
 #include <asm/atomic.h>
 
+#define DM_MSG_PREFIX "table"
+
 #define MAX_DEPTH 16
 #define NODE_SIZE L1_CACHE_BYTES
 #define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
@@ -211,12 +213,11 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 int dm_table_create(struct dm_table **result, int mode,
                    unsigned num_targets, struct mapped_device *md)
 {
-       struct dm_table *t = kmalloc(sizeof(*t), GFP_KERNEL);
+       struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
 
        if (!t)
                return -ENOMEM;
 
-       memset(t, 0, sizeof(*t));
        INIT_LIST_HEAD(&t->devices);
        atomic_set(&t->holders, 1);
 
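kzalloc() is kmalloc() plus zeroing of the allocated memory, which is what
makes the explicit memset() above redundant; roughly:

    /* kzalloc(size, flags) behaves like: */
    void *p = kmalloc(size, flags);
    if (p)
            memset(p, 0, size);
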
@@ -237,6 +238,44 @@ int dm_table_create(struct dm_table **result, int mode,
        return 0;
 }
 
+int dm_create_error_table(struct dm_table **result, struct mapped_device *md)
+{
+       struct dm_table *t;
+       sector_t dev_size = 1;
+       int r;
+
+       /*
+        * Find current size of device.
+        * Default to 1 sector if inactive.
+        */
+       t = dm_get_table(md);
+       if (t) {
+               dev_size = dm_table_get_size(t);
+               dm_table_put(t);
+       }
+
+       r = dm_table_create(&t, FMODE_READ, 1, md);
+       if (r)
+               return r;
+
+       r = dm_table_add_target(t, "error", 0, dev_size, NULL);
+       if (r)
+               goto out;
+
+       r = dm_table_complete(t);
+       if (r)
+               goto out;
+
+       *result = t;
+
+out:
+       if (r)
+               dm_table_put(t);
+
+       return r;
+}
+EXPORT_SYMBOL_GPL(dm_create_error_table);
+
 static void free_devices(struct list_head *devices)
 {
        struct list_head *tmp, *next;
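
A minimal sketch of how a caller might use the new helper; the md pointer
and the error handling here are illustrative, not part of this patch:

    struct dm_table *t;
    int r = dm_create_error_table(&t, md);    /* md: a mapped_device */

    if (!r) {
            /* t is a one-target "error" table spanning the device's
             * current size (or 1 sector if inactive); drop the
             * reference when done with it */
            dm_table_put(t);
    }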
@@ -385,13 +424,15 @@ static void close_dev(struct dm_dev *d, struct mapped_device *md)
 }
 
 /*
- * If possible (ie. blk_size[major] is set), this checks an area
- * of a destination device is valid.
+ * If possible, this checks an area of a destination device is valid.
  */
 static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
 {
-       sector_t dev_size;
-       dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+       sector_t dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
+
+       if (!dev_size)
+               return 1;
+
        return ((start < dev_size) && (len <= (dev_size - start)));
 }
 
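A device size of zero now means the size cannot be determined, so the area
is assumed valid rather than rejected; for a known size the bounds check is
unchanged:

    /* dev_size == 0           -> 1 (cannot check, assume valid)
     * start 0, len 8, size 8  -> 1 (fits exactly)
     * start 4, len 8, size 8  -> 0 (would run past the end)
     */
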
@@ -482,56 +523,61 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti,
        return 0;
 }
 
-
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
-                 sector_t len, int mode, struct dm_dev **result)
+void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 {
-       int r = __table_get_device(ti->table, ti, path,
-                                  start, len, mode, result);
-       if (!r) {
-               request_queue_t *q = bdev_get_queue((*result)->bdev);
-               struct io_restrictions *rs = &ti->limits;
-
-               /*
-                * Combine the device limits low.
-                *
-                * FIXME: if we move an io_restriction struct
-                *        into q this would just be a call to
-                *        combine_restrictions_low()
-                */
+       struct request_queue *q = bdev_get_queue(bdev);
+       struct io_restrictions *rs = &ti->limits;
+
+       /*
+        * Combine the device limits low.
+        *
+        * FIXME: if we move an io_restriction struct
+        *        into q this would just be a call to
+        *        combine_restrictions_low()
+        */
+       rs->max_sectors =
+               min_not_zero(rs->max_sectors, q->max_sectors);
+
+       /* FIXME: Device-Mapper on top of RAID-0 breaks because DM
+        *        currently doesn't honor MD's merge_bvec_fn routine.
+        *        In this case, we'll force DM to use PAGE_SIZE or
+        *        smaller I/O, just to be safe. A better fix is in the
+        *        works, but add this for the time being so it will at
+        *        least operate correctly.
+        */
+       if (q->merge_bvec_fn)
                rs->max_sectors =
-                       min_not_zero(rs->max_sectors, q->max_sectors);
+                       min_not_zero(rs->max_sectors,
+                                    (unsigned int) (PAGE_SIZE >> 9));
 
-               /* FIXME: Device-Mapper on top of RAID-0 breaks because DM
-                *        currently doesn't honor MD's merge_bvec_fn routine.
-                *        In this case, we'll force DM to use PAGE_SIZE or
-                *        smaller I/O, just to be safe. A better fix is in the
-                *        works, but add this for the time being so it will at
-                *        least operate correctly.
-                */
-               if (q->merge_bvec_fn)
-                       rs->max_sectors =
-                               min_not_zero(rs->max_sectors,
-                                            (unsigned int) (PAGE_SIZE >> 9));
+       rs->max_phys_segments =
+               min_not_zero(rs->max_phys_segments,
+                            q->max_phys_segments);
 
-               rs->max_phys_segments =
-                       min_not_zero(rs->max_phys_segments,
-                                    q->max_phys_segments);
+       rs->max_hw_segments =
+               min_not_zero(rs->max_hw_segments, q->max_hw_segments);
 
-               rs->max_hw_segments =
-                       min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+       rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
 
-               rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+       rs->max_segment_size =
+               min_not_zero(rs->max_segment_size, q->max_segment_size);
 
-               rs->max_segment_size =
-                       min_not_zero(rs->max_segment_size, q->max_segment_size);
+       rs->seg_boundary_mask =
+               min_not_zero(rs->seg_boundary_mask,
+                            q->seg_boundary_mask);
 
-               rs->seg_boundary_mask =
-                       min_not_zero(rs->seg_boundary_mask,
-                                    q->seg_boundary_mask);
+       rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+}
+EXPORT_SYMBOL_GPL(dm_set_device_limits);
 
-               rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
-       }
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+                 sector_t len, int mode, struct dm_dev **result)
+{
+       int r = __table_get_device(ti->table, ti, path,
+                                  start, len, mode, result);
+
+       if (!r)
+               dm_set_device_limits(ti, (*result)->bdev);
 
        return r;
 }
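
dm_set_device_limits() leans on the kernel's min_not_zero() helper, which
treats zero as "no limit set" so an unrestricted limit never clobbers a
real one:

    min_not_zero(0, 8);     /* -> 8 */
    min_not_zero(4, 0);     /* -> 4 */
    min_not_zero(4, 8);     /* -> 4 */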
@@ -590,6 +636,12 @@ int dm_split_args(int *argc, char ***argvp, char *input)
        unsigned array_size = 0;
 
        *argc = 0;
+
+       if (!input) {
+               *argvp = NULL;
+               return 0;
+       }
+
        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;
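
The new guard turns a NULL parameter string into a successful empty split
instead of a NULL dereference; from the caller's side (names illustrative):

    int argc;
    char **argv;

    int r = dm_split_args(&argc, &argv, NULL);
    /* r == 0, argc == 0, argv == NULL */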
@@ -671,15 +723,14 @@ int dm_table_add_target(struct dm_table *t, const char *type,
        memset(tgt, 0, sizeof(*tgt));
 
        if (!len) {
-               tgt->error = "zero-length target";
-               DMERR("%s", tgt->error);
+               DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }
 
        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
-               tgt->error = "unknown target type";
-               DMERR("%s", tgt->error);
+               DMERR("%s: %s: unknown target type", dm_device_name(t->md),
+                     type);
                return -EINVAL;
        }
 
@@ -716,7 +767,7 @@ int dm_table_add_target(struct dm_table *t, const char *type,
        return 0;
 
  bad:
-       DMERR("%s", tgt->error);
+       DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
 }
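
With DM_MSG_PREFIX set to "table" and the device name included, the
messages take roughly this shape (the device numbers and target type shown
are hypothetical):

    device-mapper: table: 253:0: zero-length target
    device-mapper: table: 253:0: foo: unknown target type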
@@ -894,9 +945,20 @@ void dm_table_postsuspend_targets(struct dm_table *t)
        return suspend_targets(t, 1);
 }
 
-void dm_table_resume_targets(struct dm_table *t)
+int dm_table_resume_targets(struct dm_table *t)
 {
-       int i;
+       int i, r = 0;
+
+       for (i = 0; i < t->num_targets; i++) {
+               struct dm_target *ti = t->targets + i;
+
+               if (!ti->type->preresume)
+                       continue;
+
+               r = ti->type->preresume(ti);
+               if (r)
+                       return r;
+       }
 
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;
@@ -904,6 +966,8 @@ void dm_table_resume_targets(struct dm_table *t)
                if (ti->type->resume)
                        ti->type->resume(ti);
        }
+
+       return 0;
 }
 
 int dm_table_any_congested(struct dm_table *t, int bdi_bits)
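
Targets can now veto a resume: every preresume hook runs before any resume
hook, and the first nonzero return aborts the operation. A hypothetical
target type opting in might look like this (my_preresume, my_resume and
state_ready() are made-up names, not part of this patch):

    static int my_preresume(struct dm_target *ti)
    {
            /* refuse to resume until the target's state is ready */
            return state_ready(ti) ? 0 : -EAGAIN;
    }

    static struct target_type my_target = {
            .name      = "mytarget",
            /* ... other mandatory fields elided ... */
            .preresume = my_preresume,
            .resume    = my_resume,
    };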
@@ -914,7 +978,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
        devices = dm_table_get_devices(t);
        for (d = devices->next; d != devices; d = d->next) {
                struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
+               struct request_queue *q = bdev_get_queue(dd->bdev);
                r |= bdi_congested(&q->backing_dev_info, bdi_bits);
        }
 
@@ -927,35 +991,13 @@ void dm_table_unplug_all(struct dm_table *t)
 
        for (d = devices->next; d != devices; d = d->next) {
                struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
+               struct request_queue *q = bdev_get_queue(dd->bdev);
 
                if (q->unplug_fn)
                        q->unplug_fn(q);
        }
 }
 
-int dm_table_flush_all(struct dm_table *t)
-{
-       struct list_head *d, *devices = dm_table_get_devices(t);
-       int ret = 0;
-
-       for (d = devices->next; d != devices; d = d->next) {
-               struct dm_dev *dd = list_entry(d, struct dm_dev, list);
-               request_queue_t *q = bdev_get_queue(dd->bdev);
-               int err;
-
-               if (!q->issue_flush_fn)
-                       err = -EOPNOTSUPP;
-               else
-                       err = q->issue_flush_fn(q, dd->bdev->bd_disk, NULL);
-
-               if (!ret)
-                       ret = err;
-       }
-
-       return ret;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
        dm_get(t->md);
@@ -973,4 +1015,3 @@ EXPORT_SYMBOL(dm_table_get_md);
 EXPORT_SYMBOL(dm_table_put);
 EXPORT_SYMBOL(dm_table_get);
 EXPORT_SYMBOL(dm_table_unplug_all);
-EXPORT_SYMBOL(dm_table_flush_all);