dm raid1: implement mirror_flush
drivers/md/dm-region-hash.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define DM_MSG_PREFIX   "region hash"

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of three lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  dm_rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  dm_rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  dm_rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the three
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
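/*
 * Region state transitions, roughly (see the functions below):
 *
 *   DM_RH_CLEAN      -> DM_RH_DIRTY       first write to the region (rh_inc)
 *   DM_RH_DIRTY      -> DM_RH_CLEAN       last pending io completes (dm_rh_dec)
 *   any state        -> DM_RH_RECOVERING  the dirty log hands the region out
 *                                         for resync (__rh_recovery_prepare)
 *   DM_RH_RECOVERING -> freed             recovery finished (dm_rh_recovery_end
 *                                         followed by dm_rh_update_states)
 *   any state        -> DM_RH_NOSYNC      a write or barrier failed
 *                                         (dm_rh_mark_nosync, dm_rh_dec)
 */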
struct dm_region_hash {
        uint32_t region_size;
        unsigned region_shift;

        /* holds persistent region state */
        struct dm_dirty_log *log;

        /* hash table */
        rwlock_t hash_lock;
        mempool_t *region_pool;
        unsigned mask;
        unsigned nr_buckets;
        unsigned prime;
        unsigned shift;
        struct list_head *buckets;

        unsigned max_recovery; /* Max # of regions to recover in parallel */

        spinlock_t region_lock;
        atomic_t recovery_in_flight;
        struct semaphore recovery_count;
        struct list_head clean_regions;
        struct list_head quiesced_regions;
        struct list_head recovered_regions;
        struct list_head failed_recovered_regions;

        /*
         * If there was a barrier failure no regions can be marked clean.
         */
        int barrier_failure;

        void *context;
        sector_t target_begin;

        /* Callback function to schedule bio writes */
        void (*dispatch_bios)(void *context, struct bio_list *bios);

        /* Callback function to wake up the caller's worker thread. */
        void (*wakeup_workers)(void *context);

        /* Callback function to wake up the caller's recovery waiters. */
        void (*wakeup_all_recovery_waiters)(void *context);
};

struct dm_region {
        struct dm_region_hash *rh;      /* FIXME: can we get rid of this ? */
        region_t key;
        int state;

        struct list_head hash_list;
        struct list_head list;

        atomic_t pending;
        struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
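/*
 * region_size is expected to be a power of two, so sector <-> region
 * conversion is a shift by region_shift.  For example (illustrative
 * figures only): with region_size = 1024 sectors, region_shift = 10 and
 * target_begin = 0, a bio at sector 5000 belongs to region
 * 5000 >> 10 = 4, and region 4 starts at sector 4 << 10 = 4096.
 */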
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
        return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
        return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
        return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

void *dm_rh_region_context(struct dm_region *reg)
{
        return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
        return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
        return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()????
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
                void *context, void (*dispatch_bios)(void *context,
                                                     struct bio_list *bios),
                void (*wakeup_workers)(void *context),
                void (*wakeup_all_recovery_waiters)(void *context),
                sector_t target_begin, unsigned max_recovery,
                struct dm_dirty_log *log, uint32_t region_size,
                region_t nr_regions)
{
        struct dm_region_hash *rh;
        unsigned nr_buckets, max_buckets;
        size_t i;

        /*
         * Calculate a suitable number of buckets for our hash
         * table.
         */
        max_buckets = nr_regions >> 6;
        for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
                ;
        nr_buckets >>= 1;
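        /*
         * The result is roughly one bucket per 64-128 regions, with a
         * floor of 64 buckets.  E.g. (illustrative): nr_regions = 10000
         * gives max_buckets = 156, the loop stops at 256, and the final
         * shift leaves 128 buckets.
         */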

        rh = kmalloc(sizeof(*rh), GFP_KERNEL);
        if (!rh) {
                DMERR("unable to allocate region hash memory");
                return ERR_PTR(-ENOMEM);
        }

        rh->context = context;
        rh->dispatch_bios = dispatch_bios;
        rh->wakeup_workers = wakeup_workers;
        rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
        rh->target_begin = target_begin;
        rh->max_recovery = max_recovery;
        rh->log = log;
        rh->region_size = region_size;
        rh->region_shift = ffs(region_size) - 1;
        rwlock_init(&rh->hash_lock);
        rh->mask = nr_buckets - 1;
        rh->nr_buckets = nr_buckets;

        rh->shift = RH_HASH_SHIFT;
        rh->prime = RH_HASH_MULT;

        rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
        if (!rh->buckets) {
                DMERR("unable to allocate region hash bucket memory");
                kfree(rh);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < nr_buckets; i++)
                INIT_LIST_HEAD(rh->buckets + i);

        spin_lock_init(&rh->region_lock);
        sema_init(&rh->recovery_count, 0);
        atomic_set(&rh->recovery_in_flight, 0);
        INIT_LIST_HEAD(&rh->clean_regions);
        INIT_LIST_HEAD(&rh->quiesced_regions);
        INIT_LIST_HEAD(&rh->recovered_regions);
        INIT_LIST_HEAD(&rh->failed_recovered_regions);
        rh->barrier_failure = 0;

        rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
                                                      sizeof(struct dm_region));
        if (!rh->region_pool) {
                vfree(rh->buckets);
                kfree(rh);
                rh = ERR_PTR(-ENOMEM);
        }

        return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);
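/*
 * Rough call sequence for a client such as dm-raid1 (a sketch, not a
 * contract): create the hash with dm_region_hash_create(), call
 * dm_rh_inc_pending() on a batch of writes before issuing them and
 * dm_rh_dec() as each one completes, park writes to regions that are
 * being resynced with dm_rh_delay(), and from the worker thread run
 * dm_rh_update_states() plus dm_rh_recovery_prepare()/_start()/_end()
 * to drive recovery.
 */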

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
        unsigned h;
        struct dm_region *reg, *nreg;

        BUG_ON(!list_empty(&rh->quiesced_regions));
        for (h = 0; h < rh->nr_buckets; h++) {
                list_for_each_entry_safe(reg, nreg, rh->buckets + h,
                                         hash_list) {
                        BUG_ON(atomic_read(&reg->pending));
                        mempool_free(reg, rh->region_pool);
                }
        }

        if (rh->log)
                dm_dirty_log_destroy(rh->log);

        if (rh->region_pool)
                mempool_destroy(rh->region_pool);

        vfree(rh->buckets);
        kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
        return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);

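/*
 * Multiplicative hash: scale the region key by a large constant, keep
 * the higher-order bits and mask down to the bucket count (which is a
 * power of two).
 */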
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
        return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
        struct dm_region *reg;
        struct list_head *bucket = rh->buckets + rh_hash(rh, region);

        list_for_each_entry(reg, bucket, hash_list)
                if (reg->key == region)
                        return reg;

        return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
        list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
        struct dm_region *reg, *nreg;

        nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
        if (unlikely(!nreg))
                nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

        nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
                      DM_RH_CLEAN : DM_RH_NOSYNC;
        nreg->rh = rh;
        nreg->key = region;
        INIT_LIST_HEAD(&nreg->list);
        atomic_set(&nreg->pending, 0);
        bio_list_init(&nreg->delayed_bios);

        write_lock_irq(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        if (reg)
                /* We lost the race. */
                mempool_free(nreg, rh->region_pool);
        else {
                __rh_insert(rh, nreg);
                if (nreg->state == DM_RH_CLEAN) {
                        spin_lock(&rh->region_lock);
                        list_add(&nreg->list, &rh->clean_regions);
                        spin_unlock(&rh->region_lock);
                }

                reg = nreg;
        }
        write_unlock_irq(&rh->hash_lock);

        return reg;
}

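/*
 * Find a region, allocating and inserting it if it is not yet in the
 * hash.  Called with hash_lock held for read; the lock is dropped and
 * re-taken around the allocation in __rh_alloc().
 */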
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
        struct dm_region *reg;

        reg = __rh_lookup(rh, region);
        if (!reg) {
                read_unlock(&rh->hash_lock);
                reg = __rh_alloc(rh, region);
                read_lock(&rh->hash_lock);
        }

        return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
        int r;
        struct dm_region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        if (reg)
                return reg->state;

        /*
         * The region wasn't in the hash, so we fall back to the
         * dirty log.
         */
        r = rh->log->type->in_sync(rh->log, region, may_block);

        /*
         * Any error from the dirty log (eg. -EWOULDBLOCK) gets
         * treated as DM_RH_NOSYNC.
         */
        return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

static void complete_resync_work(struct dm_region *reg, int success)
{
        struct dm_region_hash *rh = reg->rh;

        rh->log->type->set_region_sync(rh->log, reg->key, success);

        /*
         * Dispatch the bios before we call 'wake_up_all'.
         * This is important because if we are suspending,
         * we want to know that recovery is complete and
         * the work queue is flushed.  If we wake_up_all
         * before we dispatch_bios (queue bios and call wake()),
         * then we risk suspending before the work queue
         * has been properly flushed.
         */
        rh->dispatch_bios(rh->context, &reg->delayed_bios);
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                rh->wakeup_all_recovery_waiters(rh->context);
        up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 * @done
 * @error
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh,
                       struct bio *bio, unsigned done, int error)
{
        unsigned long flags;
        struct dm_dirty_log *log = rh->log;
        struct dm_region *reg;
        region_t region = dm_rh_bio_to_region(rh, bio);
        int recovering = 0;

        if (bio_empty_barrier(bio)) {
                rh->barrier_failure = 1;
                return;
        }

        /* We must inform the log that the sync count has changed. */
        log->type->set_region_sync(log, region, 0);

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);
        read_unlock(&rh->hash_lock);

        /* region hash entry should exist because write was in-flight */
        BUG_ON(!reg);
        BUG_ON(!list_empty(&reg->list));

        spin_lock_irqsave(&rh->region_lock, flags);
        /*
         * Possible cases:
         *   1) DM_RH_DIRTY
         *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
         *   3) DM_RH_RECOVERING: flushing pending writes
         * In any of these cases, the region should not be on a list.
         */
        recovering = (reg->state == DM_RH_RECOVERING);
        reg->state = DM_RH_NOSYNC;
        BUG_ON(!list_empty(&reg->list));
        spin_unlock_irqrestore(&rh->region_lock, flags);

        bio_endio(bio, error);
        if (recovering)
                complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);

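/*
 * Drain the clean, recovered and failed_recovered lists: the regions
 * are unhooked from the hash under the locks, then, lock-free, the
 * dirty log is updated, recovered regions run complete_resync_work()
 * (which dispatches their delayed bios), and the region structures are
 * returned to the mempool.  The dirty log is flushed at the end.
 */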
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
        struct dm_region *reg, *next;

        LIST_HEAD(clean);
        LIST_HEAD(recovered);
        LIST_HEAD(failed_recovered);

        /*
         * Quickly grab the lists.
         */
        write_lock_irq(&rh->hash_lock);
        spin_lock(&rh->region_lock);
        if (!list_empty(&rh->clean_regions)) {
                list_splice_init(&rh->clean_regions, &clean);

                list_for_each_entry(reg, &clean, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->recovered_regions)) {
                list_splice_init(&rh->recovered_regions, &recovered);

                list_for_each_entry(reg, &recovered, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->failed_recovered_regions)) {
                list_splice_init(&rh->failed_recovered_regions,
                                 &failed_recovered);

                list_for_each_entry(reg, &failed_recovered, list)
                        list_del(&reg->hash_list);
        }

        spin_unlock(&rh->region_lock);
        write_unlock_irq(&rh->hash_lock);

        /*
         * All the regions on the recovered and clean lists have
         * now been pulled out of the system, so no need to do
         * any more locking.
         */
        list_for_each_entry_safe(reg, next, &recovered, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                complete_resync_work(reg, 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &failed_recovered, list) {
                complete_resync_work(reg, errors_handled ? 0 : 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &clean, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                mempool_free(reg, rh->region_pool);
        }

        rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);

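/*
 * Count another write pending against a region.  The first write to a
 * CLEAN region makes it DIRTY, pulls it off the clean list and marks
 * it in the dirty log.
 */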
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
        struct dm_region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);

        spin_lock_irq(&rh->region_lock);
        atomic_inc(&reg->pending);

        if (reg->state == DM_RH_CLEAN) {
                reg->state = DM_RH_DIRTY;
                list_del_init(&reg->list);      /* take off the clean list */
                spin_unlock_irq(&rh->region_lock);

                rh->log->type->mark_region(rh->log, reg->key);
        } else
                spin_unlock_irq(&rh->region_lock);

        read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
        struct bio *bio;

        for (bio = bios->head; bio; bio = bio->bi_next) {
                if (bio_empty_barrier(bio))
                        continue;
                rh_inc(rh, dm_rh_bio_to_region(rh, bio));
        }
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

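/*
 * Drop the pending count for a region once a write to it has
 * completed.  When the count reaches zero the region moves to the
 * quiesced list (if it is being recovered) or back to the clean list
 * (if it was dirty), and the worker is woken; after a barrier failure
 * it is forced to DM_RH_NOSYNC instead.
 */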
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
        unsigned long flags;
        struct dm_region *reg;
        int should_wake = 0;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irqsave(&rh->region_lock, flags);
        if (atomic_dec_and_test(&reg->pending)) {
                /*
                 * There is no pending I/O for this region.
                 * We can move the region to the corresponding list for
                 * the next action.  At this point, the region is not
                 * yet connected to any list.
                 *
                 * If the state is DM_RH_NOSYNC, the region should be
                 * kept off the clean list.
                 * The hash entry for DM_RH_NOSYNC will remain in memory
                 * until the region is recovered or the map is reloaded.
                 */

                /* do nothing for DM_RH_NOSYNC */
                if (unlikely(rh->barrier_failure)) {
                        /*
                         * If a write barrier failed some time ago, we
                         * don't know whether or not this write made it
                         * to the disk, so we must resync the device.
                         */
                        reg->state = DM_RH_NOSYNC;
                } else if (reg->state == DM_RH_RECOVERING) {
                        list_add_tail(&reg->list, &rh->quiesced_regions);
                } else if (reg->state == DM_RH_DIRTY) {
                        reg->state = DM_RH_CLEAN;
                        list_add(&reg->list, &rh->clean_regions);
                }
                should_wake = 1;
        }
        spin_unlock_irqrestore(&rh->region_lock, flags);

        if (should_wake)
                rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
        int r;
        region_t region;
        struct dm_region *reg;

        /*
         * Ask the dirty log what's next.
         */
        r = rh->log->type->get_resync_work(rh->log, &region);
        if (r <= 0)
                return r;

        /*
         * Get this region, and start it quiescing by setting the
         * recovering flag.
         */
        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irq(&rh->region_lock);
        reg->state = DM_RH_RECOVERING;

        /* Already quiesced ? */
        if (atomic_read(&reg->pending))
                list_del_init(&reg->list);
        else
                list_move(&reg->list, &rh->quiesced_regions);

        spin_unlock_irq(&rh->region_lock);

        return 1;
}

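/*
 * Claim recovery slots and prepare that many regions for recovery.
 * Each successful down_trylock() on recovery_count takes one of the
 * max_recovery slots; the matching up() is done in
 * complete_resync_work() once the region has been recovered.
 */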
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
        /* Extra reference to avoid race with dm_rh_stop_recovery */
        atomic_inc(&rh->recovery_in_flight);

        while (!down_trylock(&rh->recovery_count)) {
                atomic_inc(&rh->recovery_in_flight);
                if (__rh_recovery_prepare(rh) <= 0) {
                        atomic_dec(&rh->recovery_in_flight);
                        up(&rh->recovery_count);
                        break;
                }
        }

        /* Drop the extra reference */
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Return a quiesced region ready for recovery, or NULL if there is none.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
        struct dm_region *reg = NULL;

        spin_lock_irq(&rh->region_lock);
        if (!list_empty(&rh->quiesced_regions)) {
                reg = list_entry(rh->quiesced_regions.next,
                                 struct dm_region, list);
                list_del_init(&reg->list);  /* remove from the quiesced list */
        }
        spin_unlock_irq(&rh->region_lock);

        return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

void dm_rh_recovery_end(struct dm_region *reg, int success)
{
        struct dm_region_hash *rh = reg->rh;

        spin_lock_irq(&rh->region_lock);
        if (success)
                list_add(&reg->list, &reg->rh->recovered_regions);
        else {
                reg->state = DM_RH_NOSYNC;
                list_add(&reg->list, &reg->rh->failed_recovered_regions);
        }
        spin_unlock_irq(&rh->region_lock);

        rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
        return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
        return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

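/*
 * Park a bio on its region's delayed list; the delayed bios are handed
 * back to the dispatch_bios callback once the region's recovery
 * completes (see complete_resync_work()).
 */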
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
        struct dm_region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
        bio_list_add(&reg->delayed_bios, bio);
        read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

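/*
 * Stop recovery by draining all max_recovery slots of recovery_count;
 * this blocks until any in-flight region recoveries have finished.
 * dm_rh_start_recovery() returns the slots and kicks the worker.
 */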
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
        int i;

        /* wait for any recovering regions */
        for (i = 0; i < rh->max_recovery; i++)
                down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
        int i;

        for (i = 0; i < rh->max_recovery; i++)
                up(&rh->recovery_count);

        rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");