drbd: Do not Oops when C_STANDALONE when uuid gets generated
drivers/block/drbd/drbd_main.c
1 /*
2    drbd.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27  */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/smp_lock.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60         struct drbd_work w;
61         union drbd_state os;
62         union drbd_state ns;
63         enum chg_state_flags flags;
64         struct completion *done;
65 };
66
67 int drbdd_init(struct drbd_thread *);
68 int drbd_worker(struct drbd_thread *);
69 int drbd_asender(struct drbd_thread *);
70
71 int drbd_init(void);
72 static int drbd_open(struct block_device *bdev, fmode_t mode);
73 static int drbd_release(struct gendisk *gd, fmode_t mode);
74 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
75 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
76                            union drbd_state ns, enum chg_state_flags flags);
77 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
78 static void md_sync_timer_fn(unsigned long data);
79 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
80
81 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
82               "Lars Ellenberg <lars@linbit.com>");
83 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
84 MODULE_VERSION(REL_VERSION);
85 MODULE_LICENSE("GPL");
86 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
87 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
88
89 #include <linux/moduleparam.h>
90 /* allow_open_on_secondary */
91 MODULE_PARM_DESC(allow_oos, "DONT USE!");
92 /* thanks to these macros, if compiled into the kernel (not-module),
93  * this becomes the boot parameter drbd.minor_count */
94 module_param(minor_count, uint, 0444);
95 module_param(disable_sendpage, bool, 0644);
96 module_param(allow_oos, bool, 0);
97 module_param(cn_idx, uint, 0444);
98 module_param(proc_details, int, 0644);
99
100 #ifdef CONFIG_DRBD_FAULT_INJECTION
101 int enable_faults;
102 int fault_rate;
103 static int fault_count;
104 int fault_devs;
105 /* bitmap of enabled faults */
106 module_param(enable_faults, int, 0664);
107 /* fault rate % value - applies to all enabled faults */
108 module_param(fault_rate, int, 0664);
109 /* count of faults inserted */
110 module_param(fault_count, int, 0664);
111 /* bitmap of devices to insert faults on */
112 module_param(fault_devs, int, 0644);
113 #endif
114
115 /* module parameter, defined */
116 unsigned int minor_count = 32;
117 int disable_sendpage;
118 int allow_oos;
119 unsigned int cn_idx = CN_IDX_DRBD;
120 int proc_details;       /* Detail level in proc drbd*/
121
122 /* Module parameter for setting the user mode helper program
123  * to run. Default is /sbin/drbdadm */
124 char usermode_helper[80] = "/sbin/drbdadm";
125
126 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
127
128 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
129  * as member "struct gendisk *vdisk;"
130  */
131 struct drbd_conf **minor_table;
132
133 struct kmem_cache *drbd_request_cache;
134 struct kmem_cache *drbd_ee_cache;       /* epoch entries */
135 struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
136 struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
137 mempool_t *drbd_request_mempool;
138 mempool_t *drbd_ee_mempool;
139
140 /* I do not use a standard mempool, because:
141    1) I want to hand out the pre-allocated objects first.
142    2) I want to be able to interrupt sleeping allocation with a signal.
143    Note: This is a single linked list, the next pointer is the private
144          member of struct page.
145  */
146 struct page *drbd_pp_pool;
147 spinlock_t   drbd_pp_lock;
148 int          drbd_pp_vacant;
149 wait_queue_head_t drbd_pp_wait;
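/*
 * Illustrative sketch only, not part of the driver: as the comment above
 * says, drbd_pp_pool is a LIFO single linked list chained through the
 * private member of struct page. Under drbd_pp_lock, a push and a pop
 * would look roughly like this (hypothetical snippet, for illustration):
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);	// push
 *	drbd_pp_pool = page;
 *	drbd_pp_vacant++;
 *
 *	page = drbd_pp_pool;					// pop
 *	drbd_pp_pool = (struct page *)page_private(page);
 *	drbd_pp_vacant--;
 */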
150
151 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
152
153 static const struct block_device_operations drbd_ops = {
154         .owner =   THIS_MODULE,
155         .open =    drbd_open,
156         .release = drbd_release,
157 };
158
159 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
160
161 #ifdef __CHECKER__
162 /* When checking with sparse, and this is an inline function, sparse will
163    give tons of false positives. When this is a real function, sparse works.
164  */
165 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
166 {
167         int io_allowed;
168
169         atomic_inc(&mdev->local_cnt);
170         io_allowed = (mdev->state.disk >= mins);
171         if (!io_allowed) {
172                 if (atomic_dec_and_test(&mdev->local_cnt))
173                         wake_up(&mdev->misc_wait);
174         }
175         return io_allowed;
176 }
177
178 #endif
179
180 /**
181  * DOC: The transfer log
182  *
183  * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
184  * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
185  * of the list. There is always at least one &struct drbd_tl_epoch object.
186  *
187  * Each &struct drbd_tl_epoch has a circular double linked list of requests
188  * attached.
189  */
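/*
 * Illustrative sketch only, not part of the driver: walking the transfer
 * log described above means following the single linked epoch chain from
 * the tail (oldest_tle) towards the head (newest_tle), and for each epoch
 * its ring of requests, e.g.
 *
 *	struct drbd_tl_epoch *b = mdev->oldest_tle;
 *	while (b) {
 *		struct drbd_request *r;
 *		list_for_each_entry(r, &b->requests, tl_requests)
 *			;	// look at request r of this epoch
 *		b = b->next;	// becomes NULL past newest_tle
 *	}
 */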
190 static int tl_init(struct drbd_conf *mdev)
191 {
192         struct drbd_tl_epoch *b;
193
194         /* during device minor initialization, we may well use GFP_KERNEL */
195         b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
196         if (!b)
197                 return 0;
198         INIT_LIST_HEAD(&b->requests);
199         INIT_LIST_HEAD(&b->w.list);
200         b->next = NULL;
201         b->br_number = 4711;
202         b->n_req = 0;
203         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
204
205         mdev->oldest_tle = b;
206         mdev->newest_tle = b;
207         INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
208
209         mdev->tl_hash = NULL;
210         mdev->tl_hash_s = 0;
211
212         return 1;
213 }
214
215 static void tl_cleanup(struct drbd_conf *mdev)
216 {
217         D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
218         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
219         kfree(mdev->oldest_tle);
220         mdev->oldest_tle = NULL;
221         kfree(mdev->unused_spare_tle);
222         mdev->unused_spare_tle = NULL;
223         kfree(mdev->tl_hash);
224         mdev->tl_hash = NULL;
225         mdev->tl_hash_s = 0;
226 }
227
228 /**
229  * _tl_add_barrier() - Adds a barrier to the transfer log
230  * @mdev:       DRBD device.
231  * @new:        Barrier to be added before the current head of the TL.
232  *
233  * The caller must hold the req_lock.
234  */
235 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
236 {
237         struct drbd_tl_epoch *newest_before;
238
239         INIT_LIST_HEAD(&new->requests);
240         INIT_LIST_HEAD(&new->w.list);
241         new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
242         new->next = NULL;
243         new->n_req = 0;
244
245         newest_before = mdev->newest_tle;
246         /* never send a barrier number == 0, because that is special-cased
247          * when using TCQ for our write ordering code */
248         new->br_number = (newest_before->br_number+1) ?: 1;
249         if (mdev->newest_tle != new) {
250                 mdev->newest_tle->next = new;
251                 mdev->newest_tle = new;
252         }
253 }
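/*
 * Worked example (illustrative only, assuming a 32 bit br_number): the
 * "?: 1" above uses the GNU a ?: b shorthand; when br_number wraps from
 * 0xffffffff to 0, the expression yields 1 instead, so barrier number 0,
 * which is special-cased by the write ordering code, is never handed out.
 */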
254
255 /**
256  * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
257  * @mdev:       DRBD device.
258  * @barrier_nr: Expected identifier of the DRBD write barrier packet.
259  * @set_size:   Expected number of requests before that barrier.
260  *
261  * In case the passed barrier_nr or set_size does not match the oldest
262  * &struct drbd_tl_epoch objects this function will cause a termination
263  * of the connection.
264  */
265 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
266                        unsigned int set_size)
267 {
268         struct drbd_tl_epoch *b, *nob; /* next old barrier */
269         struct list_head *le, *tle;
270         struct drbd_request *r;
271
272         spin_lock_irq(&mdev->req_lock);
273
274         b = mdev->oldest_tle;
275
276         /* first some paranoia code */
277         if (b == NULL) {
278                 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
279                         barrier_nr);
280                 goto bail;
281         }
282         if (b->br_number != barrier_nr) {
283                 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
284                         barrier_nr, b->br_number);
285                 goto bail;
286         }
287         if (b->n_req != set_size) {
288                 dev_err(DEV, "BAD! BarrierAck #%u received with n_req=%u, expected n_req=%u!\n",
289                         barrier_nr, set_size, b->n_req);
290                 goto bail;
291         }
292
293         /* Clean up list of requests processed during current epoch */
294         list_for_each_safe(le, tle, &b->requests) {
295                 r = list_entry(le, struct drbd_request, tl_requests);
296                 _req_mod(r, barrier_acked);
297         }
298         /* There could be requests on the list waiting for completion
299            of the write to the local disk. To avoid corruptions of
300            slab's data structures we have to remove the list's head.
301
302            Also there could have been a barrier ack out of sequence, overtaking
303            the write acks - which would be a bug and violating write ordering.
304            To not deadlock in case we lose connection while such requests are
305            still pending, we need some way to find them for the
306            _req_mod(connection_lost_while_pending).
307
308            These have been list_move'd to the out_of_sequence_requests list in
309            _req_mod(, barrier_acked) above.
310            */
311         list_del_init(&b->requests);
312
313         nob = b->next;
314         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
315                 _tl_add_barrier(mdev, b);
316                 if (nob)
317                         mdev->oldest_tle = nob;
318                 /* if nob == NULL b was the only barrier, and becomes the new
319                    barrier. Therefore mdev->oldest_tle already points to b */
320         } else {
321                 D_ASSERT(nob != NULL);
322                 mdev->oldest_tle = nob;
323                 kfree(b);
324         }
325
326         spin_unlock_irq(&mdev->req_lock);
327         dec_ap_pending(mdev);
328
329         return;
330
331 bail:
332         spin_unlock_irq(&mdev->req_lock);
333         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
334 }
335
336
337 /**
338  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
339  * @mdev:       DRBD device.
340  *
341  * This is called after the connection to the peer was lost. The storage covered
342  * by the requests on the transfer log gets marked as out of sync. Called from the
343  * receiver thread and the worker thread.
344  */
345 void tl_clear(struct drbd_conf *mdev)
346 {
347         struct drbd_tl_epoch *b, *tmp;
348         struct list_head *le, *tle;
349         struct drbd_request *r;
350         int new_initial_bnr = net_random();
351
352         spin_lock_irq(&mdev->req_lock);
353
354         b = mdev->oldest_tle;
355         while (b) {
356                 list_for_each_safe(le, tle, &b->requests) {
357                         r = list_entry(le, struct drbd_request, tl_requests);
358                         /* It would be nice to complete outside of spinlock.
359                          * But this is easier for now. */
360                         _req_mod(r, connection_lost_while_pending);
361                 }
362                 tmp = b->next;
363
364                 /* there could still be requests on that ring list,
365                  * in case local io is still pending */
366                 list_del(&b->requests);
367
368                 /* dec_ap_pending corresponding to queue_barrier.
369                  * the newest barrier may not have been queued yet,
370                  * in which case w.cb is still NULL. */
371                 if (b->w.cb != NULL)
372                         dec_ap_pending(mdev);
373
374                 if (b == mdev->newest_tle) {
375                         /* recycle, but reinit! */
376                         D_ASSERT(tmp == NULL);
377                         INIT_LIST_HEAD(&b->requests);
378                         INIT_LIST_HEAD(&b->w.list);
379                         b->w.cb = NULL;
380                         b->br_number = new_initial_bnr;
381                         b->n_req = 0;
382
383                         mdev->oldest_tle = b;
384                         break;
385                 }
386                 kfree(b);
387                 b = tmp;
388         }
389
390         /* we expect this list to be empty. */
391         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
392
393         /* but just in case, clean it up anyways! */
394         list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
395                 r = list_entry(le, struct drbd_request, tl_requests);
396                 /* It would be nice to complete outside of spinlock.
397                  * But this is easier for now. */
398                 _req_mod(r, connection_lost_while_pending);
399         }
400
401         /* ensure bit indicating barrier is required is clear */
402         clear_bit(CREATE_BARRIER, &mdev->flags);
403
404         spin_unlock_irq(&mdev->req_lock);
405 }
406
407 /**
408  * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
409  * @mdev:       DRBD device.
410  * @os:         old (current) state.
411  * @ns:         new (wanted) state.
412  */
413 static int cl_wide_st_chg(struct drbd_conf *mdev,
414                           union drbd_state os, union drbd_state ns)
415 {
416         return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
417                  ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
418                   (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
419                   (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
420                   (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
421                 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
422                 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
423 }
424
425 int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
426                       union drbd_state mask, union drbd_state val)
427 {
428         unsigned long flags;
429         union drbd_state os, ns;
430         int rv;
431
432         spin_lock_irqsave(&mdev->req_lock, flags);
433         os = mdev->state;
434         ns.i = (os.i & ~mask.i) | val.i;
435         rv = _drbd_set_state(mdev, ns, f, NULL);
436         ns = mdev->state;
437         spin_unlock_irqrestore(&mdev->req_lock, flags);
438
439         return rv;
440 }
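/*
 * Illustrative example only: @mask selects which fields of the state union
 * may change, @val carries their new values. A caller such as
 *
 *	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 *
 * expands to a mask/val pair with only the conn field set, so the
 * composition in drbd_change_state(),
 *
 *	ns.i = (os.i & ~mask.i) | val.i;
 *
 * keeps every other field of the old state and replaces just conn.
 */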
441
442 /**
443  * drbd_force_state() - Impose a change which happens outside our control on our state
444  * @mdev:       DRBD device.
445  * @mask:       mask of state bits to change.
446  * @val:        value of new state bits.
447  */
448 void drbd_force_state(struct drbd_conf *mdev,
449         union drbd_state mask, union drbd_state val)
450 {
451         drbd_change_state(mdev, CS_HARD, mask, val);
452 }
453
454 static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
455 static int is_valid_state_transition(struct drbd_conf *,
456                                      union drbd_state, union drbd_state);
457 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
458                                        union drbd_state ns, int *warn_sync_abort);
459 int drbd_send_state_req(struct drbd_conf *,
460                         union drbd_state, union drbd_state);
461
462 static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
463                                     union drbd_state mask, union drbd_state val)
464 {
465         union drbd_state os, ns;
466         unsigned long flags;
467         int rv;
468
469         if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
470                 return SS_CW_SUCCESS;
471
472         if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
473                 return SS_CW_FAILED_BY_PEER;
474
475         rv = 0;
476         spin_lock_irqsave(&mdev->req_lock, flags);
477         os = mdev->state;
478         ns.i = (os.i & ~mask.i) | val.i;
479         ns = sanitize_state(mdev, os, ns, NULL);
480
481         if (!cl_wide_st_chg(mdev, os, ns))
482                 rv = SS_CW_NO_NEED;
483         if (!rv) {
484                 rv = is_valid_state(mdev, ns);
485                 if (rv == SS_SUCCESS) {
486                         rv = is_valid_state_transition(mdev, ns, os);
487                         if (rv == SS_SUCCESS)
488                                 rv = 0; /* cont waiting, otherwise fail. */
489                 }
490         }
491         spin_unlock_irqrestore(&mdev->req_lock, flags);
492
493         return rv;
494 }
495
496 /**
497  * drbd_req_state() - Perform a possibly cluster-wide state change
498  * @mdev:       DRBD device.
499  * @mask:       mask of state bits to change.
500  * @val:        value of new state bits.
501  * @f:          flags
502  *
503  * Should not be called directly, use drbd_request_state() or
504  * _drbd_request_state().
505  */
506 static int drbd_req_state(struct drbd_conf *mdev,
507                           union drbd_state mask, union drbd_state val,
508                           enum chg_state_flags f)
509 {
510         struct completion done;
511         unsigned long flags;
512         union drbd_state os, ns;
513         int rv;
514
515         init_completion(&done);
516
517         if (f & CS_SERIALIZE)
518                 mutex_lock(&mdev->state_mutex);
519
520         spin_lock_irqsave(&mdev->req_lock, flags);
521         os = mdev->state;
522         ns.i = (os.i & ~mask.i) | val.i;
523         ns = sanitize_state(mdev, os, ns, NULL);
524
525         if (cl_wide_st_chg(mdev, os, ns)) {
526                 rv = is_valid_state(mdev, ns);
527                 if (rv == SS_SUCCESS)
528                         rv = is_valid_state_transition(mdev, ns, os);
529                 spin_unlock_irqrestore(&mdev->req_lock, flags);
530
531                 if (rv < SS_SUCCESS) {
532                         if (f & CS_VERBOSE)
533                                 print_st_err(mdev, os, ns, rv);
534                         goto abort;
535                 }
536
537                 drbd_state_lock(mdev);
538                 if (!drbd_send_state_req(mdev, mask, val)) {
539                         drbd_state_unlock(mdev);
540                         rv = SS_CW_FAILED_BY_PEER;
541                         if (f & CS_VERBOSE)
542                                 print_st_err(mdev, os, ns, rv);
543                         goto abort;
544                 }
545
546                 wait_event(mdev->state_wait,
547                         (rv = _req_st_cond(mdev, mask, val)));
548
549                 if (rv < SS_SUCCESS) {
550                         drbd_state_unlock(mdev);
551                         if (f & CS_VERBOSE)
552                                 print_st_err(mdev, os, ns, rv);
553                         goto abort;
554                 }
555                 spin_lock_irqsave(&mdev->req_lock, flags);
556                 os = mdev->state;
557                 ns.i = (os.i & ~mask.i) | val.i;
558                 rv = _drbd_set_state(mdev, ns, f, &done);
559                 drbd_state_unlock(mdev);
560         } else {
561                 rv = _drbd_set_state(mdev, ns, f, &done);
562         }
563
564         spin_unlock_irqrestore(&mdev->req_lock, flags);
565
566         if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
567                 D_ASSERT(current != mdev->worker.task);
568                 wait_for_completion(&done);
569         }
570
571 abort:
572         if (f & CS_SERIALIZE)
573                 mutex_unlock(&mdev->state_mutex);
574
575         return rv;
576 }
577
578 /**
579  * _drbd_request_state() - Request a state change (with flags)
580  * @mdev:       DRBD device.
581  * @mask:       mask of state bits to change.
582  * @val:        value of new state bits.
583  * @f:          flags
584  *
585  * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
586  * flag, or when logging of failed state change requests is not desired.
587  */
588 int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
589                         union drbd_state val,   enum chg_state_flags f)
590 {
591         int rv;
592
593         wait_event(mdev->state_wait,
594                    (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
595
596         return rv;
597 }
598
599 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
600 {
601         dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
602             name,
603             drbd_conn_str(ns.conn),
604             drbd_role_str(ns.role),
605             drbd_role_str(ns.peer),
606             drbd_disk_str(ns.disk),
607             drbd_disk_str(ns.pdsk),
608             ns.susp ? 's' : 'r',
609             ns.aftr_isp ? 'a' : '-',
610             ns.peer_isp ? 'p' : '-',
611             ns.user_isp ? 'u' : '-'
612             );
613 }
614
615 void print_st_err(struct drbd_conf *mdev,
616         union drbd_state os, union drbd_state ns, int err)
617 {
618         if (err == SS_IN_TRANSIENT_STATE)
619                 return;
620         dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
621         print_st(mdev, " state", os);
622         print_st(mdev, "wanted", ns);
623 }
624
625
626 #define drbd_peer_str drbd_role_str
627 #define drbd_pdsk_str drbd_disk_str
628
629 #define drbd_susp_str(A)     ((A) ? "1" : "0")
630 #define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
631 #define drbd_peer_isp_str(A) ((A) ? "1" : "0")
632 #define drbd_user_isp_str(A) ((A) ? "1" : "0")
633
634 #define PSC(A) \
635         ({ if (ns.A != os.A) { \
636                 pbp += sprintf(pbp, #A "( %s -> %s ) ", \
637                               drbd_##A##_str(os.A), \
638                               drbd_##A##_str(ns.A)); \
639         } })
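/*
 * Illustrative example only: with the PSC() helper above, the state change
 * line printed by __drbd_set_state() contains one "field( old -> new )"
 * group per field that actually changed, roughly
 *
 *	conn( WFConnection -> Connected ) pdsk( DUnknown -> Inconsistent )
 *
 * (the exact names come from the drbd_*_str() helpers in drbd_strings.c).
 */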
640
641 /**
642  * is_valid_state() - Returns an SS_ error code if ns is not valid
643  * @mdev:       DRBD device.
644  * @ns:         State to consider.
645  */
646 static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
647 {
648         /* See drbd_state_sw_errors in drbd_strings.c */
649
650         enum drbd_fencing_p fp;
651         int rv = SS_SUCCESS;
652
653         fp = FP_DONT_CARE;
654         if (get_ldev(mdev)) {
655                 fp = mdev->ldev->dc.fencing;
656                 put_ldev(mdev);
657         }
658
659         if (get_net_conf(mdev)) {
660                 if (!mdev->net_conf->two_primaries &&
661                     ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
662                         rv = SS_TWO_PRIMARIES;
663                 put_net_conf(mdev);
664         }
665
666         if (rv <= 0)
667                 /* already found a reason to abort */;
668         else if (ns.role == R_SECONDARY && mdev->open_cnt)
669                 rv = SS_DEVICE_IN_USE;
670
671         else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
672                 rv = SS_NO_UP_TO_DATE_DISK;
673
674         else if (fp >= FP_RESOURCE &&
675                  ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
676                 rv = SS_PRIMARY_NOP;
677
678         else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
679                 rv = SS_NO_UP_TO_DATE_DISK;
680
681         else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
682                 rv = SS_NO_LOCAL_DISK;
683
684         else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
685                 rv = SS_NO_REMOTE_DISK;
686
687         else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
688                 rv = SS_NO_UP_TO_DATE_DISK;
689
690         else if ((ns.conn == C_CONNECTED ||
691                   ns.conn == C_WF_BITMAP_S ||
692                   ns.conn == C_SYNC_SOURCE ||
693                   ns.conn == C_PAUSED_SYNC_S) &&
694                   ns.disk == D_OUTDATED)
695                 rv = SS_CONNECTED_OUTDATES;
696
697         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
698                  (mdev->sync_conf.verify_alg[0] == 0))
699                 rv = SS_NO_VERIFY_ALG;
700
701         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
702                   mdev->agreed_pro_version < 88)
703                 rv = SS_NOT_SUPPORTED;
704
705         return rv;
706 }
707
708 /**
709  * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
710  * @mdev:       DRBD device.
711  * @ns:         new state.
712  * @os:         old state.
713  */
714 static int is_valid_state_transition(struct drbd_conf *mdev,
715                                      union drbd_state ns, union drbd_state os)
716 {
717         int rv = SS_SUCCESS;
718
719         if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
720             os.conn > C_CONNECTED)
721                 rv = SS_RESYNC_RUNNING;
722
723         if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
724                 rv = SS_ALREADY_STANDALONE;
725
726         if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
727                 rv = SS_IS_DISKLESS;
728
729         if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
730                 rv = SS_NO_NET_CONFIG;
731
732         if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
733                 rv = SS_LOWER_THAN_OUTDATED;
734
735         if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
736                 rv = SS_IN_TRANSIENT_STATE;
737
738         if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
739                 rv = SS_IN_TRANSIENT_STATE;
740
741         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
742                 rv = SS_NEED_CONNECTION;
743
744         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
745             ns.conn != os.conn && os.conn > C_CONNECTED)
746                 rv = SS_RESYNC_RUNNING;
747
748         if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
749             os.conn < C_CONNECTED)
750                 rv = SS_NEED_CONNECTION;
751
752         return rv;
753 }
754
755 /**
756  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
757  * @mdev:       DRBD device.
758  * @os:         old state.
759  * @ns:         new state.
760  * @warn_sync_abort:
761  *
762  * When we lose connection, we have to set the state of the peer's disk (pdsk)
763  * to D_UNKNOWN. This rule and many more along those lines are in this function.
764  */
765 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
766                                        union drbd_state ns, int *warn_sync_abort)
767 {
768         enum drbd_fencing_p fp;
769
770         fp = FP_DONT_CARE;
771         if (get_ldev(mdev)) {
772                 fp = mdev->ldev->dc.fencing;
773                 put_ldev(mdev);
774         }
775
776         /* Do not allow network errors to configure a device's network part */
777         if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
778             os.conn <= C_DISCONNECTING)
779                 ns.conn = os.conn;
780
781         /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */
782         if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
783             ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING)
784                 ns.conn = os.conn;
785
786         /* After C_DISCONNECTING only C_STANDALONE may follow */
787         if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
788                 ns.conn = os.conn;
789
790         if (ns.conn < C_CONNECTED) {
791                 ns.peer_isp = 0;
792                 ns.peer = R_UNKNOWN;
793                 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
794                         ns.pdsk = D_UNKNOWN;
795         }
796
797         /* Clear the aftr_isp when becoming unconfigured */
798         if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
799                 ns.aftr_isp = 0;
800
801         if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS)
802                 ns.pdsk = D_UNKNOWN;
803
804         /* Abort resync if a disk fails/detaches */
805         if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
806             (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
807                 if (warn_sync_abort)
808                         *warn_sync_abort = 1;
809                 ns.conn = C_CONNECTED;
810         }
811
812         if (ns.conn >= C_CONNECTED &&
813             ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
814              (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
815                 switch (ns.conn) {
816                 case C_WF_BITMAP_T:
817                 case C_PAUSED_SYNC_T:
818                         ns.disk = D_OUTDATED;
819                         break;
820                 case C_CONNECTED:
821                 case C_WF_BITMAP_S:
822                 case C_SYNC_SOURCE:
823                 case C_PAUSED_SYNC_S:
824                         ns.disk = D_UP_TO_DATE;
825                         break;
826                 case C_SYNC_TARGET:
827                         ns.disk = D_INCONSISTENT;
828                         dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
829                         break;
830                 }
831                 if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
832                         dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
833         }
834
835         if (ns.conn >= C_CONNECTED &&
836             (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
837                 switch (ns.conn) {
838                 case C_CONNECTED:
839                 case C_WF_BITMAP_T:
840                 case C_PAUSED_SYNC_T:
841                 case C_SYNC_TARGET:
842                         ns.pdsk = D_UP_TO_DATE;
843                         break;
844                 case C_WF_BITMAP_S:
845                 case C_PAUSED_SYNC_S:
846                         /* remap any consistent state to D_OUTDATED,
847                          * but disallow "upgrade" of not even consistent states.
848                          */
849                         ns.pdsk =
850                                 (D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
851                                 ? os.pdsk : D_OUTDATED;
852                         break;
853                 case C_SYNC_SOURCE:
854                         ns.pdsk = D_INCONSISTENT;
855                         dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
856                         break;
857                 }
858                 if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
859                         dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
860         }
861
862         /* Connection breaks down before we finished "Negotiating" */
863         if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
864             get_ldev_if_state(mdev, D_NEGOTIATING)) {
865                 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
866                         ns.disk = mdev->new_state_tmp.disk;
867                         ns.pdsk = mdev->new_state_tmp.pdsk;
868                 } else {
869                         dev_alert(DEV, "Connection lost while negotiating, no data!\n");
870                         ns.disk = D_DISKLESS;
871                         ns.pdsk = D_UNKNOWN;
872                 }
873                 put_ldev(mdev);
874         }
875
876         if (fp == FP_STONITH &&
877             (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
878             !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
879                 ns.susp = 1;
880
881         if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
882                 if (ns.conn == C_SYNC_SOURCE)
883                         ns.conn = C_PAUSED_SYNC_S;
884                 if (ns.conn == C_SYNC_TARGET)
885                         ns.conn = C_PAUSED_SYNC_T;
886         } else {
887                 if (ns.conn == C_PAUSED_SYNC_S)
888                         ns.conn = C_SYNC_SOURCE;
889                 if (ns.conn == C_PAUSED_SYNC_T)
890                         ns.conn = C_SYNC_TARGET;
891         }
892
893         return ns;
894 }
895
896 /* helper for __drbd_set_state */
897 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
898 {
899         if (cs == C_VERIFY_T) {
900                 /* starting online verify from an arbitrary position
901                  * does not fit well into the existing protocol.
902                  * on C_VERIFY_T, we initialize ov_left and friends
903                  * implicitly in receive_DataRequest once the
904                  * first P_OV_REQUEST is received */
905                 mdev->ov_start_sector = ~(sector_t)0;
906         } else {
907                 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
908                 if (bit >= mdev->rs_total)
909                         mdev->ov_start_sector =
910                                 BM_BIT_TO_SECT(mdev->rs_total - 1);
911                 mdev->ov_position = mdev->ov_start_sector;
912         }
913 }
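/*
 * Worked example (illustrative only, assuming the usual 4 KiB of storage
 * per bitmap bit and 512 byte sectors): BM_SECT_TO_BIT(s) is then s >> 3,
 * so an ov_start_sector of 1024 maps to bit 128, and BM_BIT_TO_SECT()
 * converts back by shifting left again.
 */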
914
915 /**
916  * __drbd_set_state() - Set a new DRBD state
917  * @mdev:       DRBD device.
918  * @ns:         new state.
919  * @flags:      Flags
920  * @done:       Optional completion, that will get completed after the after_state_ch() finished
921  *
922  * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
923  */
924 int __drbd_set_state(struct drbd_conf *mdev,
925                     union drbd_state ns, enum chg_state_flags flags,
926                     struct completion *done)
927 {
928         union drbd_state os;
929         int rv = SS_SUCCESS;
930         int warn_sync_abort = 0;
931         struct after_state_chg_work *ascw;
932
933         os = mdev->state;
934
935         ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
936
937         if (ns.i == os.i)
938                 return SS_NOTHING_TO_DO;
939
940         if (!(flags & CS_HARD)) {
941                 /*  pre-state-change checks ; only look at ns  */
942                 /* See drbd_state_sw_errors in drbd_strings.c */
943
944                 rv = is_valid_state(mdev, ns);
945                 if (rv < SS_SUCCESS) {
946                         /* If the old state was illegal as well, then let
947                            this happen...*/
948
949                         if (is_valid_state(mdev, os) == rv) {
950                                 dev_err(DEV, "Considering state change from bad state. "
951                                     "Error would be: '%s'\n",
952                                     drbd_set_st_err_str(rv));
953                                 print_st(mdev, "old", os);
954                                 print_st(mdev, "new", ns);
955                                 rv = is_valid_state_transition(mdev, ns, os);
956                         }
957                 } else
958                         rv = is_valid_state_transition(mdev, ns, os);
959         }
960
961         if (rv < SS_SUCCESS) {
962                 if (flags & CS_VERBOSE)
963                         print_st_err(mdev, os, ns, rv);
964                 return rv;
965         }
966
967         if (warn_sync_abort)
968                 dev_warn(DEV, "Resync aborted.\n");
969
970         {
971                 char *pbp, pb[300];
972                 pbp = pb;
973                 *pbp = 0;
974                 PSC(role);
975                 PSC(peer);
976                 PSC(conn);
977                 PSC(disk);
978                 PSC(pdsk);
979                 PSC(susp);
980                 PSC(aftr_isp);
981                 PSC(peer_isp);
982                 PSC(user_isp);
983                 dev_info(DEV, "%s\n", pb);
984         }
985
986         /* solve the race between becoming unconfigured,
987          * worker doing the cleanup, and
988          * admin reconfiguring us:
989          * on (re)configure, first set CONFIG_PENDING,
990          * then wait for a potentially exiting worker,
991          * start the worker, and schedule one no_op.
992          * then proceed with configuration.
993          */
994         if (ns.disk == D_DISKLESS &&
995             ns.conn == C_STANDALONE &&
996             ns.role == R_SECONDARY &&
997             !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
998                 set_bit(DEVICE_DYING, &mdev->flags);
999
1000         mdev->state.i = ns.i;
1001         wake_up(&mdev->misc_wait);
1002         wake_up(&mdev->state_wait);
1003
1004         /*   post-state-change actions   */
1005         if (os.conn >= C_SYNC_SOURCE   && ns.conn <= C_CONNECTED) {
1006                 set_bit(STOP_SYNC_TIMER, &mdev->flags);
1007                 mod_timer(&mdev->resync_timer, jiffies);
1008         }
1009
1010         /* aborted verify run. log the last position */
1011         if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1012             ns.conn < C_CONNECTED) {
1013                 mdev->ov_start_sector =
1014                         BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
1015                 dev_info(DEV, "Online Verify reached sector %llu\n",
1016                         (unsigned long long)mdev->ov_start_sector);
1017         }
1018
1019         if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1020             (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
1021                 dev_info(DEV, "Syncer continues.\n");
1022                 mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
1023                 if (ns.conn == C_SYNC_TARGET) {
1024                         if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
1025                                 mod_timer(&mdev->resync_timer, jiffies);
1026                         /* This if (!test_bit) is only needed for the case
1027                            that a device that has ceased to use its timer,
1028                            i.e. it is already in drbd_resync_finished() gets
1029                            paused and resumed. */
1030                 }
1031         }
1032
1033         if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
1034             (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1035                 dev_info(DEV, "Resync suspended\n");
1036                 mdev->rs_mark_time = jiffies;
1037                 if (ns.conn == C_PAUSED_SYNC_T)
1038                         set_bit(STOP_SYNC_TIMER, &mdev->flags);
1039         }
1040
1041         if (os.conn == C_CONNECTED &&
1042             (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1043                 mdev->ov_position = 0;
1044                 mdev->rs_total =
1045                 mdev->rs_mark_left = drbd_bm_bits(mdev);
1046                 if (mdev->agreed_pro_version >= 90)
1047                         set_ov_position(mdev, ns.conn);
1048                 else
1049                         mdev->ov_start_sector = 0;
1050                 mdev->ov_left = mdev->rs_total
1051                               - BM_SECT_TO_BIT(mdev->ov_position);
1052                 mdev->rs_start     =
1053                 mdev->rs_mark_time = jiffies;
1054                 mdev->ov_last_oos_size = 0;
1055                 mdev->ov_last_oos_start = 0;
1056
1057                 if (ns.conn == C_VERIFY_S) {
1058                         dev_info(DEV, "Starting Online Verify from sector %llu\n",
1059                                         (unsigned long long)mdev->ov_position);
1060                         mod_timer(&mdev->resync_timer, jiffies);
1061                 }
1062         }
1063
1064         if (get_ldev(mdev)) {
1065                 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1066                                                  MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1067                                                  MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1068
1069                 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1070                         mdf |= MDF_CRASHED_PRIMARY;
1071                 if (mdev->state.role == R_PRIMARY ||
1072                     (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1073                         mdf |= MDF_PRIMARY_IND;
1074                 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1075                         mdf |= MDF_CONNECTED_IND;
1076                 if (mdev->state.disk > D_INCONSISTENT)
1077                         mdf |= MDF_CONSISTENT;
1078                 if (mdev->state.disk > D_OUTDATED)
1079                         mdf |= MDF_WAS_UP_TO_DATE;
1080                 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1081                         mdf |= MDF_PEER_OUT_DATED;
1082                 if (mdf != mdev->ldev->md.flags) {
1083                         mdev->ldev->md.flags = mdf;
1084                         drbd_md_mark_dirty(mdev);
1085                 }
1086                 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1087                         drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1088                 put_ldev(mdev);
1089         }
1090
1091         /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1092         if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1093             os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1094                 set_bit(CONSIDER_RESYNC, &mdev->flags);
1095
1096         /* Receiver should clean up itself */
1097         if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1098                 drbd_thread_stop_nowait(&mdev->receiver);
1099
1100         /* Now the receiver finished cleaning up itself, it should die */
1101         if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1102                 drbd_thread_stop_nowait(&mdev->receiver);
1103
1104         /* Upon network failure, we need to restart the receiver. */
1105         if (os.conn > C_TEAR_DOWN &&
1106             ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1107                 drbd_thread_restart_nowait(&mdev->receiver);
1108
1109         ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1110         if (ascw) {
1111                 ascw->os = os;
1112                 ascw->ns = ns;
1113                 ascw->flags = flags;
1114                 ascw->w.cb = w_after_state_ch;
1115                 ascw->done = done;
1116                 drbd_queue_work(&mdev->data.work, &ascw->w);
1117         } else {
1118                 dev_warn(DEV, "Could not kmalloc an ascw\n");
1119         }
1120
1121         return rv;
1122 }
1123
1124 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1125 {
1126         struct after_state_chg_work *ascw =
1127                 container_of(w, struct after_state_chg_work, w);
1128         after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1129         if (ascw->flags & CS_WAIT_COMPLETE) {
1130                 D_ASSERT(ascw->done != NULL);
1131                 complete(ascw->done);
1132         }
1133         kfree(ascw);
1134
1135         return 1;
1136 }
1137
1138 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1139 {
1140         if (rv) {
1141                 dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
1142                 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1143                 return;
1144         }
1145
1146         switch (mdev->state.conn) {
1147         case C_STARTING_SYNC_T:
1148                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1149                 break;
1150         case C_STARTING_SYNC_S:
1151                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1152                 break;
1153         }
1154 }
1155
1156 /**
1157  * after_state_ch() - Perform after state change actions that may sleep
1158  * @mdev:       DRBD device.
1159  * @os:         old state.
1160  * @ns:         new state.
1161  * @flags:      Flags
1162  */
1163 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1164                            union drbd_state ns, enum chg_state_flags flags)
1165 {
1166         enum drbd_fencing_p fp;
1167
1168         if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1169                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1170                 if (mdev->p_uuid)
1171                         mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1172         }
1173
1174         fp = FP_DONT_CARE;
1175         if (get_ldev(mdev)) {
1176                 fp = mdev->ldev->dc.fencing;
1177                 put_ldev(mdev);
1178         }
1179
1180         /* Inform userspace about the change... */
1181         drbd_bcast_state(mdev, ns);
1182
1183         if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1184             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1185                 drbd_khelper(mdev, "pri-on-incon-degr");
1186
1187         /* Here we have the actions that are performed after a
1188            state change. This function might sleep */
1189
1190         if (fp == FP_STONITH && ns.susp) {
1191                 /* case1: The outdate peer handler is successful:
1192                  * case2: The connection was established again: */
1193                 if ((os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) ||
1194                     (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) {
1195                         tl_clear(mdev);
1196                         spin_lock_irq(&mdev->req_lock);
1197                         _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
1198                         spin_unlock_irq(&mdev->req_lock);
1199                 }
1200         }
1201         /* Do not change the order of the if above and the two below... */
1202         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
1203                 drbd_send_uuids(mdev);
1204                 drbd_send_state(mdev);
1205         }
1206         if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
1207                 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
1208
1209         /* Lost contact to peer's copy of the data */
1210         if ((os.pdsk >= D_INCONSISTENT &&
1211              os.pdsk != D_UNKNOWN &&
1212              os.pdsk != D_OUTDATED)
1213         &&  (ns.pdsk < D_INCONSISTENT ||
1214              ns.pdsk == D_UNKNOWN ||
1215              ns.pdsk == D_OUTDATED)) {
1216                 kfree(mdev->p_uuid);
1217                 mdev->p_uuid = NULL;
1218                 if (get_ldev(mdev)) {
1219                         if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1220                             mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE)
1221                                 atomic_set(&mdev->new_c_uuid, 2);
1222                         put_ldev(mdev);
1223                 }
1224         }
1225
1226         if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1227                 /* Diskless peer becomes primary or got connected to a diskless, primary peer. */
1228                 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
1229                         atomic_set(&mdev->new_c_uuid, 2);
1230
1231                 /* D_DISKLESS Peer becomes secondary */
1232                 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1233                         drbd_al_to_on_disk_bm(mdev);
1234                 put_ldev(mdev);
1235         }
1236
1237         /* Last part of the attaching process ... */
1238         if (ns.conn >= C_CONNECTED &&
1239             os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1240                 kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
1241                 mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
1242                 drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
1243                 drbd_send_uuids(mdev);
1244                 drbd_send_state(mdev);
1245         }
1246
1247         /* We want to pause/continue resync, tell peer. */
1248         if (ns.conn >= C_CONNECTED &&
1249              ((os.aftr_isp != ns.aftr_isp) ||
1250               (os.user_isp != ns.user_isp)))
1251                 drbd_send_state(mdev);
1252
1253         /* In case one of the isp bits got set, suspend other devices. */
1254         if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1255             (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1256                 suspend_other_sg(mdev);
1257
1258         /* Make sure the peer gets informed about possible state
1259            changes (ISP bits) while we were in WFReportParams. */
1260         if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1261                 drbd_send_state(mdev);
1262
1263         /* We are in the process of starting a full sync... */
1264         if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1265             (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1266                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1267
1268         /* We are invalidating ourselves... */
1269         if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1270             os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1271                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1272
1273         if (os.disk > D_FAILED && ns.disk == D_FAILED) {
1274                 enum drbd_io_error_p eh;
1275
1276                 eh = EP_PASS_ON;
1277                 if (get_ldev_if_state(mdev, D_FAILED)) {
1278                         eh = mdev->ldev->dc.on_io_error;
1279                         put_ldev(mdev);
1280                 }
1281
1282                 drbd_rs_cancel_all(mdev);
1283                 /* since get_ldev() only works as long as disk>=D_INCONSISTENT,
1284                    and it is D_DISKLESS here, local_cnt can only go down, it can
1285                    not increase... It will reach zero */
1286                 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1287                 mdev->rs_total = 0;
1288                 mdev->rs_failed = 0;
1289                 atomic_set(&mdev->rs_pending_cnt, 0);
1290
1291                 spin_lock_irq(&mdev->req_lock);
1292                 _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
1293                 spin_unlock_irq(&mdev->req_lock);
1294
1295                 if (eh == EP_CALL_HELPER)
1296                         drbd_khelper(mdev, "local-io-error");
1297         }
1298
1299         if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
1300
1301                 if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS*/ {
1302                         if (drbd_send_state(mdev))
1303                                 dev_warn(DEV, "Notified peer that my disk is broken.\n");
1304                         else
1305                                 dev_err(DEV, "Sending state in drbd_io_error() failed\n");
1306                 }
1307
1308                 wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1309                 lc_destroy(mdev->resync);
1310                 mdev->resync = NULL;
1311                 lc_destroy(mdev->act_log);
1312                 mdev->act_log = NULL;
1313                 __no_warn(local,
1314                         drbd_free_bc(mdev->ldev);
1315                         mdev->ldev = NULL;);
1316
1317                 if (mdev->md_io_tmpp)
1318                         __free_page(mdev->md_io_tmpp);
1319         }
1320
1321         /* Disks got bigger while they were detached */
1322         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1323             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1324                 if (ns.conn == C_CONNECTED)
1325                         resync_after_online_grow(mdev);
1326         }
1327
1328         /* A resync finished or aborted, wake paused devices... */
1329         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1330             (os.peer_isp && !ns.peer_isp) ||
1331             (os.user_isp && !ns.user_isp))
1332                 resume_next_sg(mdev);
1333
1334         /* Upon network connection, we need to start the receiver */
1335         if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1336                 drbd_thread_start(&mdev->receiver);
1337
1338         /* Terminate worker thread if we are unconfigured - it will be
1339            restarted as needed... */
1340         if (ns.disk == D_DISKLESS &&
1341             ns.conn == C_STANDALONE &&
1342             ns.role == R_SECONDARY) {
1343                 if (os.aftr_isp != ns.aftr_isp)
1344                         resume_next_sg(mdev);
1345                 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1346                 if (test_bit(DEVICE_DYING, &mdev->flags))
1347                         drbd_thread_stop_nowait(&mdev->worker);
1348         }
1349
1350         drbd_md_sync(mdev);
1351 }
1352
1353 static int w_new_current_uuid(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1354 {
1355         if (get_ldev(mdev)) {
1356                 drbd_uuid_new_current(mdev);
1357                 if (get_net_conf(mdev)) {
1358                         drbd_send_uuids(mdev);
1359                         put_net_conf(mdev);
1360                 }
1361                 drbd_md_sync(mdev);
1362                 put_ldev(mdev);
1363         }
1364         atomic_dec(&mdev->new_c_uuid);
1365         wake_up(&mdev->misc_wait);
1366
1367         return 1;
1368 }
1369
1370 static int drbd_thread_setup(void *arg)
1371 {
1372         struct drbd_thread *thi = (struct drbd_thread *) arg;
1373         struct drbd_conf *mdev = thi->mdev;
1374         unsigned long flags;
1375         int retval;
1376
1377 restart:
1378         retval = thi->function(thi);
1379
1380         spin_lock_irqsave(&thi->t_lock, flags);
1381
1382         /* if the receiver has been "Exiting", the last thing it did
1383          * was set the conn state to "StandAlone",
1384          * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1385          * and receiver thread will be "started".
1386          * drbd_thread_start needs to set "Restarting" in that case.
1387          * t_state check and assignment needs to be within the same spinlock,
1388          * so either thread_start sees Exiting, and can remap to Restarting,
1389          * or thread_start sees None, and can proceed as normal.
1390          */
1391
1392         if (thi->t_state == Restarting) {
1393                 dev_info(DEV, "Restarting %s\n", current->comm);
1394                 thi->t_state = Running;
1395                 spin_unlock_irqrestore(&thi->t_lock, flags);
1396                 goto restart;
1397         }
1398
1399         thi->task = NULL;
1400         thi->t_state = None;
1401         smp_mb();
1402         complete(&thi->stop);
1403         spin_unlock_irqrestore(&thi->t_lock, flags);
1404
1405         dev_info(DEV, "Terminating %s\n", current->comm);
1406
1407         /* Release mod reference taken when thread was started */
1408         module_put(THIS_MODULE);
1409         return retval;
1410 }
1411
1412 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1413                       int (*func) (struct drbd_thread *))
1414 {
1415         spin_lock_init(&thi->t_lock);
1416         thi->task    = NULL;
1417         thi->t_state = None;
1418         thi->function = func;
1419         thi->mdev = mdev;
1420 }
1421
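/* Start one of the three per-device threads, or arrange for a restart if it
 * is currently Exiting.  Takes a module reference that drbd_thread_setup()
 * releases when the thread terminates.  Returns TRUE on success (or if the
 * thread is already Running/Restarting), FALSE if the module reference or the
 * kthread could not be obtained. */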
1422 int drbd_thread_start(struct drbd_thread *thi)
1423 {
1424         struct drbd_conf *mdev = thi->mdev;
1425         struct task_struct *nt;
1426         unsigned long flags;
1427
1428         const char *me =
1429                 thi == &mdev->receiver ? "receiver" :
1430                 thi == &mdev->asender  ? "asender"  :
1431                 thi == &mdev->worker   ? "worker"   : "NONSENSE";
1432
1433         /* is used from state engine doing drbd_thread_stop_nowait,
1434          * while holding the req lock irqsave */
1435         spin_lock_irqsave(&thi->t_lock, flags);
1436
1437         switch (thi->t_state) {
1438         case None:
1439                 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1440                                 me, current->comm, current->pid);
1441
1442                 /* Get ref on module for thread - this is released when thread exits */
1443                 if (!try_module_get(THIS_MODULE)) {
1444                         dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1445                         spin_unlock_irqrestore(&thi->t_lock, flags);
1446                         return FALSE;
1447                 }
1448
1449                 init_completion(&thi->stop);
1450                 D_ASSERT(thi->task == NULL);
1451                 thi->reset_cpu_mask = 1;
1452                 thi->t_state = Running;
1453                 spin_unlock_irqrestore(&thi->t_lock, flags);
1454                 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1455
1456                 nt = kthread_create(drbd_thread_setup, (void *) thi,
1457                                     "drbd%d_%s", mdev_to_minor(mdev), me);
1458
1459                 if (IS_ERR(nt)) {
1460                         dev_err(DEV, "Couldn't start thread\n");
1461
1462                         module_put(THIS_MODULE);
1463                         return FALSE;
1464                 }
1465                 spin_lock_irqsave(&thi->t_lock, flags);
1466                 thi->task = nt;
1467                 thi->t_state = Running;
1468                 spin_unlock_irqrestore(&thi->t_lock, flags);
1469                 wake_up_process(nt);
1470                 break;
1471         case Exiting:
1472                 thi->t_state = Restarting;
1473                 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1474                                 me, current->comm, current->pid);
1475                 /* fall through */
1476         case Running:
1477         case Restarting:
1478         default:
1479                 spin_unlock_irqrestore(&thi->t_lock, flags);
1480                 break;
1481         }
1482
1483         return TRUE;
1484 }
1485
1486
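/* Ask a thread to terminate (or to restart, if @restart is set).  A thread
 * that is not running is simply (re)started when @restart is set.  Otherwise
 * record the new target state and poke the thread with DRBD_SIGKILL; with
 * @wait the caller blocks on &thi->stop until drbd_thread_setup() completes
 * it. */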
1487 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1488 {
1489         unsigned long flags;
1490
1491         enum drbd_thread_state ns = restart ? Restarting : Exiting;
1492
1493         /* may be called from state engine, holding the req lock irqsave */
1494         spin_lock_irqsave(&thi->t_lock, flags);
1495
1496         if (thi->t_state == None) {
1497                 spin_unlock_irqrestore(&thi->t_lock, flags);
1498                 if (restart)
1499                         drbd_thread_start(thi);
1500                 return;
1501         }
1502
1503         if (thi->t_state != ns) {
1504                 if (thi->task == NULL) {
1505                         spin_unlock_irqrestore(&thi->t_lock, flags);
1506                         return;
1507                 }
1508
1509                 thi->t_state = ns;
1510                 smp_mb();
1511                 init_completion(&thi->stop);
1512                 if (thi->task != current)
1513                         force_sig(DRBD_SIGKILL, thi->task);
1514
1515         }
1516
1517         spin_unlock_irqrestore(&thi->t_lock, flags);
1518
1519         if (wait)
1520                 wait_for_completion(&thi->stop);
1521 }
1522
1523 #ifdef CONFIG_SMP
1524 /**
1525  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1526  * @mdev:       DRBD device.
1527  *
1528  * Forces all threads of a device onto the same CPU. This is beneficial for
1529  * DRBD's performance. May be overridden by the user's configuration.
1530  */
1531 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1532 {
1533         int ord, cpu;
1534
1535         /* user override. */
1536         if (cpumask_weight(mdev->cpu_mask))
1537                 return;
1538
1539         ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1540         for_each_online_cpu(cpu) {
1541                 if (ord-- == 0) {
1542                         cpumask_set_cpu(cpu, mdev->cpu_mask);
1543                         return;
1544                 }
1545         }
1546         /* should not be reached */
1547         cpumask_setall(mdev->cpu_mask);
1548 }
1549
1550 /**
1551  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1552  * @mdev:       DRBD device.
1553  *
1554  * Call this in the "main loop" of _all_ threads; no mutex is needed, since
1555  * current won't die prematurely.
1556  */
1557 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1558 {
1559         struct task_struct *p = current;
1560         struct drbd_thread *thi =
1561                 p == mdev->asender.task  ? &mdev->asender  :
1562                 p == mdev->receiver.task ? &mdev->receiver :
1563                 p == mdev->worker.task   ? &mdev->worker   :
1564                 NULL;
1565         ERR_IF(thi == NULL)
1566                 return;
1567         if (!thi->reset_cpu_mask)
1568                 return;
1569         thi->reset_cpu_mask = 0;
1570         set_cpus_allowed_ptr(p, mdev->cpu_mask);
1571 }
1572 #endif
1573
1574 /* the appropriate socket mutex must be held already */
1575 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1576                           enum drbd_packets cmd, struct p_header *h,
1577                           size_t size, unsigned msg_flags)
1578 {
1579         int sent, ok;
1580
1581         ERR_IF(!h) return FALSE;
1582         ERR_IF(!size) return FALSE;
1583
1584         h->magic   = BE_DRBD_MAGIC;
1585         h->command = cpu_to_be16(cmd);
1586         h->length  = cpu_to_be16(size-sizeof(struct p_header));
1587
1588         sent = drbd_send(mdev, sock, h, size, msg_flags);
1589
1590         ok = (sent == size);
1591         if (!ok)
1592                 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1593                     cmdname(cmd), (int)size, sent);
1594         return ok;
1595 }
1596
1597 /* don't pass the socket. we may only look at it
1598  * when we hold the appropriate socket mutex.
1599  */
1600 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1601                   enum drbd_packets cmd, struct p_header *h, size_t size)
1602 {
1603         int ok = 0;
1604         struct socket *sock;
1605
1606         if (use_data_socket) {
1607                 mutex_lock(&mdev->data.mutex);
1608                 sock = mdev->data.socket;
1609         } else {
1610                 mutex_lock(&mdev->meta.mutex);
1611                 sock = mdev->meta.socket;
1612         }
1613
1614         /* drbd_disconnect() could have called drbd_free_sock()
1615          * while we were waiting for the mutex... */
1616         if (likely(sock != NULL))
1617                 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1618
1619         if (use_data_socket)
1620                 mutex_unlock(&mdev->data.mutex);
1621         else
1622                 mutex_unlock(&mdev->meta.mutex);
1623         return ok;
1624 }
1625
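/* Like drbd_send_cmd(), but for a command whose payload is a plain byte
 * buffer: build the header on the stack and push header and payload back to
 * back over the data socket. */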
1626 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1627                    size_t size)
1628 {
1629         struct p_header h;
1630         int ok;
1631
1632         h.magic   = BE_DRBD_MAGIC;
1633         h.command = cpu_to_be16(cmd);
1634         h.length  = cpu_to_be16(size);
1635
1636         if (!drbd_get_data_sock(mdev))
1637                 return 0;
1638
1639         ok = (sizeof(h) ==
1640                 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1641         ok = ok && (size ==
1642                 drbd_send(mdev, mdev->data.socket, data, size, 0));
1643
1644         drbd_put_data_sock(mdev);
1645
1646         return ok;
1647 }
1648
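/* Send the syncer configuration to the peer: the resync rate and, depending
 * on the agreed protocol version, the verify_alg (apv >= 88) and csums_alg
 * (apv >= 89) names.  Packet size and command code are chosen to match the
 * agreed protocol version. */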
1649 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1650 {
1651         struct p_rs_param_89 *p;
1652         struct socket *sock;
1653         int size, rv;
1654         const int apv = mdev->agreed_pro_version;
1655
1656         size = apv <= 87 ? sizeof(struct p_rs_param)
1657                 : apv == 88 ? sizeof(struct p_rs_param)
1658                         + strlen(mdev->sync_conf.verify_alg) + 1
1659                 : /* 89 */    sizeof(struct p_rs_param_89);
1660
1661         /* used from admin command context and receiver/worker context.
1662          * to avoid kmalloc, grab the socket right here,
1663          * then use the pre-allocated sbuf there */
1664         mutex_lock(&mdev->data.mutex);
1665         sock = mdev->data.socket;
1666
1667         if (likely(sock != NULL)) {
1668                 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1669
1670                 p = &mdev->data.sbuf.rs_param_89;
1671
1672                 /* initialize verify_alg and csums_alg */
1673                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1674
1675                 p->rate = cpu_to_be32(sc->rate);
1676
1677                 if (apv >= 88)
1678                         strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1679                 if (apv >= 89)
1680                         strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1681
1682                 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1683         } else
1684                 rv = 0; /* not ok */
1685
1686         mutex_unlock(&mdev->data.mutex);
1687
1688         return rv;
1689 }
1690
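/* Announce our net configuration (wire protocol, after-split-brain policies,
 * two-primaries setting, connection flags and, for apv >= 87, the integrity
 * algorithm) so the peer can check that both sides agree.  Refuses to send
 * --dry-run to a peer older than apv 92. */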
1691 int drbd_send_protocol(struct drbd_conf *mdev)
1692 {
1693         struct p_protocol *p;
1694         int size, cf, rv;
1695
1696         size = sizeof(struct p_protocol);
1697
1698         if (mdev->agreed_pro_version >= 87)
1699                 size += strlen(mdev->net_conf->integrity_alg) + 1;
1700
1701         /* we must not recurse into our own queue,
1702          * as that is blocked during handshake */
1703         p = kmalloc(size, GFP_NOIO);
1704         if (p == NULL)
1705                 return 0;
1706
1707         p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
1708         p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
1709         p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
1710         p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
1711         p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1712
1713         cf = 0;
1714         if (mdev->net_conf->want_lose)
1715                 cf |= CF_WANT_LOSE;
1716         if (mdev->net_conf->dry_run) {
1717                 if (mdev->agreed_pro_version >= 92)
1718                         cf |= CF_DRY_RUN;
1719                 else {
1720                         dev_err(DEV, "--dry-run is not supported by peer");
1721                         kfree(p);
1722                         return 0;
1723                 }
1724         }
1725         p->conn_flags    = cpu_to_be32(cf);
1726
1727         if (mdev->agreed_pro_version >= 87)
1728                 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1729
1730         rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1731                            (struct p_header *)p, size);
1732         kfree(p);
1733         return rv;
1734 }
1735
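/* Send our UUID set, the current number of bits set in the bitmap, and a set
 * of flag bits (1: want_lose/discard-my-data, 2: crashed primary, 4: the new
 * disk state will be inconsistent, 8: skip initial sync) to the peer.
 * Requires at least a D_NEGOTIATING local disk to read the UUIDs from. */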
1736 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1737 {
1738         struct p_uuids p;
1739         int i;
1740
1741         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1742                 return 1;
1743
1744         for (i = UI_CURRENT; i < UI_SIZE; i++)
1745                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1746
1747         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
1748         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
1749         uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
1750         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1751         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
1752         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
1753
1754         put_ldev(mdev);
1755
1756         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
1757                              (struct p_header *)&p, sizeof(p));
1758 }
1759
1760 int drbd_send_uuids(struct drbd_conf *mdev)
1761 {
1762         return _drbd_send_uuids(mdev, 0);
1763 }
1764
1765 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
1766 {
1767         return _drbd_send_uuids(mdev, 8);
1768 }
1769
1770
1771 int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
1772 {
1773         struct p_rs_uuid p;
1774
1775         p.uuid = cpu_to_be64(val);
1776
1777         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
1778                              (struct p_header *)&p, sizeof(p));
1779 }
1780
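/* Tell the peer about our sizes: backing device capacity, the user-configured
 * size limit, our currently exposed capacity (or 0 if @trigger_reply only
 * asks the peer to report back), the maximum segment size of our request
 * queue, the queue ordering mode, and the dds flags passed in by the caller. */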
1781 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
1782 {
1783         struct p_sizes p;
1784         sector_t d_size, u_size;
1785         int q_order_type;
1786         int ok;
1787
1788         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
1789                 D_ASSERT(mdev->ldev->backing_bdev);
1790                 d_size = drbd_get_max_capacity(mdev->ldev);
1791                 u_size = mdev->ldev->dc.disk_size;
1792                 q_order_type = drbd_queue_order_type(mdev);
1793                 put_ldev(mdev);
1794         } else {
1795                 d_size = 0;
1796                 u_size = 0;
1797                 q_order_type = QUEUE_ORDERED_NONE;
1798         }
1799
1800         p.d_size = cpu_to_be64(d_size);
1801         p.u_size = cpu_to_be64(u_size);
1802         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
1803         p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
1804         p.queue_order_type = cpu_to_be16(q_order_type);
1805         p.dds_flags = cpu_to_be16(flags);
1806
1807         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
1808                            (struct p_header *)&p, sizeof(p));
1809         return ok;
1810 }
1811
1812 /**
1813  * drbd_send_state() - Sends the drbd state to the peer
1814  * @mdev:       DRBD device.
1815  */
1816 int drbd_send_state(struct drbd_conf *mdev)
1817 {
1818         struct socket *sock;
1819         struct p_state p;
1820         int ok = 0;
1821
1822         /* Grab the state lock so we won't send state if we're in the middle
1823          * of a cluster wide state change on another thread */
1824         drbd_state_lock(mdev);
1825
1826         mutex_lock(&mdev->data.mutex);
1827
1828         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
1829         sock = mdev->data.socket;
1830
1831         if (likely(sock != NULL)) {
1832                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
1833                                     (struct p_header *)&p, sizeof(p), 0);
1834         }
1835
1836         mutex_unlock(&mdev->data.mutex);
1837
1838         drbd_state_unlock(mdev);
1839         return ok;
1840 }
1841
1842 int drbd_send_state_req(struct drbd_conf *mdev,
1843         union drbd_state mask, union drbd_state val)
1844 {
1845         struct p_req_state p;
1846
1847         p.mask    = cpu_to_be32(mask.i);
1848         p.val     = cpu_to_be32(val.i);
1849
1850         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
1851                              (struct p_header *)&p, sizeof(p));
1852 }
1853
1854 int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
1855 {
1856         struct p_req_state_reply p;
1857
1858         p.retcode    = cpu_to_be32(retcode);
1859
1860         return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
1861                              (struct p_header *)&p, sizeof(p));
1862 }
1863
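/* Try to run-length + VLI encode the next chunk of the bitmap into p->code.
 * Returns the number of code bytes produced, 0 if the feature cannot be used
 * (use_rle disabled, peer older than apv 90, nothing left to send) or if this
 * chunk turned out to be incompressible, and -1 on an unexpected zero run
 * length, which can only happen if the bitmap is modified while we scan it. */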
1864 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
1865         struct p_compressed_bm *p,
1866         struct bm_xfer_ctx *c)
1867 {
1868         struct bitstream bs;
1869         unsigned long plain_bits;
1870         unsigned long tmp;
1871         unsigned long rl;
1872         unsigned len;
1873         unsigned toggle;
1874         int bits;
1875
1876         /* may we use this feature? */
1877         if ((mdev->sync_conf.use_rle == 0) ||
1878                 (mdev->agreed_pro_version < 90))
1879                         return 0;
1880
1881         if (c->bit_offset >= c->bm_bits)
1882                 return 0; /* nothing to do. */
1883
1884         /* use at most this many bytes */
1885         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
1886         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
1887         /* plain bits covered in this code string */
1888         plain_bits = 0;
1889
1890         /* p->encoding & 0x80 stores whether the first run length is set.
1891          * bit offset is implicit.
1892          * start with toggle == 2 to be able to tell the first iteration */
1893         toggle = 2;
1894
1895         /* see how many plain bits we can stuff into one packet
1896          * using RLE and VLI. */
1897         do {
1898                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
1899                                     : _drbd_bm_find_next(mdev, c->bit_offset);
1900                 if (tmp == -1UL)
1901                         tmp = c->bm_bits;
1902                 rl = tmp - c->bit_offset;
1903
1904                 if (toggle == 2) { /* first iteration */
1905                         if (rl == 0) {
1906                                 /* the first checked bit was set,
1907                                  * store start value, */
1908                                 DCBP_set_start(p, 1);
1909                                 /* but skip encoding of zero run length */
1910                                 toggle = !toggle;
1911                                 continue;
1912                         }
1913                         DCBP_set_start(p, 0);
1914                 }
1915
1916                 /* paranoia: catch zero runlength.
1917                  * can only happen if bitmap is modified while we scan it. */
1918                 if (rl == 0) {
1919                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1920                             "t:%u bo:%lu\n", toggle, c->bit_offset);
1921                         return -1;
1922                 }
1923
1924                 bits = vli_encode_bits(&bs, rl);
1925                 if (bits == -ENOBUFS) /* buffer full */
1926                         break;
1927                 if (bits <= 0) {
1928                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1929                         return 0;
1930                 }
1931
1932                 toggle = !toggle;
1933                 plain_bits += rl;
1934                 c->bit_offset = tmp;
1935         } while (c->bit_offset < c->bm_bits);
1936
1937         len = bs.cur.b - p->code + !!bs.cur.bit;
1938
1939         if (plain_bits < (len << 3)) {
1940                 /* incompressible with this method.
1941                  * we need to rewind both word and bit position. */
1942                 c->bit_offset -= plain_bits;
1943                 bm_xfer_ctx_bit_to_word_offset(c);
1944                 c->bit_offset = c->word_offset * BITS_PER_LONG;
1945                 return 0;
1946         }
1947
1948         /* RLE + VLI was able to compress it just fine.
1949          * update c->word_offset. */
1950         bm_xfer_ctx_bit_to_word_offset(c);
1951
1952         /* store pad_bits */
1953         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1954
1955         return len;
1956 }
1957
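/* Send one bitmap packet: RLE+VLI compressed via fill_bitmap_rle_bits() when
 * that pays off, otherwise a plain chunk of bitmap words.  Returns OK while
 * there is more to send, DONE once the last bit went out, FAILED on errors. */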
1958 enum { OK, FAILED, DONE }
1959 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
1960         struct p_header *h, struct bm_xfer_ctx *c)
1961 {
1962         struct p_compressed_bm *p = (void*)h;
1963         unsigned long num_words;
1964         int len;
1965         int ok;
1966
1967         len = fill_bitmap_rle_bits(mdev, p, c);
1968
1969         if (len < 0)
1970                 return FAILED;
1971
1972         if (len) {
1973                 DCBP_set_code(p, RLE_VLI_Bits);
1974                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
1975                         sizeof(*p) + len, 0);
1976
1977                 c->packets[0]++;
1978                 c->bytes[0] += sizeof(*p) + len;
1979
1980                 if (c->bit_offset >= c->bm_bits)
1981                         len = 0; /* DONE */
1982         } else {
1983                 /* was not compressible.
1984                  * send a buffer full of plain text bits instead. */
1985                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
1986                 len = num_words * sizeof(long);
1987                 if (len)
1988                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
1989                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
1990                                    h, sizeof(struct p_header) + len, 0);
1991                 c->word_offset += num_words;
1992                 c->bit_offset = c->word_offset * BITS_PER_LONG;
1993
1994                 c->packets[1]++;
1995                 c->bytes[1] += sizeof(struct p_header) + len;
1996
1997                 if (c->bit_offset > c->bm_bits)
1998                         c->bit_offset = c->bm_bits;
1999         }
2000         ok = ok ? ((len == 0) ? DONE : OK) : FAILED;
2001
2002         if (ok == DONE)
2003                 INFO_bm_xfer_stats(mdev, "send", c);
2004         return ok;
2005 }
2006
2007 /* See the comment at receive_bitmap() */
2008 int _drbd_send_bitmap(struct drbd_conf *mdev)
2009 {
2010         struct bm_xfer_ctx c;
2011         struct p_header *p;
2012         int ret;
2013
2014         ERR_IF(!mdev->bitmap) return FALSE;
2015
2016         /* maybe we should use some per thread scratch page,
2017          * and allocate that during initial device creation? */
2018         p = (struct p_header *) __get_free_page(GFP_NOIO);
2019         if (!p) {
2020                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2021                 return FALSE;
2022         }
2023
2024         if (get_ldev(mdev)) {
2025                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2026                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2027                         drbd_bm_set_all(mdev);
2028                         if (drbd_bm_write(mdev)) {
2029                                 /* write_bm did fail! Leave the full sync flag set in the meta data
2030                                  * but otherwise process as per normal - need to tell other
2031                                  * side that a full resync is required! */
2032                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
2033                         } else {
2034                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2035                                 drbd_md_sync(mdev);
2036                         }
2037                 }
2038                 put_ldev(mdev);
2039         }
2040
2041         c = (struct bm_xfer_ctx) {
2042                 .bm_bits = drbd_bm_bits(mdev),
2043                 .bm_words = drbd_bm_words(mdev),
2044         };
2045
2046         do {
2047                 ret = send_bitmap_rle_or_plain(mdev, p, &c);
2048         } while (ret == OK);
2049
2050         free_page((unsigned long) p);
2051         return (ret == DONE);
2052 }
2053
2054 int drbd_send_bitmap(struct drbd_conf *mdev)
2055 {
2056         int err;
2057
2058         if (!drbd_get_data_sock(mdev))
2059                 return -1;
2060         err = !_drbd_send_bitmap(mdev);
2061         drbd_put_data_sock(mdev);
2062         return err;
2063 }
2064
2065 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2066 {
2067         int ok;
2068         struct p_barrier_ack p;
2069
2070         p.barrier  = barrier_nr;
2071         p.set_size = cpu_to_be32(set_size);
2072
2073         if (mdev->state.conn < C_CONNECTED)
2074                 return FALSE;
2075         ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2076                         (struct p_header *)&p, sizeof(p));
2077         return ok;
2078 }
2079
2080 /**
2081  * _drbd_send_ack() - Sends an ack packet
2082  * @mdev:       DRBD device.
2083  * @cmd:        Packet command code.
2084  * @sector:     sector, needs to be in big endian byte order
2085  * @blksize:    size in byte, needs to be in big endian byte order
2086  * @block_id:   Id, big endian byte order
2087  */
2088 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2089                           u64 sector,
2090                           u32 blksize,
2091                           u64 block_id)
2092 {
2093         int ok;
2094         struct p_block_ack p;
2095
2096         p.sector   = sector;
2097         p.block_id = block_id;
2098         p.blksize  = blksize;
2099         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2100
2101         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2102                 return FALSE;
2103         ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2104                                 (struct p_header *)&p, sizeof(p));
2105         return ok;
2106 }
2107
2108 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2109                      struct p_data *dp)
2110 {
2111         const int header_size = sizeof(struct p_data)
2112                               - sizeof(struct p_header);
2113         int data_size  = ((struct p_header *)dp)->length - header_size;
2114
2115         return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2116                               dp->block_id);
2117 }
2118
2119 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2120                      struct p_block_req *rp)
2121 {
2122         return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2123 }
2124
2125 /**
2126  * drbd_send_ack() - Sends an ack packet
2127  * @mdev:       DRBD device.
2128  * @cmd:        Packet command code.
2129  * @e:          Epoch entry.
2130  */
2131 int drbd_send_ack(struct drbd_conf *mdev,
2132         enum drbd_packets cmd, struct drbd_epoch_entry *e)
2133 {
2134         return _drbd_send_ack(mdev, cmd,
2135                               cpu_to_be64(e->sector),
2136                               cpu_to_be32(e->size),
2137                               e->block_id);
2138 }
2139
2140 /* This function misuses the block_id field to signal if the blocks
2141  * are in sync or not. */
2142 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2143                      sector_t sector, int blksize, u64 block_id)
2144 {
2145         return _drbd_send_ack(mdev, cmd,
2146                               cpu_to_be64(sector),
2147                               cpu_to_be32(blksize),
2148                               cpu_to_be64(block_id));
2149 }
2150
2151 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2152                        sector_t sector, int size, u64 block_id)
2153 {
2154         int ok;
2155         struct p_block_req p;
2156
2157         p.sector   = cpu_to_be64(sector);
2158         p.block_id = block_id;
2159         p.blksize  = cpu_to_be32(size);
2160
2161         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2162                                 (struct p_header *)&p, sizeof(p));
2163         return ok;
2164 }
2165
2166 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2167                             sector_t sector, int size,
2168                             void *digest, int digest_size,
2169                             enum drbd_packets cmd)
2170 {
2171         int ok;
2172         struct p_block_req p;
2173
2174         p.sector   = cpu_to_be64(sector);
2175         p.block_id = BE_DRBD_MAGIC + 0xbeef;
2176         p.blksize  = cpu_to_be32(size);
2177
2178         p.head.magic   = BE_DRBD_MAGIC;
2179         p.head.command = cpu_to_be16(cmd);
2180         p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size);
2181
2182         mutex_lock(&mdev->data.mutex);
2183
2184         ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2185         ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2186
2187         mutex_unlock(&mdev->data.mutex);
2188
2189         return ok;
2190 }
2191
2192 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2193 {
2194         int ok;
2195         struct p_block_req p;
2196
2197         p.sector   = cpu_to_be64(sector);
2198         p.block_id = BE_DRBD_MAGIC + 0xbabe;
2199         p.blksize  = cpu_to_be32(size);
2200
2201         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2202                            (struct p_header *)&p, sizeof(p));
2203         return ok;
2204 }
2205
2206 static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
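/* Send a single delay probe (our probe sequence number plus the microseconds
 * elapsed since dps_time) over the given socket, if it is still around. */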
2207 {
2208         struct p_delay_probe dp;
2209         int offset, ok = 0;
2210         struct timeval now;
2211
2212         mutex_lock(&ds->mutex);
2213         if (likely(ds->socket)) {
2214                 do_gettimeofday(&now);
2215                 offset = now.tv_usec - mdev->dps_time.tv_usec +
2216                          (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
2217                 dp.seq_num  = cpu_to_be32(mdev->delay_seq);
2218                 dp.offset   = cpu_to_be32(offset);
2219
2220                 ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
2221                                     (struct p_header *)&dp, sizeof(dp), 0);
2222         }
2223         mutex_unlock(&ds->mutex);
2224
2225         return ok;
2226 }
2227
2228 static int drbd_send_delay_probes(struct drbd_conf *mdev)
2229 {
2230         int ok;
2231
2232         mdev->delay_seq++;
2233         do_gettimeofday(&mdev->dps_time);
2234         ok = drbd_send_delay_probe(mdev, &mdev->meta);
2235         ok = ok && drbd_send_delay_probe(mdev, &mdev->data);
2236
2237         mdev->dp_volume_last = mdev->send_cnt;
2238         mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);
2239
2240         return ok;
2241 }
2242
2243 /* called on sndtimeo
2244  * returns FALSE if we should retry,
2245  * TRUE if we think connection is dead
2246  * TRUE if we think the connection is dead
2247 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2248 {
2249         int drop_it;
2250         /* long elapsed = (long)(jiffies - mdev->last_received); */
2251
2252         drop_it =   mdev->meta.socket == sock
2253                 || !mdev->asender.task
2254                 || get_t_state(&mdev->asender) != Running
2255                 || mdev->state.conn < C_CONNECTED;
2256
2257         if (drop_it)
2258                 return TRUE;
2259
2260         drop_it = !--mdev->ko_count;
2261         if (!drop_it) {
2262                 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2263                        current->comm, current->pid, mdev->ko_count);
2264                 request_ping(mdev);
2265         }
2266
2267         return drop_it; /* && (mdev->state == R_PRIMARY) */;
2268 }
2269
2270 /* The idea of sendpage seems to be to put some kind of reference
2271  * to the page into the skb, and to hand it over to the NIC. In
2272  * this process get_page() gets called.
2273  *
2274  * As soon as the page was really sent over the network put_page()
2275  * gets called by some part of the network layer. [ NIC driver? ]
2276  *
2277  * [ get_page() / put_page() increment/decrement the count. If count
2278  *   reaches 0 the page will be freed. ]
2279  *
2280  * This works nicely with pages from FSs.
2281  * But this means that in protocol A we might signal IO completion too early!
2282  *
2283  * In order not to corrupt data during a resync we must make sure
2284  * that we do not reuse our own buffer pages (EEs) too early, therefore
2285  * we have the net_ee list.
2286  *
2287  * XFS still seems to have problems here: it submits pages with page_count == 0!
2288  * As a workaround, we disable sendpage on pages
2289  * with page_count == 0 or PageSlab.
2290  */
2291 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2292                    int offset, size_t size)
2293 {
2294         int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0);
2295         kunmap(page);
2296         if (sent == size)
2297                 mdev->send_cnt += size>>9;
2298         return sent == size;
2299 }
2300
2301 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2302                     int offset, size_t size)
2303 {
2304         mm_segment_t oldfs = get_fs();
2305         int sent, ok;
2306         int len = size;
2307
2308         /* e.g. XFS meta- & log-data is in slab pages, which have a
2309          * page_count of 0 and/or have PageSlab() set.
2310          * we cannot use send_page for those, as that does get_page();
2311          * put_page(); and would cause either a VM_BUG directly, or
2312          * __page_cache_release a page that would actually still be referenced
2313          * by someone, leading to some obscure delayed Oops somewhere else. */
2314         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2315                 return _drbd_no_send_page(mdev, page, offset, size);
2316
2317         drbd_update_congested(mdev);
2318         set_fs(KERNEL_DS);
2319         do {
2320                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2321                                                         offset, len,
2322                                                         MSG_NOSIGNAL);
2323                 if (sent == -EAGAIN) {
2324                         if (we_should_drop_the_connection(mdev,
2325                                                           mdev->data.socket))
2326                                 break;
2327                         else
2328                                 continue;
2329                 }
2330                 if (sent <= 0) {
2331                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2332                              __func__, (int)size, len, sent);
2333                         break;
2334                 }
2335                 len    -= sent;
2336                 offset += sent;
2337         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2338         set_fs(oldfs);
2339         clear_bit(NET_CONGESTED, &mdev->flags);
2340
2341         ok = (len == 0);
2342         if (likely(ok))
2343                 mdev->send_cnt += size>>9;
2344         return ok;
2345 }
2346
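/* _drbd_send_bio() copies each bio segment through kmap(), which is safe for
 * any kind of page; drbd_send_dblock() uses it for protocol A, where sendpage
 * could signal IO completion before the data really left (see the sendpage
 * comment above).  _drbd_send_zc_bio() below goes zero-copy via sendpage
 * where possible. */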
2347 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2348 {
2349         struct bio_vec *bvec;
2350         int i;
2351         __bio_for_each_segment(bvec, bio, i, 0) {
2352                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2353                                      bvec->bv_offset, bvec->bv_len))
2354                         return 0;
2355         }
2356         return 1;
2357 }
2358
2359 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2360 {
2361         struct bio_vec *bvec;
2362         int i;
2363         __bio_for_each_segment(bvec, bio, i, 0) {
2364                 if (!_drbd_send_page(mdev, bvec->bv_page,
2365                                      bvec->bv_offset, bvec->bv_len))
2366                         return 0;
2367         }
2368
2369         return 1;
2370 }
2371
2372 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2373 {
2374         struct page *page = e->pages;
2375         unsigned len = e->size;
2376         page_chain_for_each(page) {
2377                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2378                 if (!_drbd_send_page(mdev, page, 0, l))
2379                         return 0;
2380                 len -= l;
2381         }
2382         return 1;
2383 }
2384
2385 static void consider_delay_probes(struct drbd_conf *mdev)
2386 {
2387         if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
2388                 return;
2389
2390         if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
2391                 drbd_send_delay_probes(mdev);
2392 }
2393
2394 static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
2395 {
2396         if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
2397                 drbd_send_delay_probes(mdev);
2398
2399         return 1;
2400 }
2401
2402 static void delay_probe_timer_fn(unsigned long data)
2403 {
2404         struct drbd_conf *mdev = (struct drbd_conf *) data;
2405
2406         if (list_empty(&mdev->delay_probe_work.list))
2407                 drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
2408 }
2409
2410 /* Used to send write requests
2411  * R_PRIMARY -> Peer    (P_DATA)
2412  */
2413 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2414 {
2415         int ok = 1;
2416         struct p_data p;
2417         unsigned int dp_flags = 0;
2418         void *dgb;
2419         int dgs;
2420
2421         if (!drbd_get_data_sock(mdev))
2422                 return 0;
2423
2424         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2425                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2426
2427         p.head.magic   = BE_DRBD_MAGIC;
2428         p.head.command = cpu_to_be16(P_DATA);
2429         p.head.length  =
2430                 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size);
2431
2432         p.sector   = cpu_to_be64(req->sector);
2433         p.block_id = (unsigned long)req;
2434         p.seq_num  = cpu_to_be32(req->seq_num =
2435                                  atomic_add_return(1, &mdev->packet_seq));
2436         dp_flags = 0;
2437
2438         /* NOTE: no need to check if barriers supported here as we would
2439          *       not pass the test in make_request_common in that case
2440          */
2441         if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
2442                 dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
2443                 /* dp_flags |= DP_HARDBARRIER; */
2444         }
2445         if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
2446                 dp_flags |= DP_RW_SYNC;
2447         /* for now handle SYNCIO and UNPLUG
2448          * as if they still were one and the same flag */
2449         if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
2450                 dp_flags |= DP_RW_SYNC;
2451         if (mdev->state.conn >= C_SYNC_SOURCE &&
2452             mdev->state.conn <= C_PAUSED_SYNC_T)
2453                 dp_flags |= DP_MAY_SET_IN_SYNC;
2454
2455         p.dp_flags = cpu_to_be32(dp_flags);
2456         set_bit(UNPLUG_REMOTE, &mdev->flags);
2457         ok = (sizeof(p) ==
2458                 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
2459         if (ok && dgs) {
2460                 dgb = mdev->int_dig_out;
2461                 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2462                 ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
2463         }
2464         if (ok) {
2465                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
2466                         ok = _drbd_send_bio(mdev, req->master_bio);
2467                 else
2468                         ok = _drbd_send_zc_bio(mdev, req->master_bio);
2469         }
2470
2471         drbd_put_data_sock(mdev);
2472
2473         if (ok)
2474                 consider_delay_probes(mdev);
2475
2476         return ok;
2477 }
2478
2479 /* answer packet, used to send data back for read requests:
2480  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
2481  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
2482  */
2483 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2484                     struct drbd_epoch_entry *e)
2485 {
2486         int ok;
2487         struct p_data p;
2488         void *dgb;
2489         int dgs;
2490
2491         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2492                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2493
2494         p.head.magic   = BE_DRBD_MAGIC;
2495         p.head.command = cpu_to_be16(cmd);
2496         p.head.length  =
2497                 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size);
2498
2499         p.sector   = cpu_to_be64(e->sector);
2500         p.block_id = e->block_id;
2501         /* p.seq_num  = 0;    No sequence numbers here.. */
2502
2503         /* Only called by our kernel thread.
2504          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2505          * in response to admin command or module unload.
2506          */
2507         if (!drbd_get_data_sock(mdev))
2508                 return 0;
2509
2510         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
2511                                         sizeof(p), MSG_MORE);
2512         if (ok && dgs) {
2513                 dgb = mdev->int_dig_out;
2514                 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2515                 ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE);
2516         }
2517         if (ok)
2518                 ok = _drbd_send_zc_ee(mdev, e);
2519
2520         drbd_put_data_sock(mdev);
2521
2522         if (ok)
2523                 consider_delay_probes(mdev);
2524
2525         return ok;
2526 }
2527
2528 /*
2529   drbd_send distinguishes two cases:
2530
2531   Packets sent via the data socket "sock"
2532   and packets sent via the meta data socket "msock"
2533
2534                     sock                      msock
2535   -----------------+-------------------------+------------------------------
2536   timeout           conf.timeout / 2          conf.timeout / 2
2537   timeout action    send a ping via msock     Abort communication
2538                                               and close all sockets
2539 */
2540
2541 /*
2542  * you must have locked the appropriate [m]sock_mutex elsewhere!
2543  */
2544 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2545               void *buf, size_t size, unsigned msg_flags)
2546 {
2547         struct kvec iov;
2548         struct msghdr msg;
2549         int rv, sent = 0;
2550
2551         if (!sock)
2552                 return -1000;
2553
2554         /* THINK  if (signal_pending) return ... ? */
2555
2556         iov.iov_base = buf;
2557         iov.iov_len  = size;
2558
2559         msg.msg_name       = NULL;
2560         msg.msg_namelen    = 0;
2561         msg.msg_control    = NULL;
2562         msg.msg_controllen = 0;
2563         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
2564
2565         if (sock == mdev->data.socket) {
2566                 mdev->ko_count = mdev->net_conf->ko_count;
2567                 drbd_update_congested(mdev);
2568         }
2569         do {
2570                 /* STRANGE
2571                  * tcp_sendmsg does _not_ use its size parameter at all ?
2572                  *
2573                  * -EAGAIN on timeout, -EINTR on signal.
2574                  */
2575 /* THINK
2576  * do we need to block DRBD_SIG if sock == &meta.socket ??
2577  * otherwise wake_asender() might interrupt some send_*Ack !
2578  */
2579                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2580                 if (rv == -EAGAIN) {
2581                         if (we_should_drop_the_connection(mdev, sock))
2582                                 break;
2583                         else
2584                                 continue;
2585                 }
2586                 D_ASSERT(rv != 0);
2587                 if (rv == -EINTR) {
2588                         flush_signals(current);
2589                         rv = 0;
2590                 }
2591                 if (rv < 0)
2592                         break;
2593                 sent += rv;
2594                 iov.iov_base += rv;
2595                 iov.iov_len  -= rv;
2596         } while (sent < size);
2597
2598         if (sock == mdev->data.socket)
2599                 clear_bit(NET_CONGESTED, &mdev->flags);
2600
2601         if (rv <= 0) {
2602                 if (rv != -EAGAIN) {
2603                         dev_err(DEV, "%s_sendmsg returned %d\n",
2604                             sock == mdev->meta.socket ? "msock" : "sock",
2605                             rv);
2606                         drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2607                 } else
2608                         drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2609         }
2610
2611         return sent;
2612 }
2613
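/* Block device open: only a Primary may be opened for writing; read-only
 * opens of a Secondary are allowed only with the allow_oos module parameter
 * set. */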
2614 static int drbd_open(struct block_device *bdev, fmode_t mode)
2615 {
2616         struct drbd_conf *mdev = bdev->bd_disk->private_data;
2617         unsigned long flags;
2618         int rv = 0;
2619
2620         spin_lock_irqsave(&mdev->req_lock, flags);
2621         /* to have a stable mdev->state.role
2622          * and no race with updating open_cnt */
2623
2624         if (mdev->state.role != R_PRIMARY) {
2625                 if (mode & FMODE_WRITE)
2626                         rv = -EROFS;
2627                 else if (!allow_oos)
2628                         rv = -EMEDIUMTYPE;
2629         }
2630
2631         if (!rv)
2632                 mdev->open_cnt++;
2633         spin_unlock_irqrestore(&mdev->req_lock, flags);
2634
2635         return rv;
2636 }
2637
2638 static int drbd_release(struct gendisk *gd, fmode_t mode)
2639 {
2640         struct drbd_conf *mdev = gd->private_data;
2641         mdev->open_cnt--;
2642         return 0;
2643 }
2644
2645 static void drbd_unplug_fn(struct request_queue *q)
2646 {
2647         struct drbd_conf *mdev = q->queuedata;
2648
2649         /* unplug FIRST */
2650         spin_lock_irq(q->queue_lock);
2651         blk_remove_plug(q);
2652         spin_unlock_irq(q->queue_lock);
2653
2654         /* only if connected */
2655         spin_lock_irq(&mdev->req_lock);
2656         if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
2657                 D_ASSERT(mdev->state.role == R_PRIMARY);
2658                 if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
2659                         /* add to the data.work queue,
2660                          * unless already queued.
2661                          * XXX this might be a good addition to drbd_queue_work
2662                          * anyways, to detect "double queuing" ... */
2663                         if (list_empty(&mdev->unplug_work.list))
2664                                 drbd_queue_work(&mdev->data.work,
2665                                                 &mdev->unplug_work);
2666                 }
2667         }
2668         spin_unlock_irq(&mdev->req_lock);
2669
2670         if (mdev->state.disk >= D_INCONSISTENT)
2671                 drbd_kick_lo(mdev);
2672 }
2673
2674 static void drbd_set_defaults(struct drbd_conf *mdev)
2675 {
2676         mdev->sync_conf.after      = DRBD_AFTER_DEF;
2677         mdev->sync_conf.rate       = DRBD_RATE_DEF;
2678         mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
2679         mdev->state = (union drbd_state) {
2680                 { .role = R_SECONDARY,
2681                   .peer = R_UNKNOWN,
2682                   .conn = C_STANDALONE,
2683                   .disk = D_DISKLESS,
2684                   .pdsk = D_UNKNOWN,
2685                   .susp = 0
2686                 } };
2687 }
2688
2689 void drbd_init_set_defaults(struct drbd_conf *mdev)
2690 {
2691         /* the memset(,0,) did most of this.
2692          * note: only assignments, no allocation in here */
2693
2694         drbd_set_defaults(mdev);
2695
2696         /* for now, we do NOT yet support it,
2697          * even though we start some framework
2698          * to eventually support barriers */
2699         set_bit(NO_BARRIER_SUPP, &mdev->flags);
2700
2701         atomic_set(&mdev->ap_bio_cnt, 0);
2702         atomic_set(&mdev->ap_pending_cnt, 0);
2703         atomic_set(&mdev->rs_pending_cnt, 0);
2704         atomic_set(&mdev->unacked_cnt, 0);
2705         atomic_set(&mdev->local_cnt, 0);
2706         atomic_set(&mdev->net_cnt, 0);
2707         atomic_set(&mdev->packet_seq, 0);
2708         atomic_set(&mdev->pp_in_use, 0);
2709         atomic_set(&mdev->new_c_uuid, 0);
2710
2711         mutex_init(&mdev->md_io_mutex);
2712         mutex_init(&mdev->data.mutex);
2713         mutex_init(&mdev->meta.mutex);
2714         sema_init(&mdev->data.work.s, 0);
2715         sema_init(&mdev->meta.work.s, 0);
2716         mutex_init(&mdev->state_mutex);
2717
2718         spin_lock_init(&mdev->data.work.q_lock);
2719         spin_lock_init(&mdev->meta.work.q_lock);
2720
2721         spin_lock_init(&mdev->al_lock);
2722         spin_lock_init(&mdev->req_lock);
2723         spin_lock_init(&mdev->peer_seq_lock);
2724         spin_lock_init(&mdev->epoch_lock);
2725
2726         INIT_LIST_HEAD(&mdev->active_ee);
2727         INIT_LIST_HEAD(&mdev->sync_ee);
2728         INIT_LIST_HEAD(&mdev->done_ee);
2729         INIT_LIST_HEAD(&mdev->read_ee);
2730         INIT_LIST_HEAD(&mdev->net_ee);
2731         INIT_LIST_HEAD(&mdev->resync_reads);
2732         INIT_LIST_HEAD(&mdev->data.work.q);
2733         INIT_LIST_HEAD(&mdev->meta.work.q);
2734         INIT_LIST_HEAD(&mdev->resync_work.list);
2735         INIT_LIST_HEAD(&mdev->unplug_work.list);
2736         INIT_LIST_HEAD(&mdev->md_sync_work.list);
2737         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2738         INIT_LIST_HEAD(&mdev->delay_probes);
2739         INIT_LIST_HEAD(&mdev->delay_probe_work.list);
2740         INIT_LIST_HEAD(&mdev->uuid_work.list);
2741
2742         mdev->resync_work.cb  = w_resync_inactive;
2743         mdev->unplug_work.cb  = w_send_write_hint;
2744         mdev->md_sync_work.cb = w_md_sync;
2745         mdev->bm_io_work.w.cb = w_bitmap_io;
2746         mdev->delay_probe_work.cb = w_delay_probes;
2747         mdev->uuid_work.cb = w_new_current_uuid;
2748         init_timer(&mdev->resync_timer);
2749         init_timer(&mdev->md_sync_timer);
2750         init_timer(&mdev->delay_probe_timer);
2751         mdev->resync_timer.function = resync_timer_fn;
2752         mdev->resync_timer.data = (unsigned long) mdev;
2753         mdev->md_sync_timer.function = md_sync_timer_fn;
2754         mdev->md_sync_timer.data = (unsigned long) mdev;
2755         mdev->delay_probe_timer.function = delay_probe_timer_fn;
2756         mdev->delay_probe_timer.data = (unsigned long) mdev;
2757
2758
2759         init_waitqueue_head(&mdev->misc_wait);
2760         init_waitqueue_head(&mdev->state_wait);
2761         init_waitqueue_head(&mdev->ee_wait);
2762         init_waitqueue_head(&mdev->al_wait);
2763         init_waitqueue_head(&mdev->seq_wait);
2764
2765         drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
2766         drbd_thread_init(mdev, &mdev->worker, drbd_worker);
2767         drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2768
2769         mdev->agreed_pro_version = PRO_VERSION_MAX;
2770         mdev->write_ordering = WO_bio_barrier;
2771         mdev->resync_wenr = LC_FREE;
2772 }
2773
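/* Reset a device object to its post-init state after it went down to
 * StandAlone/Secondary/Diskless: zero the counters, shrink and clean up the
 * bitmap, and free the remaining resources.  All EE lists and work queues are
 * expected to be empty at this point. */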
2774 void drbd_mdev_cleanup(struct drbd_conf *mdev)
2775 {
2776         if (mdev->receiver.t_state != None)
2777                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2778                                 mdev->receiver.t_state);
2779
2780         /* no need to lock it, I'm the only thread alive */
2781         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
2782                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2783         mdev->al_writ_cnt  =
2784         mdev->bm_writ_cnt  =
2785         mdev->read_cnt     =
2786         mdev->recv_cnt     =
2787         mdev->send_cnt     =
2788         mdev->writ_cnt     =
2789         mdev->p_size       =
2790         mdev->rs_start     =
2791         mdev->rs_total     =
2792         mdev->rs_failed    =
2793         mdev->rs_mark_left =
2794         mdev->rs_mark_time = 0;
2795         D_ASSERT(mdev->net_conf == NULL);
2796
2797         drbd_set_my_capacity(mdev, 0);
2798         if (mdev->bitmap) {
2799                 /* maybe never allocated. */
2800                 drbd_bm_resize(mdev, 0, 1);
2801                 drbd_bm_cleanup(mdev);
2802         }
2803
2804         drbd_free_resources(mdev);
2805
2806         /*
2807          * currently we drbd_init_ee only on module load, so
2808          * we may do drbd_release_ee only on module unload!
2809          */
2810         D_ASSERT(list_empty(&mdev->active_ee));
2811         D_ASSERT(list_empty(&mdev->sync_ee));
2812         D_ASSERT(list_empty(&mdev->done_ee));
2813         D_ASSERT(list_empty(&mdev->read_ee));
2814         D_ASSERT(list_empty(&mdev->net_ee));
2815         D_ASSERT(list_empty(&mdev->resync_reads));
2816         D_ASSERT(list_empty(&mdev->data.work.q));
2817         D_ASSERT(list_empty(&mdev->meta.work.q));
2818         D_ASSERT(list_empty(&mdev->resync_work.list));
2819         D_ASSERT(list_empty(&mdev->unplug_work.list));
2820
2821 }
2822
2823
2824 static void drbd_destroy_mempools(void)
2825 {
2826         struct page *page;
2827
2828         while (drbd_pp_pool) {
2829                 page = drbd_pp_pool;
2830                 drbd_pp_pool = (struct page *)page_private(page);
2831                 __free_page(page);
2832                 drbd_pp_vacant--;
2833         }
2834
2835         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2836
2837         if (drbd_ee_mempool)
2838                 mempool_destroy(drbd_ee_mempool);
2839         if (drbd_request_mempool)
2840                 mempool_destroy(drbd_request_mempool);
2841         if (drbd_ee_cache)
2842                 kmem_cache_destroy(drbd_ee_cache);
2843         if (drbd_request_cache)
2844                 kmem_cache_destroy(drbd_request_cache);
2845         if (drbd_bm_ext_cache)
2846                 kmem_cache_destroy(drbd_bm_ext_cache);
2847         if (drbd_al_ext_cache)
2848                 kmem_cache_destroy(drbd_al_ext_cache);
2849
2850         drbd_ee_mempool      = NULL;
2851         drbd_request_mempool = NULL;
2852         drbd_ee_cache        = NULL;
2853         drbd_request_cache   = NULL;
2854         drbd_bm_ext_cache    = NULL;
2855         drbd_al_ext_cache    = NULL;
2856
2857         return;
2858 }
2859
2860 static int drbd_create_mempools(void)
2861 {
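        /* "number" below sizes each pool/cache: one maximal segment's worth
         * of objects (DRBD_MAX_SEGMENT_SIZE / PAGE_SIZE pages) for every
         * configured minor */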
2862         struct page *page;
2863         const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
2864         int i;
2865
2866         /* prepare our caches and mempools */
2867         drbd_request_mempool = NULL;
2868         drbd_ee_cache        = NULL;
2869         drbd_request_cache   = NULL;
2870         drbd_bm_ext_cache    = NULL;
2871         drbd_al_ext_cache    = NULL;
2872         drbd_pp_pool         = NULL;
2873
2874         /* caches */
2875         drbd_request_cache = kmem_cache_create(
2876                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2877         if (drbd_request_cache == NULL)
2878                 goto Enomem;
2879
2880         drbd_ee_cache = kmem_cache_create(
2881                 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
2882         if (drbd_ee_cache == NULL)
2883                 goto Enomem;
2884
2885         drbd_bm_ext_cache = kmem_cache_create(
2886                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2887         if (drbd_bm_ext_cache == NULL)
2888                 goto Enomem;
2889
2890         drbd_al_ext_cache = kmem_cache_create(
2891                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2892         if (drbd_al_ext_cache == NULL)
2893                 goto Enomem;
2894
2895         /* mempools */
2896         drbd_request_mempool = mempool_create(number,
2897                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2898         if (drbd_request_mempool == NULL)
2899                 goto Enomem;
2900
2901         drbd_ee_mempool = mempool_create(number,
2902                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2903         if (drbd_ee_mempool == NULL)
2904                 goto Enomem;
2905
2906         /* drbd's page pool */
2907         spin_lock_init(&drbd_pp_lock);
2908
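        /* pre-allocate the private page pool: pages are chained into a LIFO
         * free list through their page_private field; drbd_destroy_mempools()
         * walks the same chain to free them again */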
2909         for (i = 0; i < number; i++) {
2910                 page = alloc_page(GFP_HIGHUSER);
2911                 if (!page)
2912                         goto Enomem;
2913                 set_page_private(page, (unsigned long)drbd_pp_pool);
2914                 drbd_pp_pool = page;
2915         }
2916         drbd_pp_vacant = number;
2917
2918         return 0;
2919
2920 Enomem:
2921         drbd_destroy_mempools(); /* in case we allocated some */
2922         return -ENOMEM;
2923 }
2924
2925 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2926         void *unused)
2927 {
2928         /* just so we have it.  you never know what interesting things we
2929          * might want to do here some day...
2930          */
2931
2932         return NOTIFY_DONE;
2933 }
2934
2935 static struct notifier_block drbd_notifier = {
2936         .notifier_call = drbd_notify_sys,
2937 };
2938
2939 static void drbd_release_ee_lists(struct drbd_conf *mdev)
2940 {
2941         int rr;
2942
2943         rr = drbd_release_ee(mdev, &mdev->active_ee);
2944         if (rr)
2945                 dev_err(DEV, "%d EEs in active list found!\n", rr);
2946
2947         rr = drbd_release_ee(mdev, &mdev->sync_ee);
2948         if (rr)
2949                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2950
2951         rr = drbd_release_ee(mdev, &mdev->read_ee);
2952         if (rr)
2953                 dev_err(DEV, "%d EEs in read list found!\n", rr);
2954
2955         rr = drbd_release_ee(mdev, &mdev->done_ee);
2956         if (rr)
2957                 dev_err(DEV, "%d EEs in done list found!\n", rr);
2958
2959         rr = drbd_release_ee(mdev, &mdev->net_ee);
2960         if (rr)
2961                 dev_err(DEV, "%d EEs in net list found!\n", rr);
2962 }
2963
2964 /* caution. no locking.
2965  * currently only used from module cleanup code. */
2966 static void drbd_delete_device(unsigned int minor)
2967 {
2968         struct drbd_conf *mdev = minor_to_mdev(minor);
2969
2970         if (!mdev)
2971                 return;
2972
2973         /* paranoia asserts */
2974         if (mdev->open_cnt != 0)
2975                 dev_err(DEV, "open_cnt = %d in %s:%u\n", mdev->open_cnt,
2976                                 __FILE__ , __LINE__);
2977
2978         ERR_IF (!list_empty(&mdev->data.work.q)) {
2979                 struct list_head *lp;
2980                 list_for_each(lp, &mdev->data.work.q) {
2981                         dev_err(DEV, "lp = %p\n", lp);
2982                 }
2983         };
2984         /* end paranoia asserts */
2985
2986         del_gendisk(mdev->vdisk);
2987
2988         /* cleanup stuff that may have been allocated during
2989          * device (re-)configuration or state changes */
2990
2991         if (mdev->this_bdev)
2992                 bdput(mdev->this_bdev);
2993
2994         drbd_free_resources(mdev);
2995
2996         drbd_release_ee_lists(mdev);
2997
2998         /* should be free'd on disconnect? */
2999         kfree(mdev->ee_hash);
3000         /*
3001         mdev->ee_hash_s = 0;
3002         mdev->ee_hash = NULL;
3003         */
3004
3005         lc_destroy(mdev->act_log);
3006         lc_destroy(mdev->resync);
3007
3008         kfree(mdev->p_uuid);
3009         /* mdev->p_uuid = NULL; */
3010
3011         kfree(mdev->int_dig_out);
3012         kfree(mdev->int_dig_in);
3013         kfree(mdev->int_dig_vv);
3014
3015         /* cleanup the rest that has been
3016          * allocated from drbd_new_device
3017          * and actually free the mdev itself */
3018         drbd_free_mdev(mdev);
3019 }
3020
3021 static void drbd_cleanup(void)
3022 {
3023         unsigned int i;
3024
3025         unregister_reboot_notifier(&drbd_notifier);
3026
3027         drbd_nl_cleanup();
3028
3029         if (minor_table) {
3030                 if (drbd_proc)
3031                         remove_proc_entry("drbd", NULL);
3032                 i = minor_count;
3033                 while (i--)
3034                         drbd_delete_device(i);
3035                 drbd_destroy_mempools();
3036         }
3037
3038         kfree(minor_table);
3039
3040         unregister_blkdev(DRBD_MAJOR, "drbd");
3041
3042         printk(KERN_INFO "drbd: module cleanup done.\n");
3043 }
3044
3045 /**
3046  * drbd_congested() - Callback for pdflush
3047  * @congested_data:     User data
3048  * @bdi_bits:           Bits pdflush is currently interested in
3049  *
3050  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3051  */
3052 static int drbd_congested(void *congested_data, int bdi_bits)
3053 {
3054         struct drbd_conf *mdev = congested_data;
3055         struct request_queue *q;
3056         char reason = '-';
3057         int r = 0;
3058
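        /* mdev->congestion_reason records why, as a single character:
         * '-' not congested, 'd' drbd has frozen IO, 'b' backing device
         * congested, 'n' network congested, 'a' both backing device and net */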
3059         if (!__inc_ap_bio_cond(mdev)) {
3060                 /* DRBD has frozen IO */
3061                 r = bdi_bits;
3062                 reason = 'd';
3063                 goto out;
3064         }
3065
3066         if (get_ldev(mdev)) {
3067                 q = bdev_get_queue(mdev->ldev->backing_bdev);
3068                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3069                 put_ldev(mdev);
3070                 if (r)
3071                         reason = 'b';
3072         }
3073
3074         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3075                 r |= (1 << BDI_async_congested);
3076                 reason = reason == 'b' ? 'a' : 'n';
3077         }
3078
3079 out:
3080         mdev->congestion_reason = reason;
3081         return r;
3082 }
3083
3084 struct drbd_conf *drbd_new_device(unsigned int minor)
3085 {
3086         struct drbd_conf *mdev;
3087         struct gendisk *disk;
3088         struct request_queue *q;
3089
3090         /* GFP_KERNEL, we are outside of all write-out paths */
3091         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3092         if (!mdev)
3093                 return NULL;
3094         if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3095                 goto out_no_cpumask;
3096
3097         mdev->minor = minor;
3098
3099         drbd_init_set_defaults(mdev);
3100
3101         q = blk_alloc_queue(GFP_KERNEL);
3102         if (!q)
3103                 goto out_no_q;
3104         mdev->rq_queue = q;
3105         q->queuedata   = mdev;
3106
3107         disk = alloc_disk(1);
3108         if (!disk)
3109                 goto out_no_disk;
3110         mdev->vdisk = disk;
3111
3112         set_disk_ro(disk, TRUE);
3113
3114         disk->queue = q;
3115         disk->major = DRBD_MAJOR;
3116         disk->first_minor = minor;
3117         disk->fops = &drbd_ops;
3118         sprintf(disk->disk_name, "drbd%d", minor);
3119         disk->private_data = mdev;
3120
3121         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3122         /* we have no partitions. we contain only ourselves. */
3123         mdev->this_bdev->bd_contains = mdev->this_bdev;
3124
3125         q->backing_dev_info.congested_fn = drbd_congested;
3126         q->backing_dev_info.congested_data = mdev;
3127
3128         blk_queue_make_request(q, drbd_make_request_26);
3129         blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
3130         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3131         blk_queue_merge_bvec(q, drbd_merge_bvec);
3132         q->queue_lock = &mdev->req_lock; /* needed since we use */
3133                 /* plugging on a queue that actually has no requests! */
3134         q->unplug_fn = drbd_unplug_fn;
3135
3136         mdev->md_io_page = alloc_page(GFP_KERNEL);
3137         if (!mdev->md_io_page)
3138                 goto out_no_io_page;
3139
3140         if (drbd_bm_init(mdev))
3141                 goto out_no_bitmap;
3142         /* no need to lock access, we are still initializing this minor device. */
3143         if (!tl_init(mdev))
3144                 goto out_no_tl;
3145
3146         mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3147         if (!mdev->app_reads_hash)
3148                 goto out_no_app_reads;
3149
3150         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3151         if (!mdev->current_epoch)
3152                 goto out_no_epoch;
3153
3154         INIT_LIST_HEAD(&mdev->current_epoch->list);
3155         mdev->epochs = 1;
3156
3157         return mdev;
3158
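/* error unwind: release everything allocated above, in reverse order,
 * each label skipping the allocation that just failed */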
3159 /* out_whatever_else:
3160         kfree(mdev->current_epoch); */
3161 out_no_epoch:
3162         kfree(mdev->app_reads_hash);
3163 out_no_app_reads:
3164         tl_cleanup(mdev);
3165 out_no_tl:
3166         drbd_bm_cleanup(mdev);
3167 out_no_bitmap:
3168         __free_page(mdev->md_io_page);
3169 out_no_io_page:
3170         put_disk(disk);
3171 out_no_disk:
3172         blk_cleanup_queue(q);
3173 out_no_q:
3174         free_cpumask_var(mdev->cpu_mask);
3175 out_no_cpumask:
3176         kfree(mdev);
3177         return NULL;
3178 }
3179
3180 /* counterpart of drbd_new_device.
3181  * last part of drbd_delete_device. */
3182 void drbd_free_mdev(struct drbd_conf *mdev)
3183 {
3184         kfree(mdev->current_epoch);
3185         kfree(mdev->app_reads_hash);
3186         tl_cleanup(mdev);
3187         if (mdev->bitmap) /* should no longer be there. */
3188                 drbd_bm_cleanup(mdev);
3189         __free_page(mdev->md_io_page);
3190         put_disk(mdev->vdisk);
3191         blk_cleanup_queue(mdev->rq_queue);
3192         free_cpumask_var(mdev->cpu_mask);
3193         kfree(mdev);
3194 }
3195
3196
3197 int __init drbd_init(void)
3198 {
3199         int err;
3200
3201         if (sizeof(struct p_handshake) != 80) {
3202                 printk(KERN_ERR
3203                        "drbd: never change the size or layout "
3204                        "of the HandShake packet.\n");
3205                 return -EINVAL;
3206         }
3207
3208         if (1 > minor_count || minor_count > 255) {
3209                 printk(KERN_ERR
3210                         "drbd: invalid minor_count (%d)\n", minor_count);
3211 #ifdef MODULE
3212                 return -EINVAL;
3213 #else
3214                 minor_count = 8;
3215 #endif
3216         }
3217
3218         err = drbd_nl_init();
3219         if (err)
3220                 return err;
3221
3222         err = register_blkdev(DRBD_MAJOR, "drbd");
3223         if (err) {
3224                 printk(KERN_ERR
3225                        "drbd: unable to register block device major %d\n",
3226                        DRBD_MAJOR);
3227                 return err;
3228         }
3229
3230         register_reboot_notifier(&drbd_notifier);
3231
3232         /*
3233          * allocate all necessary structs
3234          */
3235         err = -ENOMEM;
3236
3237         init_waitqueue_head(&drbd_pp_wait);
3238
3239         drbd_proc = NULL; /* play safe for drbd_cleanup */
3240         minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3241                                 GFP_KERNEL);
3242         if (!minor_table)
3243                 goto Enomem;
3244
3245         err = drbd_create_mempools();
3246         if (err)
3247                 goto Enomem;
3248
3249         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3250         if (!drbd_proc) {
3251                 printk(KERN_ERR "drbd: unable to register proc file\n");
3252                 goto Enomem;
3253         }
3254
3255         rwlock_init(&global_state_lock);
3256
3257         printk(KERN_INFO "drbd: initialized. "
3258                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3259                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3260         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3261         printk(KERN_INFO "drbd: registered as block device major %d\n",
3262                 DRBD_MAJOR);
3263         printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3264
3265         return 0; /* Success! */
3266
3267 Enomem:
3268         drbd_cleanup();
3269         if (err == -ENOMEM)
3270                 /* currently always the case */
3271                 printk(KERN_ERR "drbd: ran out of memory\n");
3272         else
3273                 printk(KERN_ERR "drbd: initialization failure\n");
3274         return err;
3275 }
3276
3277 void drbd_free_bc(struct drbd_backing_dev *ldev)
3278 {
3279         if (ldev == NULL)
3280                 return;
3281
3282         bd_release(ldev->backing_bdev);
3283         bd_release(ldev->md_bdev);
3284
3285         fput(ldev->lo_file);
3286         fput(ldev->md_file);
3287
3288         kfree(ldev);
3289 }
3290
3291 void drbd_free_sock(struct drbd_conf *mdev)
3292 {
3293         if (mdev->data.socket) {
3294                 mutex_lock(&mdev->data.mutex);
3295                 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3296                 sock_release(mdev->data.socket);
3297                 mdev->data.socket = NULL;
3298                 mutex_unlock(&mdev->data.mutex);
3299         }
3300         if (mdev->meta.socket) {
3301                 mutex_lock(&mdev->meta.mutex);
3302                 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3303                 sock_release(mdev->meta.socket);
3304                 mdev->meta.socket = NULL;
3305                 mutex_unlock(&mdev->meta.mutex);
3306         }
3307 }
3308
3309
3310 void drbd_free_resources(struct drbd_conf *mdev)
3311 {
3312         crypto_free_hash(mdev->csums_tfm);
3313         mdev->csums_tfm = NULL;
3314         crypto_free_hash(mdev->verify_tfm);
3315         mdev->verify_tfm = NULL;
3316         crypto_free_hash(mdev->cram_hmac_tfm);
3317         mdev->cram_hmac_tfm = NULL;
3318         crypto_free_hash(mdev->integrity_w_tfm);
3319         mdev->integrity_w_tfm = NULL;
3320         crypto_free_hash(mdev->integrity_r_tfm);
3321         mdev->integrity_r_tfm = NULL;
3322
3323         drbd_free_sock(mdev);
3324
3325         __no_warn(local,
3326                   drbd_free_bc(mdev->ldev);
3327                   mdev->ldev = NULL;);
3328 }
3329
3330 /* meta data management */
3331
3332 struct meta_data_on_disk {
3333         u64 la_size;           /* last agreed size. */
3334         u64 uuid[UI_SIZE];   /* UUIDs. */
3335         u64 device_uuid;
3336         u64 reserved_u64_1;
3337         u32 flags;             /* MDF */
3338         u32 magic;
3339         u32 md_size_sect;
3340         u32 al_offset;         /* offset to this block */
3341         u32 al_nr_extents;     /* important for restoring the AL */
3342               /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3343         u32 bm_offset;         /* offset to the bitmap, from here */
3344         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3345         u32 reserved_u32[4];
3346
3347 } __packed;
3348
3349 /**
3350  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3351  * @mdev:       DRBD device.
3352  */
3353 void drbd_md_sync(struct drbd_conf *mdev)
3354 {
3355         struct meta_data_on_disk *buffer;
3356         sector_t sector;
3357         int i;
3358
3359         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3360                 return;
3361         del_timer(&mdev->md_sync_timer);
3362
3363         /* We use D_FAILED here and not D_ATTACHING because we try to write
3364          * metadata even if we detach due to a disk failure! */
3365         if (!get_ldev_if_state(mdev, D_FAILED))
3366                 return;
3367
3368         mutex_lock(&mdev->md_io_mutex);
3369         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3370         memset(buffer, 0, 512);
3371
3372         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3373         for (i = UI_CURRENT; i < UI_SIZE; i++)
3374                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3375         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3376         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3377
3378         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
3379         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
3380         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3381         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3382         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3383
3384         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3385
3386         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3387         sector = mdev->ldev->md.md_offset;
3388
3389         if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3390                 clear_bit(MD_DIRTY, &mdev->flags);
3391         } else {
3392                 /* this was a try anyway ... */
3393                 dev_err(DEV, "meta data update failed!\n");
3394
3395                 drbd_chk_io_error(mdev, 1, TRUE);
3396         }
3397
3398         /* Update mdev->ldev->md.la_size_sect,
3399          * since we updated it on metadata. */
3400         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3401
3402         mutex_unlock(&mdev->md_io_mutex);
3403         put_ldev(mdev);
3404 }
3405
3406 /**
3407  * drbd_md_read() - Reads in the meta data super block
3408  * @mdev:       DRBD device.
3409  * @bdev:       Device from which the meta data should be read in.
3410  *
3411  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
3412  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3413  */
3414 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3415 {
3416         struct meta_data_on_disk *buffer;
3417         int i, rv = NO_ERROR;
3418
3419         if (!get_ldev_if_state(mdev, D_ATTACHING))
3420                 return ERR_IO_MD_DISK;
3421
3422         mutex_lock(&mdev->md_io_mutex);
3423         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3424
3425         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3426                 /* NOTE: can't do normal error processing here as this is
3427                    called BEFORE disk is attached */
3428                 dev_err(DEV, "Error while reading metadata.\n");
3429                 rv = ERR_IO_MD_DISK;
3430                 goto err;
3431         }
3432
3433         if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3434                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3435                 rv = ERR_MD_INVALID;
3436                 goto err;
3437         }
3438         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3439                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3440                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3441                 rv = ERR_MD_INVALID;
3442                 goto err;
3443         }
3444         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3445                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3446                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3447                 rv = ERR_MD_INVALID;
3448                 goto err;
3449         }
3450         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3451                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3452                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3453                 rv = ERR_MD_INVALID;
3454                 goto err;
3455         }
3456
3457         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3458                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3459                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3460                 rv = ERR_MD_INVALID;
3461                 goto err;
3462         }
3463
3464         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3465         for (i = UI_CURRENT; i < UI_SIZE; i++)
3466                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3467         bdev->md.flags = be32_to_cpu(buffer->flags);
3468         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3469         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3470
3471         if (mdev->sync_conf.al_extents < 7)
3472                 mdev->sync_conf.al_extents = 127;
3473
3474  err:
3475         mutex_unlock(&mdev->md_io_mutex);
3476         put_ldev(mdev);
3477
3478         return rv;
3479 }
3480
3481 /**
3482  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3483  * @mdev:       DRBD device.
3484  *
3485  * Call this function if you change anything that should be written to
3486  * the meta-data super block. This function sets MD_DIRTY, and starts a
3487  * timer that ensures drbd_md_sync() gets called within five seconds.
3488  */
3489 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3490 {
3491         set_bit(MD_DIRTY, &mdev->flags);
3492         mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3493 }
3494
3495
3496 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3497 {
3498         int i;
3499
3500         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3501                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3502 }
3503
3504 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3505 {
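        /* for the current UUID the lowest bit encodes the role:
         * set while Primary, cleared while Secondary */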
3506         if (idx == UI_CURRENT) {
3507                 if (mdev->state.role == R_PRIMARY)
3508                         val |= 1;
3509                 else
3510                         val &= ~((u64)1);
3511
3512                 drbd_set_ed_uuid(mdev, val);
3513         }
3514
3515         mdev->ldev->md.uuid[idx] = val;
3516         drbd_md_mark_dirty(mdev);
3517 }
3518
3519
3520 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3521 {
3522         if (mdev->ldev->md.uuid[idx]) {
3523                 drbd_uuid_move_history(mdev);
3524                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3525         }
3526         _drbd_uuid_set(mdev, idx, val);
3527 }
3528
3529 /**
3530  * drbd_uuid_new_current() - Creates a new current UUID
3531  * @mdev:       DRBD device.
3532  *
3533  * Creates a new current UUID, and rotates the old current UUID into
3534  * the bitmap slot. Causes an incremental resync upon next connect.
3535  */
3536 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3537 {
3538         u64 val;
3539
3540         dev_info(DEV, "Creating new current UUID\n");
3541         D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3542         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3543
3544         get_random_bytes(&val, sizeof(u64));
3545         _drbd_uuid_set(mdev, UI_CURRENT, val);
3546 }
3547
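/* Set (val != 0) or clear (val == 0) the bitmap UUID. Clearing rotates the
 * old bitmap UUID into the history slots; setting stores val with the role
 * bit (bit 0) cleared. */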
3548 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3549 {
3550         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3551                 return;
3552
3553         if (val == 0) {
3554                 drbd_uuid_move_history(mdev);
3555                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3556                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3557         } else {
3558                 if (mdev->ldev->md.uuid[UI_BITMAP])
3559                         dev_warn(DEV, "bm UUID already set\n");
3560
3561                 mdev->ldev->md.uuid[UI_BITMAP] = val;
3562                 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3563
3564         }
3565         drbd_md_mark_dirty(mdev);
3566 }
3567
3568 /**
3569  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3570  * @mdev:       DRBD device.
3571  *
3572  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3573  */
3574 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3575 {
3576         int rv = -EIO;
3577
3578         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3579                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3580                 drbd_md_sync(mdev);
3581                 drbd_bm_set_all(mdev);
3582
3583                 rv = drbd_bm_write(mdev);
3584
3585                 if (!rv) {
3586                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3587                         drbd_md_sync(mdev);
3588                 }
3589
3590                 put_ldev(mdev);
3591         }
3592
3593         return rv;
3594 }
3595
3596 /**
3597  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3598  * @mdev:       DRBD device.
3599  *
3600  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3601  */
3602 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3603 {
3604         int rv = -EIO;
3605
3606         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3607                 drbd_bm_clear_all(mdev);
3608                 rv = drbd_bm_write(mdev);
3609                 put_ldev(mdev);
3610         }
3611
3612         return rv;
3613 }
3614
3615 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3616 {
3617         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3618         int rv;
3619
3620         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3621
3622         drbd_bm_lock(mdev, work->why);
3623         rv = work->io_fn(mdev);
3624         drbd_bm_unlock(mdev);
3625
3626         clear_bit(BITMAP_IO, &mdev->flags);
3627         wake_up(&mdev->misc_wait);
3628
3629         if (work->done)
3630                 work->done(mdev, rv);
3631
3632         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3633         work->why = NULL;
3634
3635         return 1;
3636 }
3637
3638 /**
3639  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3640  * @mdev:       DRBD device.
3641  * @io_fn:      IO callback to be called when bitmap IO is possible
3642  * @done:       callback to be called after the bitmap IO was performed
3643  * @why:        Descriptive text of the reason for doing the IO
3644  *
3645  * While IO on the bitmap happens we freeze application IO, thus ensuring
3646  * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3647  * called from worker context. It MUST NOT be used while a previous such
3648  * work is still pending!
3649  */
3650 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3651                           int (*io_fn)(struct drbd_conf *),
3652                           void (*done)(struct drbd_conf *, int),
3653                           char *why)
3654 {
3655         D_ASSERT(current == mdev->worker.task);
3656
3657         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3658         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3659         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3660         if (mdev->bm_io_work.why)
3661                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3662                         why, mdev->bm_io_work.why);
3663
3664         mdev->bm_io_work.io_fn = io_fn;
3665         mdev->bm_io_work.done = done;
3666         mdev->bm_io_work.why = why;
3667
3668         set_bit(BITMAP_IO, &mdev->flags);
3669         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3670                 if (list_empty(&mdev->bm_io_work.w.list)) {
3671                         set_bit(BITMAP_IO_QUEUED, &mdev->flags);
3672                         drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
3673                 } else
3674                         dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
3675         }
3676 }
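/* Illustrative only (not a call site in this file): from worker context a
 * full sync could be kicked off by queueing the bitmap writer defined above,
 * e.g. drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write");
 * the "done" callback may be NULL if no completion notification is needed. */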
3677
3678 /**
3679  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
3680  * @mdev:       DRBD device.
3681  * @io_fn:      IO callback to be called when bitmap IO is possible
3682  * @why:        Descriptive text of the reason for doing the IO
3683  *
3684  * Freezes application IO while the actual IO operation runs. This
3685  * function MAY NOT be called from worker context.
3686  */
3687 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
3688 {
3689         int rv;
3690
3691         D_ASSERT(current != mdev->worker.task);
3692
3693         drbd_suspend_io(mdev);
3694
3695         drbd_bm_lock(mdev, why);
3696         rv = io_fn(mdev);
3697         drbd_bm_unlock(mdev);
3698
3699         drbd_resume_io(mdev);
3700
3701         return rv;
3702 }
3703
3704 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3705 {
3706         if ((mdev->ldev->md.flags & flag) != flag) {
3707                 drbd_md_mark_dirty(mdev);
3708                 mdev->ldev->md.flags |= flag;
3709         }
3710 }
3711
3712 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3713 {
3714         if ((mdev->ldev->md.flags & flag) != 0) {
3715                 drbd_md_mark_dirty(mdev);
3716                 mdev->ldev->md.flags &= ~flag;
3717         }
3718 }
3719 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3720 {
3721         return (bdev->md.flags & flag) != 0;
3722 }
3723
3724 static void md_sync_timer_fn(unsigned long data)
3725 {
3726         struct drbd_conf *mdev = (struct drbd_conf *) data;
3727
3728         drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
3729 }
3730
3731 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3732 {
3733         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3734         drbd_md_sync(mdev);
3735
3736         return 1;
3737 }
3738
3739 #ifdef CONFIG_DRBD_FAULT_INJECTION
3740 /* Fault insertion support including random number generator shamelessly
3741  * stolen from kernel/rcutorture.c */
3742 struct fault_random_state {
3743         unsigned long state;
3744         unsigned long count;
3745 };
3746
3747 #define FAULT_RANDOM_MULT 39916801  /* prime */
3748 #define FAULT_RANDOM_ADD        479001701 /* prime */
3749 #define FAULT_RANDOM_REFRESH 10000
3750
3751 /*
3752  * Crude but fast random-number generator.  Uses a linear congruential
3753  * generator, with occasional help from get_random_bytes().
3754  */
3755 static unsigned long
3756 _drbd_fault_random(struct fault_random_state *rsp)
3757 {
3758         long refresh;
3759
3760         if (!rsp->count--) {
3761                 get_random_bytes(&refresh, sizeof(refresh));
3762                 rsp->state += refresh;
3763                 rsp->count = FAULT_RANDOM_REFRESH;
3764         }
3765         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3766         return swahw32(rsp->state);
3767 }
3768
3769 static char *
3770 _drbd_fault_str(unsigned int type) {
3771         static char *_faults[] = {
3772                 [DRBD_FAULT_MD_WR] = "Meta-data write",
3773                 [DRBD_FAULT_MD_RD] = "Meta-data read",
3774                 [DRBD_FAULT_RS_WR] = "Resync write",
3775                 [DRBD_FAULT_RS_RD] = "Resync read",
3776                 [DRBD_FAULT_DT_WR] = "Data write",
3777                 [DRBD_FAULT_DT_RD] = "Data read",
3778                 [DRBD_FAULT_DT_RA] = "Data read ahead",
3779                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3780                 [DRBD_FAULT_AL_EE] = "EE allocation",
3781                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3782         };
3783
3784         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3785 }
3786
3787 unsigned int
3788 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3789 {
3790         static struct fault_random_state rrs = {0, 0};
3791
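        /* inject a fault iff this minor is selected by fault_devs (0 means
         * "all devices") and a pseudo-random roll of 1..100 falls within the
         * configured fault_rate percentage */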
3792         unsigned int ret = (
3793                 (fault_devs == 0 ||
3794                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3795                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3796
3797         if (ret) {
3798                 fault_count++;
3799
3800                 if (printk_ratelimit())
3801                         dev_warn(DEV, "***Simulating %s failure\n",
3802                                 _drbd_fault_str(type));
3803         }
3804
3805         return ret;
3806 }
3807 #endif
3808
3809 const char *drbd_buildtag(void)
3810 {
3811         /* DRBD built from external sources has a reference here to the
3812            git hash of the source code. */
3813
3814         static char buildtag[38] = "\0uilt-in";
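        /* "\0uilt-in": when built into the kernel (THIS_MODULE == NULL) the
         * leading NUL is overwritten with 'b' below, yielding "built-in";
         * as a module we format the srcversion instead */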
3815
3816         if (buildtag[0] == 0) {
3817 #ifdef CONFIG_MODULES
3818                 if (THIS_MODULE != NULL)
3819                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3820                 else
3821 #endif
3822                         buildtag[0] = 'b';
3823         }
3824
3825         return buildtag;
3826 }
3827
3828 module_init(drbd_init)
3829 module_exit(drbd_cleanup)
3830
3831 EXPORT_SYMBOL(drbd_conn_str);
3832 EXPORT_SYMBOL(drbd_role_str);
3833 EXPORT_SYMBOL(drbd_disk_str);
3834 EXPORT_SYMBOL(drbd_set_st_err_str);