drivers/block/drbd/drbd_main.c
drbd: don't pointlessly queue bitmap send, if we lost connection
1 /*
2    drbd.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27  */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60         struct drbd_work w;
61         union drbd_state os;
62         union drbd_state ns;
63         enum chg_state_flags flags;
64         struct completion *done;
65 };
66
67 static DEFINE_MUTEX(drbd_main_mutex);
68 int drbdd_init(struct drbd_thread *);
69 int drbd_worker(struct drbd_thread *);
70 int drbd_asender(struct drbd_thread *);
71
72 int drbd_init(void);
73 static int drbd_open(struct block_device *bdev, fmode_t mode);
74 static int drbd_release(struct gendisk *gd, fmode_t mode);
75 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77                            union drbd_state ns, enum chg_state_flags flags);
78 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79 static void md_sync_timer_fn(unsigned long data);
80 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84               "Lars Ellenberg <lars@linbit.com>");
85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86 MODULE_VERSION(REL_VERSION);
87 MODULE_LICENSE("GPL");
88 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89                  __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92 #include <linux/moduleparam.h>
93 /* allow_open_on_secondary */
94 MODULE_PARM_DESC(allow_oos, "DONT USE!");
95 /* thanks to these macros, if compiled into the kernel (not as a module),
96  * this becomes the boot parameter drbd.minor_count */
97 module_param(minor_count, uint, 0444);
98 module_param(disable_sendpage, bool, 0644);
99 module_param(allow_oos, bool, 0);
100 module_param(cn_idx, uint, 0444);
101 module_param(proc_details, int, 0644);
102
103 #ifdef CONFIG_DRBD_FAULT_INJECTION
104 int enable_faults;
105 int fault_rate;
106 static int fault_count;
107 int fault_devs;
108 /* bitmap of enabled faults */
109 module_param(enable_faults, int, 0664);
110 /* fault rate % value - applies to all enabled faults */
111 module_param(fault_rate, int, 0664);
112 /* count of faults inserted */
113 module_param(fault_count, int, 0664);
114 /* bitmap of devices to insert faults on */
115 module_param(fault_devs, int, 0644);
116 #endif
117
118 /* module parameters, defined here (declared via module_param() above) */
119 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120 int disable_sendpage;
121 int allow_oos;
122 unsigned int cn_idx = CN_IDX_DRBD;
123 int proc_details;       /* Detail level in proc drbd*/
124
125 /* Module parameter for setting the user mode helper program
126  * to run. Default is /sbin/drbdadm */
127 char usermode_helper[80] = "/sbin/drbdadm";
128
129 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
132  * as member "struct gendisk *vdisk;"
133  */
134 struct drbd_conf **minor_table;
135
136 struct kmem_cache *drbd_request_cache;
137 struct kmem_cache *drbd_ee_cache;       /* epoch entries */
138 struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
139 struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
140 mempool_t *drbd_request_mempool;
141 mempool_t *drbd_ee_mempool;
142
143 /* I do not use a standard mempool, because:
144    1) I want to hand out the pre-allocated objects first.
145    2) I want to be able to interrupt sleeping allocation with a signal.
146    Note: This is a singly linked list; the next pointer is the private
147          member of struct page.
148  */
149 struct page *drbd_pp_pool;
150 spinlock_t   drbd_pp_lock;
151 int          drbd_pp_vacant;
152 wait_queue_head_t drbd_pp_wait;
153
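/* rate limit for repeated warnings/errors: at most 5 messages per 5 second interval */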
154 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156 static const struct block_device_operations drbd_ops = {
157         .owner =   THIS_MODULE,
158         .open =    drbd_open,
159         .release = drbd_release,
160 };
161
162 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164 #ifdef __CHECKER__
165 /* When checking with sparse, and this is an inline function, sparse will
166    give tons of false positives. When this is a real function, sparse works.
167  */
168 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169 {
170         int io_allowed;
171
172         atomic_inc(&mdev->local_cnt);
173         io_allowed = (mdev->state.disk >= mins);
174         if (!io_allowed) {
175                 if (atomic_dec_and_test(&mdev->local_cnt))
176                         wake_up(&mdev->misc_wait);
177         }
178         return io_allowed;
179 }
180
181 #endif
182
183 /**
184  * DOC: The transfer log
185  *
186  * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
187  * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188  * of the list. There is always at least one &struct drbd_tl_epoch object.
189  *
190  * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
191  * attached.
192  */
193 static int tl_init(struct drbd_conf *mdev)
194 {
195         struct drbd_tl_epoch *b;
196
197         /* during device minor initialization, we may well use GFP_KERNEL */
198         b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199         if (!b)
200                 return 0;
201         INIT_LIST_HEAD(&b->requests);
202         INIT_LIST_HEAD(&b->w.list);
203         b->next = NULL;
204         b->br_number = 4711;
205         b->n_writes = 0;
206         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208         mdev->oldest_tle = b;
209         mdev->newest_tle = b;
210         INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211
212         mdev->tl_hash = NULL;
213         mdev->tl_hash_s = 0;
214
215         return 1;
216 }
217
218 static void tl_cleanup(struct drbd_conf *mdev)
219 {
220         D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
221         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
222         kfree(mdev->oldest_tle);
223         mdev->oldest_tle = NULL;
224         kfree(mdev->unused_spare_tle);
225         mdev->unused_spare_tle = NULL;
226         kfree(mdev->tl_hash);
227         mdev->tl_hash = NULL;
228         mdev->tl_hash_s = 0;
229 }
230
231 /**
232  * _tl_add_barrier() - Adds a barrier to the transfer log
233  * @mdev:       DRBD device.
234  * @new:        Barrier to be added before the current head of the TL.
235  *
236  * The caller must hold the req_lock.
237  */
238 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
239 {
240         struct drbd_tl_epoch *newest_before;
241
242         INIT_LIST_HEAD(&new->requests);
243         INIT_LIST_HEAD(&new->w.list);
244         new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
245         new->next = NULL;
246         new->n_writes = 0;
247
248         newest_before = mdev->newest_tle;
249         /* never send a barrier number == 0, because that is special-cased
250          * when using TCQ for our write ordering code */
251         new->br_number = (newest_before->br_number+1) ?: 1;
252         if (mdev->newest_tle != new) {
253                 mdev->newest_tle->next = new;
254                 mdev->newest_tle = new;
255         }
256 }
257
258 /**
259  * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
260  * @mdev:       DRBD device.
261  * @barrier_nr: Expected identifier of the DRBD write barrier packet.
262  * @set_size:   Expected number of requests before that barrier.
263  *
264  * In case the passed barrier_nr or set_size does not match the oldest
265  * &struct drbd_tl_epoch object, this function will cause a termination
266  * of the connection.
267  */
268 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
269                        unsigned int set_size)
270 {
271         struct drbd_tl_epoch *b, *nob; /* next old barrier */
272         struct list_head *le, *tle;
273         struct drbd_request *r;
274
275         spin_lock_irq(&mdev->req_lock);
276
277         b = mdev->oldest_tle;
278
279         /* first some paranoia code */
280         if (b == NULL) {
281                 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
282                         barrier_nr);
283                 goto bail;
284         }
285         if (b->br_number != barrier_nr) {
286                 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
287                         barrier_nr, b->br_number);
288                 goto bail;
289         }
290         if (b->n_writes != set_size) {
291                 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
292                         barrier_nr, set_size, b->n_writes);
293                 goto bail;
294         }
295
296         /* Clean up list of requests processed during current epoch */
297         list_for_each_safe(le, tle, &b->requests) {
298                 r = list_entry(le, struct drbd_request, tl_requests);
299                 _req_mod(r, barrier_acked);
300         }
301         /* There could be requests on the list waiting for completion
302            of the write to the local disk. To avoid corruption of the
303            slab's data structures we have to remove the list's head.
304
305            Also there could have been a barrier ack out of sequence, overtaking
306            the write acks - which would be a bug and violating write ordering.
307            To not deadlock in case we lose connection while such requests are
308            still pending, we need some way to find them for the
309            _req_mod(connection_lost_while_pending).
310
311            These have been list_move'd to the out_of_sequence_requests list in
312            _req_mod(, barrier_acked) above.
313            */
314         list_del_init(&b->requests);
315
316         nob = b->next;
317         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
318                 _tl_add_barrier(mdev, b);
319                 if (nob)
320                         mdev->oldest_tle = nob;
321                 /* if nob == NULL, b was the only barrier and becomes the new
322                    barrier. Therefore mdev->oldest_tle already points to b */
323         } else {
324                 D_ASSERT(nob != NULL);
325                 mdev->oldest_tle = nob;
326                 kfree(b);
327         }
328
329         spin_unlock_irq(&mdev->req_lock);
330         dec_ap_pending(mdev);
331
332         return;
333
334 bail:
335         spin_unlock_irq(&mdev->req_lock);
336         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
337 }
338
339
340 /* In C_AHEAD mode only out_of_sync packets are sent for requests. Detach
341  * those requests from the newest barrier when changing to another cstate.
342  *
343  * That headless list vanishes when the last request finishes its write or
344  * sends its out_of_sync packet.  */
345 static void tl_forget(struct drbd_conf *mdev)
346 {
347         struct drbd_tl_epoch *b;
348
349         if (test_bit(CREATE_BARRIER, &mdev->flags))
350                 return;
351
352         b = mdev->newest_tle;
353         list_del(&b->requests);
354         _tl_add_barrier(mdev, b);
355 }
356
357 /**
358  * _tl_restart() - Walks the transfer log, and applies an action to all requests
359  * @mdev:       DRBD device.
360  * @what:       The action/event to perform with all request objects
361  *
362  * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
363  * restart_frozen_disk_io.
364  */
365 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
366 {
367         struct drbd_tl_epoch *b, *tmp, **pn;
368         struct list_head *le, *tle, carry_reads;
369         struct drbd_request *req;
370         int rv, n_writes, n_reads;
371
372         b = mdev->oldest_tle;
373         pn = &mdev->oldest_tle;
374         while (b) {
375                 n_writes = 0;
376                 n_reads = 0;
377                 INIT_LIST_HEAD(&carry_reads);
378                 list_for_each_safe(le, tle, &b->requests) {
379                         req = list_entry(le, struct drbd_request, tl_requests);
380                         rv = _req_mod(req, what);
381
382                         n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
383                         n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
384                 }
385                 tmp = b->next;
386
387                 if (n_writes) {
388                         if (what == resend) {
389                                 b->n_writes = n_writes;
390                                 if (b->w.cb == NULL) {
391                                         b->w.cb = w_send_barrier;
392                                         inc_ap_pending(mdev);
393                                         set_bit(CREATE_BARRIER, &mdev->flags);
394                                 }
395
396                                 drbd_queue_work(&mdev->data.work, &b->w);
397                         }
398                         pn = &b->next;
399                 } else {
400                         if (n_reads)
401                                 list_add(&carry_reads, &b->requests);
402                         /* there could still be requests on that ring list,
403                          * in case local io is still pending */
404                         list_del(&b->requests);
405
406                         /* dec_ap_pending corresponding to queue_barrier.
407                          * the newest barrier may not have been queued yet,
408                          * in which case w.cb is still NULL. */
409                         if (b->w.cb != NULL)
410                                 dec_ap_pending(mdev);
411
412                         if (b == mdev->newest_tle) {
413                                 /* recycle, but reinit! */
414                                 D_ASSERT(tmp == NULL);
415                                 INIT_LIST_HEAD(&b->requests);
416                                 list_splice(&carry_reads, &b->requests);
417                                 INIT_LIST_HEAD(&b->w.list);
418                                 b->w.cb = NULL;
419                                 b->br_number = net_random();
420                                 b->n_writes = 0;
421
422                                 *pn = b;
423                                 break;
424                         }
425                         *pn = tmp;
426                         kfree(b);
427                 }
428                 b = tmp;
429                 list_splice(&carry_reads, &b->requests);
430         }
431 }
432
433
434 /**
435  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
436  * @mdev:       DRBD device.
437  *
438  * This is called after the connection to the peer was lost. The storage covered
439  * by the requests on the transfer log gets marked as out of sync. Called from the
440  * receiver thread and the worker thread.
441  */
442 void tl_clear(struct drbd_conf *mdev)
443 {
444         struct list_head *le, *tle;
445         struct drbd_request *r;
446
447         spin_lock_irq(&mdev->req_lock);
448
449         _tl_restart(mdev, connection_lost_while_pending);
450
451         /* we expect this list to be empty. */
452         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
453
454         /* but just in case, clean it up anyways! */
455         list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
456                 r = list_entry(le, struct drbd_request, tl_requests);
457                 /* It would be nice to complete outside of spinlock.
458                  * But this is easier for now. */
459                 _req_mod(r, connection_lost_while_pending);
460         }
461
462         /* ensure bit indicating barrier is required is clear */
463         clear_bit(CREATE_BARRIER, &mdev->flags);
464
465         memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
466
467         spin_unlock_irq(&mdev->req_lock);
468 }
469
470 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
471 {
472         spin_lock_irq(&mdev->req_lock);
473         _tl_restart(mdev, what);
474         spin_unlock_irq(&mdev->req_lock);
475 }
476
477 /**
478  * cl_wide_st_chg() - true if the state change is a cluster wide one
479  * @mdev:       DRBD device.
480  * @os:         old (current) state.
481  * @ns:         new (wanted) state.
482  */
483 static int cl_wide_st_chg(struct drbd_conf *mdev,
484                           union drbd_state os, union drbd_state ns)
485 {
486         return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
487                  ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
488                   (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
489                   (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
490                   (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
491                 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
492                 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
493 }
494
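/**
 * drbd_change_state() - Apply a state change locally, without asking the peer
 * @mdev:       DRBD device.
 * @f:          state change flags.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 *
 * Takes the req_lock and hands the masked-in new state directly to
 * _drbd_set_state(); no cluster-wide negotiation is performed here.
 */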
495 enum drbd_state_rv
496 drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
497                   union drbd_state mask, union drbd_state val)
498 {
499         unsigned long flags;
500         union drbd_state os, ns;
501         enum drbd_state_rv rv;
502
503         spin_lock_irqsave(&mdev->req_lock, flags);
504         os = mdev->state;
505         ns.i = (os.i & ~mask.i) | val.i;
506         rv = _drbd_set_state(mdev, ns, f, NULL);
507         ns = mdev->state;
508         spin_unlock_irqrestore(&mdev->req_lock, flags);
509
510         return rv;
511 }
512
513 /**
514  * drbd_force_state() - Impose a change which happens outside our control on our state
515  * @mdev:       DRBD device.
516  * @mask:       mask of state bits to change.
517  * @val:        value of new state bits.
518  */
519 void drbd_force_state(struct drbd_conf *mdev,
520         union drbd_state mask, union drbd_state val)
521 {
522         drbd_change_state(mdev, CS_HARD, mask, val);
523 }
524
525 static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
526 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
527                                                     union drbd_state,
528                                                     union drbd_state);
529 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
530                                        union drbd_state ns, const char **warn_sync_abort);
531 int drbd_send_state_req(struct drbd_conf *,
532                         union drbd_state, union drbd_state);
533
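/* Wait condition used by drbd_req_state() below for cluster-wide changes:
 * reports the peer's answer (CL_ST_CHG_SUCCESS/CL_ST_CHG_FAIL), or decides
 * locally that no cluster-wide change is needed or that the request has
 * become invalid; SS_UNKNOWN_ERROR means "keep waiting" (see below). */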
534 static enum drbd_state_rv
535 _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
536              union drbd_state val)
537 {
538         union drbd_state os, ns;
539         unsigned long flags;
540         enum drbd_state_rv rv;
541
542         if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
543                 return SS_CW_SUCCESS;
544
545         if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
546                 return SS_CW_FAILED_BY_PEER;
547
548         rv = 0;
549         spin_lock_irqsave(&mdev->req_lock, flags);
550         os = mdev->state;
551         ns.i = (os.i & ~mask.i) | val.i;
552         ns = sanitize_state(mdev, os, ns, NULL);
553
554         if (!cl_wide_st_chg(mdev, os, ns))
555                 rv = SS_CW_NO_NEED;
556         if (!rv) {
557                 rv = is_valid_state(mdev, ns);
558                 if (rv == SS_SUCCESS) {
559                         rv = is_valid_state_transition(mdev, ns, os);
560                         if (rv == SS_SUCCESS)
561                                 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
562                 }
563         }
564         spin_unlock_irqrestore(&mdev->req_lock, flags);
565
566         return rv;
567 }
568
569 /**
570  * drbd_req_state() - Perform a possibly cluster-wide state change
571  * @mdev:       DRBD device.
572  * @mask:       mask of state bits to change.
573  * @val:        value of new state bits.
574  * @f:          flags
575  *
576  * Should not be called directly, use drbd_request_state() or
577  * _drbd_request_state().
578  */
579 static enum drbd_state_rv
580 drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
581                union drbd_state val, enum chg_state_flags f)
582 {
583         struct completion done;
584         unsigned long flags;
585         union drbd_state os, ns;
586         enum drbd_state_rv rv;
587
588         init_completion(&done);
589
590         if (f & CS_SERIALIZE)
591                 mutex_lock(&mdev->state_mutex);
592
593         spin_lock_irqsave(&mdev->req_lock, flags);
594         os = mdev->state;
595         ns.i = (os.i & ~mask.i) | val.i;
596         ns = sanitize_state(mdev, os, ns, NULL);
597
598         if (cl_wide_st_chg(mdev, os, ns)) {
599                 rv = is_valid_state(mdev, ns);
600                 if (rv == SS_SUCCESS)
601                         rv = is_valid_state_transition(mdev, ns, os);
602                 spin_unlock_irqrestore(&mdev->req_lock, flags);
603
604                 if (rv < SS_SUCCESS) {
605                         if (f & CS_VERBOSE)
606                                 print_st_err(mdev, os, ns, rv);
607                         goto abort;
608                 }
609
610                 drbd_state_lock(mdev);
611                 if (!drbd_send_state_req(mdev, mask, val)) {
612                         drbd_state_unlock(mdev);
613                         rv = SS_CW_FAILED_BY_PEER;
614                         if (f & CS_VERBOSE)
615                                 print_st_err(mdev, os, ns, rv);
616                         goto abort;
617                 }
618
619                 wait_event(mdev->state_wait,
620                         (rv = _req_st_cond(mdev, mask, val)));
621
622                 if (rv < SS_SUCCESS) {
623                         drbd_state_unlock(mdev);
624                         if (f & CS_VERBOSE)
625                                 print_st_err(mdev, os, ns, rv);
626                         goto abort;
627                 }
628                 spin_lock_irqsave(&mdev->req_lock, flags);
629                 os = mdev->state;
630                 ns.i = (os.i & ~mask.i) | val.i;
631                 rv = _drbd_set_state(mdev, ns, f, &done);
632                 drbd_state_unlock(mdev);
633         } else {
634                 rv = _drbd_set_state(mdev, ns, f, &done);
635         }
636
637         spin_unlock_irqrestore(&mdev->req_lock, flags);
638
639         if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
640                 D_ASSERT(current != mdev->worker.task);
641                 wait_for_completion(&done);
642         }
643
644 abort:
645         if (f & CS_SERIALIZE)
646                 mutex_unlock(&mdev->state_mutex);
647
648         return rv;
649 }
650
651 /**
652  * _drbd_request_state() - Request a state change (with flags)
653  * @mdev:       DRBD device.
654  * @mask:       mask of state bits to change.
655  * @val:        value of new state bits.
656  * @f:          flags
657  *
658  * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
659  * flag, or when logging of failed state change requests is not desired.
660  */
661 enum drbd_state_rv
662 _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
663                     union drbd_state val, enum chg_state_flags f)
664 {
665         enum drbd_state_rv rv;
666
667         wait_event(mdev->state_wait,
668                    (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
669
670         return rv;
671 }
672
673 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
674 {
675         dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
676             name,
677             drbd_conn_str(ns.conn),
678             drbd_role_str(ns.role),
679             drbd_role_str(ns.peer),
680             drbd_disk_str(ns.disk),
681             drbd_disk_str(ns.pdsk),
682             is_susp(ns) ? 's' : 'r',
683             ns.aftr_isp ? 'a' : '-',
684             ns.peer_isp ? 'p' : '-',
685             ns.user_isp ? 'u' : '-'
686             );
687 }
688
689 void print_st_err(struct drbd_conf *mdev, union drbd_state os,
690                   union drbd_state ns, enum drbd_state_rv err)
691 {
692         if (err == SS_IN_TRANSIENT_STATE)
693                 return;
694         dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
695         print_st(mdev, " state", os);
696         print_st(mdev, "wanted", ns);
697 }
698
699
700 /**
701  * is_valid_state() - Returns an SS_ error code if ns is not valid
702  * @mdev:       DRBD device.
703  * @ns:         State to consider.
704  */
705 static enum drbd_state_rv
706 is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
707 {
708         /* See drbd_state_sw_errors in drbd_strings.c */
709
710         enum drbd_fencing_p fp;
711         enum drbd_state_rv rv = SS_SUCCESS;
712
713         fp = FP_DONT_CARE;
714         if (get_ldev(mdev)) {
715                 fp = mdev->ldev->dc.fencing;
716                 put_ldev(mdev);
717         }
718
719         if (get_net_conf(mdev)) {
720                 if (!mdev->net_conf->two_primaries &&
721                     ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
722                         rv = SS_TWO_PRIMARIES;
723                 put_net_conf(mdev);
724         }
725
726         if (rv <= 0)
727                 /* already found a reason to abort */;
728         else if (ns.role == R_SECONDARY && mdev->open_cnt)
729                 rv = SS_DEVICE_IN_USE;
730
731         else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
732                 rv = SS_NO_UP_TO_DATE_DISK;
733
734         else if (fp >= FP_RESOURCE &&
735                  ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
736                 rv = SS_PRIMARY_NOP;
737
738         else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
739                 rv = SS_NO_UP_TO_DATE_DISK;
740
741         else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
742                 rv = SS_NO_LOCAL_DISK;
743
744         else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
745                 rv = SS_NO_REMOTE_DISK;
746
747         else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
748                 rv = SS_NO_UP_TO_DATE_DISK;
749
750         else if ((ns.conn == C_CONNECTED ||
751                   ns.conn == C_WF_BITMAP_S ||
752                   ns.conn == C_SYNC_SOURCE ||
753                   ns.conn == C_PAUSED_SYNC_S) &&
754                   ns.disk == D_OUTDATED)
755                 rv = SS_CONNECTED_OUTDATES;
756
757         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
758                  (mdev->sync_conf.verify_alg[0] == 0))
759                 rv = SS_NO_VERIFY_ALG;
760
761         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
762                   mdev->agreed_pro_version < 88)
763                 rv = SS_NOT_SUPPORTED;
764
765         return rv;
766 }
767
768 /**
769  * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
770  * @mdev:       DRBD device.
771  * @ns:         new state.
772  * @os:         old state.
773  */
774 static enum drbd_state_rv
775 is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
776                           union drbd_state os)
777 {
778         enum drbd_state_rv rv = SS_SUCCESS;
779
780         if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
781             os.conn > C_CONNECTED)
782                 rv = SS_RESYNC_RUNNING;
783
784         if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
785                 rv = SS_ALREADY_STANDALONE;
786
787         if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
788                 rv = SS_IS_DISKLESS;
789
790         if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
791                 rv = SS_NO_NET_CONFIG;
792
793         if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
794                 rv = SS_LOWER_THAN_OUTDATED;
795
796         if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
797                 rv = SS_IN_TRANSIENT_STATE;
798
799         if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
800                 rv = SS_IN_TRANSIENT_STATE;
801
802         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
803                 rv = SS_NEED_CONNECTION;
804
805         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
806             ns.conn != os.conn && os.conn > C_CONNECTED)
807                 rv = SS_RESYNC_RUNNING;
808
809         if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
810             os.conn < C_CONNECTED)
811                 rv = SS_NEED_CONNECTION;
812
813         if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
814             && os.conn < C_WF_REPORT_PARAMS)
815                 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
816
817         return rv;
818 }
819
820 /**
821  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
822  * @mdev:       DRBD device.
823  * @os:         old state.
824  * @ns:         new state.
825  * @warn_sync_abort:
826  *
827  * When we lose the connection, we have to set the state of the peer's disk (pdsk)
828  * to D_UNKNOWN. This rule and many more along those lines are in this function.
829  */
830 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
831                                        union drbd_state ns, const char **warn_sync_abort)
832 {
833         enum drbd_fencing_p fp;
834         enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
835
836         fp = FP_DONT_CARE;
837         if (get_ldev(mdev)) {
838                 fp = mdev->ldev->dc.fencing;
839                 put_ldev(mdev);
840         }
841
842         /* Network errors must not override a state where the device's network
843            part is not configured (C_STANDALONE/C_DISCONNECTING) */
843         if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
844             os.conn <= C_DISCONNECTING)
845                 ns.conn = os.conn;
846
847         /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
848          * If you try to go into some Sync* state, that shall fail (elsewhere). */
849         if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
850             ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
851                 ns.conn = os.conn;
852
853         /* we cannot fail (again) if we already detached */
854         if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
855                 ns.disk = D_DISKLESS;
856
857         /* if we are only D_ATTACHING yet,
858          * we can (and should) go directly to D_DISKLESS. */
859         if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
860                 ns.disk = D_DISKLESS;
861
862         /* After C_DISCONNECTING only C_STANDALONE may follow */
863         if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
864                 ns.conn = os.conn;
865
866         if (ns.conn < C_CONNECTED) {
867                 ns.peer_isp = 0;
868                 ns.peer = R_UNKNOWN;
869                 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
870                         ns.pdsk = D_UNKNOWN;
871         }
872
873         /* Clear the aftr_isp when becoming unconfigured */
874         if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
875                 ns.aftr_isp = 0;
876
877         /* Abort resync if a disk fails/detaches */
878         if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
879             (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
880                 if (warn_sync_abort)
881                         *warn_sync_abort =
882                                 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
883                                 "Online-verify" : "Resync";
884                 ns.conn = C_CONNECTED;
885         }
886
887         /* Connection breaks down before we finished "Negotiating" */
888         if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
889             get_ldev_if_state(mdev, D_NEGOTIATING)) {
890                 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
891                         ns.disk = mdev->new_state_tmp.disk;
892                         ns.pdsk = mdev->new_state_tmp.pdsk;
893                 } else {
894                         dev_alert(DEV, "Connection lost while negotiating, no data!\n");
895                         ns.disk = D_DISKLESS;
896                         ns.pdsk = D_UNKNOWN;
897                 }
898                 put_ldev(mdev);
899         }
900
901         /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
902         if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
903                 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
904                         ns.disk = D_UP_TO_DATE;
905                 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
906                         ns.pdsk = D_UP_TO_DATE;
907         }
908
909         /* Implications of the connection state on the disk states */
910         disk_min = D_DISKLESS;
911         disk_max = D_UP_TO_DATE;
912         pdsk_min = D_INCONSISTENT;
913         pdsk_max = D_UNKNOWN;
914         switch ((enum drbd_conns)ns.conn) {
915         case C_WF_BITMAP_T:
916         case C_PAUSED_SYNC_T:
917         case C_STARTING_SYNC_T:
918         case C_WF_SYNC_UUID:
919         case C_BEHIND:
920                 disk_min = D_INCONSISTENT;
921                 disk_max = D_OUTDATED;
922                 pdsk_min = D_UP_TO_DATE;
923                 pdsk_max = D_UP_TO_DATE;
924                 break;
925         case C_VERIFY_S:
926         case C_VERIFY_T:
927                 disk_min = D_UP_TO_DATE;
928                 disk_max = D_UP_TO_DATE;
929                 pdsk_min = D_UP_TO_DATE;
930                 pdsk_max = D_UP_TO_DATE;
931                 break;
932         case C_CONNECTED:
933                 disk_min = D_DISKLESS;
934                 disk_max = D_UP_TO_DATE;
935                 pdsk_min = D_DISKLESS;
936                 pdsk_max = D_UP_TO_DATE;
937                 break;
938         case C_WF_BITMAP_S:
939         case C_PAUSED_SYNC_S:
940         case C_STARTING_SYNC_S:
941         case C_AHEAD:
942                 disk_min = D_UP_TO_DATE;
943                 disk_max = D_UP_TO_DATE;
944                 pdsk_min = D_INCONSISTENT;
945                 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
946                 break;
947         case C_SYNC_TARGET:
948                 disk_min = D_INCONSISTENT;
949                 disk_max = D_INCONSISTENT;
950                 pdsk_min = D_UP_TO_DATE;
951                 pdsk_max = D_UP_TO_DATE;
952                 break;
953         case C_SYNC_SOURCE:
954                 disk_min = D_UP_TO_DATE;
955                 disk_max = D_UP_TO_DATE;
956                 pdsk_min = D_INCONSISTENT;
957                 pdsk_max = D_INCONSISTENT;
958                 break;
959         case C_STANDALONE:
960         case C_DISCONNECTING:
961         case C_UNCONNECTED:
962         case C_TIMEOUT:
963         case C_BROKEN_PIPE:
964         case C_NETWORK_FAILURE:
965         case C_PROTOCOL_ERROR:
966         case C_TEAR_DOWN:
967         case C_WF_CONNECTION:
968         case C_WF_REPORT_PARAMS:
969         case C_MASK:
970                 break;
971         }
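        /* Clamp ns.disk and ns.pdsk into the [min, max] range implied by the
         * connection state above, warning if they have to be raised. */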
972         if (ns.disk > disk_max)
973                 ns.disk = disk_max;
974
975         if (ns.disk < disk_min) {
976                 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
977                          drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
978                 ns.disk = disk_min;
979         }
980         if (ns.pdsk > pdsk_max)
981                 ns.pdsk = pdsk_max;
982
983         if (ns.pdsk < pdsk_min) {
984                 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
985                          drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
986                 ns.pdsk = pdsk_min;
987         }
988
989         if (fp == FP_STONITH &&
990             (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
991             !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
992                 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
993
994         if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
995             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
996             !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
997                 ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible, neither locally nor on the peer */
998
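        /* Any individual suspend flag (aftr/peer/user) pauses a running resync;
         * once all of them are cleared again, the resync continues. */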
999         if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1000                 if (ns.conn == C_SYNC_SOURCE)
1001                         ns.conn = C_PAUSED_SYNC_S;
1002                 if (ns.conn == C_SYNC_TARGET)
1003                         ns.conn = C_PAUSED_SYNC_T;
1004         } else {
1005                 if (ns.conn == C_PAUSED_SYNC_S)
1006                         ns.conn = C_SYNC_SOURCE;
1007                 if (ns.conn == C_PAUSED_SYNC_T)
1008                         ns.conn = C_SYNC_TARGET;
1009         }
1010
1011         return ns;
1012 }
1013
1014 /* helper for __drbd_set_state */
1015 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1016 {
1017         if (mdev->agreed_pro_version < 90)
1018                 mdev->ov_start_sector = 0;
1019         mdev->rs_total = drbd_bm_bits(mdev);
1020         mdev->ov_position = 0;
1021         if (cs == C_VERIFY_T) {
1022                 /* starting online verify from an arbitrary position
1023                  * does not fit well into the existing protocol.
1024                  * on C_VERIFY_T, we initialize ov_left and friends
1025                  * implicitly in receive_DataRequest once the
1026                  * first P_OV_REQUEST is received */
1027                 mdev->ov_start_sector = ~(sector_t)0;
1028         } else {
1029                 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1030                 if (bit >= mdev->rs_total) {
1031                         mdev->ov_start_sector =
1032                                 BM_BIT_TO_SECT(mdev->rs_total - 1);
1033                         mdev->rs_total = 1;
1034                 } else
1035                         mdev->rs_total -= bit;
1036                 mdev->ov_position = mdev->ov_start_sector;
1037         }
1038         mdev->ov_left = mdev->rs_total;
1039 }
1040
1041 static void drbd_resume_al(struct drbd_conf *mdev)
1042 {
1043         if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1044                 dev_info(DEV, "Resumed AL updates\n");
1045 }
1046
1047 /**
1048  * __drbd_set_state() - Set a new DRBD state
1049  * @mdev:       DRBD device.
1050  * @ns:         new state.
1051  * @flags:      Flags
1052  * @done:       Optional completion, that will get completed after the after_state_ch() finished
1053  *
1054  * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1055  */
1056 enum drbd_state_rv
1057 __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1058                  enum chg_state_flags flags, struct completion *done)
1059 {
1060         union drbd_state os;
1061         enum drbd_state_rv rv = SS_SUCCESS;
1062         const char *warn_sync_abort = NULL;
1063         struct after_state_chg_work *ascw;
1064
1065         os = mdev->state;
1066
1067         ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1068
1069         if (ns.i == os.i)
1070                 return SS_NOTHING_TO_DO;
1071
1072         if (!(flags & CS_HARD)) {
1073                 /* pre-state-change checks; only look at ns */
1074                 /* See drbd_state_sw_errors in drbd_strings.c */
1075
1076                 rv = is_valid_state(mdev, ns);
1077                 if (rv < SS_SUCCESS) {
1078                         /* If the old state was illegal as well, then let
1079                            this happen...*/
1080
1081                         if (is_valid_state(mdev, os) == rv)
1082                                 rv = is_valid_state_transition(mdev, ns, os);
1083                 } else
1084                         rv = is_valid_state_transition(mdev, ns, os);
1085         }
1086
1087         if (rv < SS_SUCCESS) {
1088                 if (flags & CS_VERBOSE)
1089                         print_st_err(mdev, os, ns, rv);
1090                 return rv;
1091         }
1092
1093         if (warn_sync_abort)
1094                 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1095
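        /* Build a one line, human readable summary of all state fields that
         * actually changed and log it, e.g.
         * "conn( Connected -> Disconnecting ) pdsk( UpToDate -> Outdated ) " */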
1096         {
1097         char *pbp, pb[300];
1098         pbp = pb;
1099         *pbp = 0;
1100         if (ns.role != os.role)
1101                 pbp += sprintf(pbp, "role( %s -> %s ) ",
1102                                drbd_role_str(os.role),
1103                                drbd_role_str(ns.role));
1104         if (ns.peer != os.peer)
1105                 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1106                                drbd_role_str(os.peer),
1107                                drbd_role_str(ns.peer));
1108         if (ns.conn != os.conn)
1109                 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1110                                drbd_conn_str(os.conn),
1111                                drbd_conn_str(ns.conn));
1112         if (ns.disk != os.disk)
1113                 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1114                                drbd_disk_str(os.disk),
1115                                drbd_disk_str(ns.disk));
1116         if (ns.pdsk != os.pdsk)
1117                 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1118                                drbd_disk_str(os.pdsk),
1119                                drbd_disk_str(ns.pdsk));
1120         if (is_susp(ns) != is_susp(os))
1121                 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1122                                is_susp(os),
1123                                is_susp(ns));
1124         if (ns.aftr_isp != os.aftr_isp)
1125                 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1126                                os.aftr_isp,
1127                                ns.aftr_isp);
1128         if (ns.peer_isp != os.peer_isp)
1129                 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1130                                os.peer_isp,
1131                                ns.peer_isp);
1132         if (ns.user_isp != os.user_isp)
1133                 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1134                                os.user_isp,
1135                                ns.user_isp);
1136         dev_info(DEV, "%s\n", pb);
1137         }
1138
1139         /* solve the race between becoming unconfigured,
1140          * worker doing the cleanup, and
1141          * admin reconfiguring us:
1142          * on (re)configure, first set CONFIG_PENDING,
1143          * then wait for a potentially exiting worker,
1144          * start the worker, and schedule one no_op.
1145          * then proceed with configuration.
1146          */
1147         if (ns.disk == D_DISKLESS &&
1148             ns.conn == C_STANDALONE &&
1149             ns.role == R_SECONDARY &&
1150             !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1151                 set_bit(DEVICE_DYING, &mdev->flags);
1152
1153         /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1154          * on the ldev here, to be sure that the transition to D_DISKLESS or
1155          * drbd_ldev_destroy() won't happen before our corresponding
1156          * after_state_ch work has run, where we put_ldev again. */
1157         if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1158             (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1159                 atomic_inc(&mdev->local_cnt);
1160
1161         mdev->state = ns;
1162         wake_up(&mdev->misc_wait);
1163         wake_up(&mdev->state_wait);
1164
1165         /* aborted verify run. log the last position */
1166         if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1167             ns.conn < C_CONNECTED) {
1168                 mdev->ov_start_sector =
1169                         BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1170                 dev_info(DEV, "Online Verify reached sector %llu\n",
1171                         (unsigned long long)mdev->ov_start_sector);
1172         }
1173
1174         if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1175             (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
1176                 dev_info(DEV, "Syncer continues.\n");
1177                 mdev->rs_paused += (long)jiffies
1178                                   -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1179                 if (ns.conn == C_SYNC_TARGET)
1180                         mod_timer(&mdev->resync_timer, jiffies);
1181         }
1182
1183         if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
1184             (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1185                 dev_info(DEV, "Resync suspended\n");
1186                 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1187         }
1188
1189         if (os.conn == C_CONNECTED &&
1190             (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1191                 unsigned long now = jiffies;
1192                 int i;
1193
1194                 set_ov_position(mdev, ns.conn);
1195                 mdev->rs_start = now;
1196                 mdev->rs_last_events = 0;
1197                 mdev->rs_last_sect_ev = 0;
1198                 mdev->ov_last_oos_size = 0;
1199                 mdev->ov_last_oos_start = 0;
1200
1201                 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1202                         mdev->rs_mark_left[i] = mdev->ov_left;
1203                         mdev->rs_mark_time[i] = now;
1204                 }
1205
1206                 drbd_rs_controller_reset(mdev);
1207
1208                 if (ns.conn == C_VERIFY_S) {
1209                         dev_info(DEV, "Starting Online Verify from sector %llu\n",
1210                                         (unsigned long long)mdev->ov_position);
1211                         mod_timer(&mdev->resync_timer, jiffies);
1212                 }
1213         }
1214
1215         if (get_ldev(mdev)) {
1216                 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1217                                                  MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1218                                                  MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1219
1220                 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1221                         mdf |= MDF_CRASHED_PRIMARY;
1222                 if (mdev->state.role == R_PRIMARY ||
1223                     (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1224                         mdf |= MDF_PRIMARY_IND;
1225                 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1226                         mdf |= MDF_CONNECTED_IND;
1227                 if (mdev->state.disk > D_INCONSISTENT)
1228                         mdf |= MDF_CONSISTENT;
1229                 if (mdev->state.disk > D_OUTDATED)
1230                         mdf |= MDF_WAS_UP_TO_DATE;
1231                 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1232                         mdf |= MDF_PEER_OUT_DATED;
1233                 if (mdf != mdev->ldev->md.flags) {
1234                         mdev->ldev->md.flags = mdf;
1235                         drbd_md_mark_dirty(mdev);
1236                 }
1237                 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1238                         drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1239                 put_ldev(mdev);
1240         }
1241
1242         /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
1243         if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1244             os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1245                 set_bit(CONSIDER_RESYNC, &mdev->flags);
1246
1247         /* Receiver should clean up itself */
1248         if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1249                 drbd_thread_stop_nowait(&mdev->receiver);
1250
1251         /* Now the receiver finished cleaning up itself, it should die */
1252         if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1253                 drbd_thread_stop_nowait(&mdev->receiver);
1254
1255         /* Upon network failure, we need to restart the receiver. */
1256         if (os.conn > C_TEAR_DOWN &&
1257             ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1258                 drbd_thread_restart_nowait(&mdev->receiver);
1259
1260         /* Resume AL writing if we get a connection */
1261         if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1262                 drbd_resume_al(mdev);
1263
1264         /* Start a new epoch in case we start to mirror write requests */
1265         if (!drbd_should_do_remote(os) && drbd_should_do_remote(ns))
1266                 tl_forget(mdev);
1267
1268         /* Do not add local-only requests to an epoch with mirrored requests */
1269         if (drbd_should_do_remote(os) && !drbd_should_do_remote(ns))
1270                 set_bit(CREATE_BARRIER, &mdev->flags);
1271
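        /* The remaining after-state-change actions may sleep and must not run
         * under the req_lock, so queue them for the worker thread. */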
1272         ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1273         if (ascw) {
1274                 ascw->os = os;
1275                 ascw->ns = ns;
1276                 ascw->flags = flags;
1277                 ascw->w.cb = w_after_state_ch;
1278                 ascw->done = done;
1279                 drbd_queue_work(&mdev->data.work, &ascw->w);
1280         } else {
1281                 dev_warn(DEV, "Could not kmalloc an ascw\n");
1282         }
1283
1284         return rv;
1285 }
1286
1287 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1288 {
1289         struct after_state_chg_work *ascw =
1290                 container_of(w, struct after_state_chg_work, w);
1291         after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1292         if (ascw->flags & CS_WAIT_COMPLETE) {
1293                 D_ASSERT(ascw->done != NULL);
1294                 complete(ascw->done);
1295         }
1296         kfree(ascw);
1297
1298         return 1;
1299 }
1300
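/* Runs after the bitmap has been written out when a resync is about to start:
 * rv != 0 means the bitmap write failed, in which case we fall back to
 * C_CONNECTED instead of starting the resync. */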
1301 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1302 {
1303         if (rv) {
1304                 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1305                 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1306                 return;
1307         }
1308
1309         switch (mdev->state.conn) {
1310         case C_STARTING_SYNC_T:
1311                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1312                 break;
1313         case C_STARTING_SYNC_S:
1314                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1315                 break;
1316         }
1317 }
1318
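/* Lock the bitmap and run io_fn() directly from the worker thread itself,
 * with application IO suspended (open coded, non-blocking; must only be
 * called from the worker, see the D_ASSERT below). */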
1319 int drbd_bitmap_io_from_worker(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
1320 {
1321         int rv;
1322
1323         D_ASSERT(current == mdev->worker.task);
1324
1325         /* open coded non-blocking drbd_suspend_io(mdev); */
1326         set_bit(SUSPEND_IO, &mdev->flags);
1327         if (!is_susp(mdev->state))
1328                 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
1329
1330         drbd_bm_lock(mdev, why);
1331         rv = io_fn(mdev);
1332         drbd_bm_unlock(mdev);
1333
1334         drbd_resume_io(mdev);
1335
1336         return rv;
1337 }
1338
1339 /**
1340  * after_state_ch() - Perform after state change actions that may sleep
1341  * @mdev:       DRBD device.
1342  * @os:         old state.
1343  * @ns:         new state.
1344  * @flags:      Flags
1345  */
1346 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1347                            union drbd_state ns, enum chg_state_flags flags)
1348 {
1349         enum drbd_fencing_p fp;
1350         enum drbd_req_event what = nothing;
1351         union drbd_state nsm = (union drbd_state){ .i = -1 };
1352
1353         if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1354                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1355                 if (mdev->p_uuid)
1356                         mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1357         }
1358
1359         fp = FP_DONT_CARE;
1360         if (get_ldev(mdev)) {
1361                 fp = mdev->ldev->dc.fencing;
1362                 put_ldev(mdev);
1363         }
1364
1365         /* Inform userspace about the change... */
1366         drbd_bcast_state(mdev, ns);
1367
1368         if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1369             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1370                 drbd_khelper(mdev, "pri-on-incon-degr");
1371
1372         /* Here we have the actions that are performed after a
1373            state change. This function might sleep */
1374
1375         nsm.i = -1;
1376         if (ns.susp_nod) {
1377                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1378                         what = resend;
1379
1380                 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1381                         what = restart_frozen_disk_io;
1382
1383                 if (what != nothing)
1384                         nsm.susp_nod = 0;
1385         }
1386
1387         if (ns.susp_fen) {
1388                 /* case1: The outdate peer handler is successful: */
1389                 if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
1390                         tl_clear(mdev);
1391                         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1392                                 drbd_uuid_new_current(mdev);
1393                                 clear_bit(NEW_CUR_UUID, &mdev->flags);
1394                         }
1395                         spin_lock_irq(&mdev->req_lock);
1396                         _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1397                         spin_unlock_irq(&mdev->req_lock);
1398                 }
1399                 /* case2: The connection was established again: */
1400                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1401                         clear_bit(NEW_CUR_UUID, &mdev->flags);
1402                         what = resend;
1403                         nsm.susp_fen = 0;
1404                 }
1405         }
1406
1407         if (what != nothing) {
1408                 spin_lock_irq(&mdev->req_lock);
1409                 _tl_restart(mdev, what);
1410                 nsm.i &= mdev->state.i;
1411                 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1412                 spin_unlock_irq(&mdev->req_lock);
1413         }
1414
1415         /* Became sync source.  With protocol >= 96, we still need to send out
1416          * the sync uuid now. Need to do that before any drbd_send_state, or
1417          * the other side may go "paused sync" before receiving the sync uuids,
1418          * which is unexpected. */
1419         if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1420             (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1421             mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1422                 drbd_gen_and_send_sync_uuid(mdev);
1423                 put_ldev(mdev);
1424         }
1425
1426         /* Do not change the order of the if above and the two below... */
1427         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
1428                 drbd_send_uuids(mdev);
1429                 drbd_send_state(mdev);
1430         }
1431         /* No point in queuing send_bitmap if we don't have a connection
1432          * anymore, so also check the _current_ state, not only the new state
1433          * at the time this work was queued. */
1434         if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1435             mdev->state.conn == C_WF_BITMAP_S)
1436                 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1437                                 "send_bitmap (WFBitMapS)");
1438
1439         /* Lost contact to peer's copy of the data */
1440         if ((os.pdsk >= D_INCONSISTENT &&
1441              os.pdsk != D_UNKNOWN &&
1442              os.pdsk != D_OUTDATED)
1443         &&  (ns.pdsk < D_INCONSISTENT ||
1444              ns.pdsk == D_UNKNOWN ||
1445              ns.pdsk == D_OUTDATED)) {
1446                 if (get_ldev(mdev)) {
1447                         if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1448                             mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1449                                 if (is_susp(mdev->state)) {
1450                                         set_bit(NEW_CUR_UUID, &mdev->flags);
1451                                 } else {
1452                                         drbd_uuid_new_current(mdev);
1453                                         drbd_send_uuids(mdev);
1454                                 }
1455                         }
1456                         put_ldev(mdev);
1457                 }
1458         }
1459
1460         if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1461                 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1462                         drbd_uuid_new_current(mdev);
1463                         drbd_send_uuids(mdev);
1464                 }
1465
1466                 /* D_DISKLESS peer becomes secondary */
1467                 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1468                         drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote diskless peer");
1469                 put_ldev(mdev);
1470         }
1471
1472         /* Write out all changed bits on demote.
1473          * Though, no need to do that just yet
1474          * if there is still a resync going on. */
1475         if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1476                 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1477                 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write, "demote");
1478                 put_ldev(mdev);
1479         }
1480
1481         /* Last part of the attaching process ... */
1482         if (ns.conn >= C_CONNECTED &&
1483             os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1484                 drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
1485                 drbd_send_uuids(mdev);
1486                 drbd_send_state(mdev);
1487         }
1488
1489         /* We want to pause/continue resync, tell peer. */
1490         if (ns.conn >= C_CONNECTED &&
1491              ((os.aftr_isp != ns.aftr_isp) ||
1492               (os.user_isp != ns.user_isp)))
1493                 drbd_send_state(mdev);
1494
1495         /* In case one of the isp bits got set, suspend other devices. */
1496         if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1497             (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1498                 suspend_other_sg(mdev);
1499
1500         /* Make sure the peer gets informed about possible state
1501            changes (ISP bits) that happened while we were in WFReportParams. */
1502         if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1503                 drbd_send_state(mdev);
1504
1505         if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1506                 drbd_send_state(mdev);
1507
1508         /* We are in the process of starting a full sync... */
1509         if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1510             (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1511                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
1512
1513         /* We are invalidating ourselves... */
1514         if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1515             os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1516                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
1517
1518         /* first half of local IO error, failure to attach,
1519          * or administrative detach */
1520         if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1521                 enum drbd_io_error_p eh;
1522                 int was_io_error;
1523                 /* corresponding get_ldev was in __drbd_set_state, to serialize
1524                  * our cleanup here with the transition to D_DISKLESS,
1525                  * so it is safe to dereference ldev here. */
1526                 eh = mdev->ldev->dc.on_io_error;
1527                 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1528
1529                 /* current state still has to be D_FAILED,
1530                  * there is only one way out: to D_DISKLESS,
1531                  * and that may only happen after our put_ldev below. */
1532                 if (mdev->state.disk != D_FAILED)
1533                         dev_err(DEV,
1534                                 "ASSERT FAILED: disk is %s during detach\n",
1535                                 drbd_disk_str(mdev->state.disk));
1536
1537                 if (drbd_send_state(mdev))
1538                         dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1539                 else
1540                         dev_err(DEV, "Sending state for detaching disk failed\n");
1541
1542                 drbd_rs_cancel_all(mdev);
1543
1544                 /* In case we want to get something to stable storage still,
1545                  * this may be the last chance.
1546                  * Following put_ldev may transition to D_DISKLESS. */
1547                 drbd_md_sync(mdev);
1548                 put_ldev(mdev);
1549
1550                 if (was_io_error && eh == EP_CALL_HELPER)
1551                         drbd_khelper(mdev, "local-io-error");
1552         }
1553
1554         /* second half of local IO error, failure to attach,
1555          * or administrative detach,
1556          * after local_cnt references have reached zero again */
1557         if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1558                 /* We must still be diskless,
1559                  * re-attach has to be serialized with this! */
1560                 if (mdev->state.disk != D_DISKLESS)
1561                         dev_err(DEV,
1562                                 "ASSERT FAILED: disk is %s while going diskless\n",
1563                                 drbd_disk_str(mdev->state.disk));
1564
1565                 mdev->rs_total = 0;
1566                 mdev->rs_failed = 0;
1567                 atomic_set(&mdev->rs_pending_cnt, 0);
1568
1569                 if (drbd_send_state(mdev))
1570                         dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1571                 else
1572                         dev_err(DEV, "Sending state for being diskless failed\n");
1573                 /* corresponding get_ldev in __drbd_set_state;
1574                  * this may finally trigger drbd_ldev_destroy. */
1575                 put_ldev(mdev);
1576         }
1577
1578         /* Disks got bigger while they were detached */
1579         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1580             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1581                 if (ns.conn == C_CONNECTED)
1582                         resync_after_online_grow(mdev);
1583         }
1584
1585         /* A resync finished or aborted, wake paused devices... */
1586         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1587             (os.peer_isp && !ns.peer_isp) ||
1588             (os.user_isp && !ns.user_isp))
1589                 resume_next_sg(mdev);
1590
1591         /* sync target done with resync.  Explicitly notify peer, even though
1592          * it should (at least for non-empty resyncs) already know this itself. */
1593         if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1594                 drbd_send_state(mdev);
1595
1596         if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED)
1597                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
1598
1599         /* free tl_hash if we got thawed and are C_STANDALONE */
1600         if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1601                 drbd_free_tl_hash(mdev);
1602
1603         /* Upon network connection, we need to start the receiver */
1604         if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1605                 drbd_thread_start(&mdev->receiver);
1606
1607         /* Terminate worker thread if we are unconfigured - it will be
1608            restarted as needed... */
1609         if (ns.disk == D_DISKLESS &&
1610             ns.conn == C_STANDALONE &&
1611             ns.role == R_SECONDARY) {
1612                 if (os.aftr_isp != ns.aftr_isp)
1613                         resume_next_sg(mdev);
1614                 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1615                 if (test_bit(DEVICE_DYING, &mdev->flags))
1616                         drbd_thread_stop_nowait(&mdev->worker);
1617         }
1618
1619         drbd_md_sync(mdev);
1620 }
1621
1622
1623 static int drbd_thread_setup(void *arg)
1624 {
1625         struct drbd_thread *thi = (struct drbd_thread *) arg;
1626         struct drbd_conf *mdev = thi->mdev;
1627         unsigned long flags;
1628         int retval;
1629
1630 restart:
1631         retval = thi->function(thi);
1632
1633         spin_lock_irqsave(&thi->t_lock, flags);
1634
1635         /* if the receiver has been "Exiting", the last thing it did
1636          * was set the conn state to "StandAlone",
1637          * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1638          * and receiver thread will be "started".
1639          * drbd_thread_start needs to set "Restarting" in that case.
1640          * t_state check and assignment needs to be within the same spinlock,
1641          * so either thread_start sees Exiting, and can remap to Restarting,
1642          * or thread_start sees None, and can proceed as normal.
1643          */
1644
1645         if (thi->t_state == Restarting) {
1646                 dev_info(DEV, "Restarting %s\n", current->comm);
1647                 thi->t_state = Running;
1648                 spin_unlock_irqrestore(&thi->t_lock, flags);
1649                 goto restart;
1650         }
1651
1652         thi->task = NULL;
1653         thi->t_state = None;
1654         smp_mb();
1655         complete(&thi->stop);
1656         spin_unlock_irqrestore(&thi->t_lock, flags);
1657
1658         dev_info(DEV, "Terminating %s\n", current->comm);
1659
1660         /* Release mod reference taken when thread was started */
1661         module_put(THIS_MODULE);
1662         return retval;
1663 }
1664
1665 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1666                       int (*func) (struct drbd_thread *))
1667 {
1668         spin_lock_init(&thi->t_lock);
1669         thi->task    = NULL;
1670         thi->t_state = None;
1671         thi->function = func;
1672         thi->mdev = mdev;
1673 }
1674
1675 int drbd_thread_start(struct drbd_thread *thi)
1676 {
1677         struct drbd_conf *mdev = thi->mdev;
1678         struct task_struct *nt;
1679         unsigned long flags;
1680
1681         const char *me =
1682                 thi == &mdev->receiver ? "receiver" :
1683                 thi == &mdev->asender  ? "asender"  :
1684                 thi == &mdev->worker   ? "worker"   : "NONSENSE";
1685
1686         /* is used from state engine doing drbd_thread_stop_nowait,
1687          * while holding the req lock irqsave */
1688         spin_lock_irqsave(&thi->t_lock, flags);
1689
1690         switch (thi->t_state) {
1691         case None:
1692                 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1693                                 me, current->comm, current->pid);
1694
1695                 /* Get ref on module for thread - this is released when thread exits */
1696                 if (!try_module_get(THIS_MODULE)) {
1697                         dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1698                         spin_unlock_irqrestore(&thi->t_lock, flags);
1699                         return false;
1700                 }
1701
1702                 init_completion(&thi->stop);
1703                 D_ASSERT(thi->task == NULL);
1704                 thi->reset_cpu_mask = 1;
1705                 thi->t_state = Running;
1706                 spin_unlock_irqrestore(&thi->t_lock, flags);
1707                 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1708
1709                 nt = kthread_create(drbd_thread_setup, (void *) thi,
1710                                     "drbd%d_%s", mdev_to_minor(mdev), me);
1711
1712                 if (IS_ERR(nt)) {
1713                         dev_err(DEV, "Couldn't start thread\n");
1714
1715                         module_put(THIS_MODULE);
1716                         return false;
1717                 }
1718                 spin_lock_irqsave(&thi->t_lock, flags);
1719                 thi->task = nt;
1720                 thi->t_state = Running;
1721                 spin_unlock_irqrestore(&thi->t_lock, flags);
1722                 wake_up_process(nt);
1723                 break;
1724         case Exiting:
1725                 thi->t_state = Restarting;
1726                 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1727                                 me, current->comm, current->pid);
1728                 /* fall through */
1729         case Running:
1730         case Restarting:
1731         default:
1732                 spin_unlock_irqrestore(&thi->t_lock, flags);
1733                 break;
1734         }
1735
1736         return true;
1737 }
1738
1739
1740 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1741 {
1742         unsigned long flags;
1743
1744         enum drbd_thread_state ns = restart ? Restarting : Exiting;
1745
1746         /* may be called from state engine, holding the req lock irqsave */
1747         spin_lock_irqsave(&thi->t_lock, flags);
1748
1749         if (thi->t_state == None) {
1750                 spin_unlock_irqrestore(&thi->t_lock, flags);
1751                 if (restart)
1752                         drbd_thread_start(thi);
1753                 return;
1754         }
1755
1756         if (thi->t_state != ns) {
1757                 if (thi->task == NULL) {
1758                         spin_unlock_irqrestore(&thi->t_lock, flags);
1759                         return;
1760                 }
1761
1762                 thi->t_state = ns;
1763                 smp_mb();
1764                 init_completion(&thi->stop);
1765                 if (thi->task != current)
1766                         force_sig(DRBD_SIGKILL, thi->task);
1767
1768         }
1769
1770         spin_unlock_irqrestore(&thi->t_lock, flags);
1771
1772         if (wait)
1773                 wait_for_completion(&thi->stop);
1774 }
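/*
 * Rough sketch of the thread life cycle implemented by drbd_thread_setup(),
 * drbd_thread_start() and _drbd_thread_stop() above (illustrative only):
 *
 *	None     --drbd_thread_start()------------> Running
 *	Running  --_drbd_thread_stop(restart=0)---> Exiting    --> None
 *	Running  --_drbd_thread_stop(restart=1)---> Restarting --> Running
 *	Exiting  --drbd_thread_start()------------> Restarting  (re-connect race)
 *
 * A stop request signals the task with DRBD_SIGKILL (unless the thread is
 * stopping itself) and, with wait != 0, blocks on thi->stop until
 * drbd_thread_setup() calls complete() on its way out.
 */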
1775
1776 #ifdef CONFIG_SMP
1777 /**
1778  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1779  * @mdev:       DRBD device.
1780  *
1781  * Forces all threads of a device onto the same CPU. This is beneficial for
1782  * DRBD's performance. May be overridden by the user's configuration.
1783  */
1784 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1785 {
1786         int ord, cpu;
1787
1788         /* user override. */
1789         if (cpumask_weight(mdev->cpu_mask))
1790                 return;
1791
1792         ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1793         for_each_online_cpu(cpu) {
1794                 if (ord-- == 0) {
1795                         cpumask_set_cpu(cpu, mdev->cpu_mask);
1796                         return;
1797                 }
1798         }
1799         /* should not be reached */
1800         cpumask_setall(mdev->cpu_mask);
1801 }
1802
1803 /**
1804  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1805  * @mdev:       DRBD device.
1806  *
1807  * Call this in the "main loop" of _all_ threads; no need for any mutex, current
1808  * won't die prematurely.
1809  */
1810 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1811 {
1812         struct task_struct *p = current;
1813         struct drbd_thread *thi =
1814                 p == mdev->asender.task  ? &mdev->asender  :
1815                 p == mdev->receiver.task ? &mdev->receiver :
1816                 p == mdev->worker.task   ? &mdev->worker   :
1817                 NULL;
1818         ERR_IF(thi == NULL)
1819                 return;
1820         if (!thi->reset_cpu_mask)
1821                 return;
1822         thi->reset_cpu_mask = 0;
1823         set_cpus_allowed_ptr(p, mdev->cpu_mask);
1824 }
1825 #endif
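/*
 * Example of the CPU pinning above (assuming four online CPUs and no user
 * supplied cpu-mask): minor 0 gets the first online CPU, minor 1 the second,
 * minor 5 again the second (minor % number of online CPUs).  Each thread then
 * applies the mask lazily from its main loop, roughly like this (sketch; the
 * real thread loops live elsewhere in the driver):
 *
 *	while (get_t_state(thi) == Running) {
 *		drbd_thread_current_set_cpu(mdev); /* no-op after the 1st call */
 *		/* ... do work ... */
 *	}
 */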
1826
1827 /* the appropriate socket mutex must be held already */
1828 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1829                           enum drbd_packets cmd, struct p_header80 *h,
1830                           size_t size, unsigned msg_flags)
1831 {
1832         int sent, ok;
1833
1834         ERR_IF(!h) return false;
1835         ERR_IF(!size) return false;
1836
1837         h->magic   = BE_DRBD_MAGIC;
1838         h->command = cpu_to_be16(cmd);
1839         h->length  = cpu_to_be16(size-sizeof(struct p_header80));
1840
1841         sent = drbd_send(mdev, sock, h, size, msg_flags);
1842
1843         ok = (sent == size);
1844         if (!ok)
1845                 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1846                     cmdname(cmd), (int)size, sent);
1847         return ok;
1848 }
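/*
 * Every packet sent through _drbd_send_cmd() starts with a struct p_header80
 * in network byte order; "length" is the payload size, i.e. the total size
 * minus the header.  A minimal caller looks like this (sketch, matching what
 * drbd_send_state() further below actually does; the data mutex must already
 * be held):
 *
 *	struct p_state p;
 *
 *	p.state = cpu_to_be32(mdev->state.i);
 *	_drbd_send_cmd(mdev, mdev->data.socket, P_STATE,
 *		       (struct p_header80 *)&p, sizeof(p), 0);
 */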
1849
1850 /* don't pass the socket. we may only look at it
1851  * when we hold the appropriate socket mutex.
1852  */
1853 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1854                   enum drbd_packets cmd, struct p_header80 *h, size_t size)
1855 {
1856         int ok = 0;
1857         struct socket *sock;
1858
1859         if (use_data_socket) {
1860                 mutex_lock(&mdev->data.mutex);
1861                 sock = mdev->data.socket;
1862         } else {
1863                 mutex_lock(&mdev->meta.mutex);
1864                 sock = mdev->meta.socket;
1865         }
1866
1867         /* drbd_disconnect() could have called drbd_free_sock()
1868          * while we were waiting in down()... */
1869         if (likely(sock != NULL))
1870                 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1871
1872         if (use_data_socket)
1873                 mutex_unlock(&mdev->data.mutex);
1874         else
1875                 mutex_unlock(&mdev->meta.mutex);
1876         return ok;
1877 }
1878
1879 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1880                    size_t size)
1881 {
1882         struct p_header80 h;
1883         int ok;
1884
1885         h.magic   = BE_DRBD_MAGIC;
1886         h.command = cpu_to_be16(cmd);
1887         h.length  = cpu_to_be16(size);
1888
1889         if (!drbd_get_data_sock(mdev))
1890                 return 0;
1891
1892         ok = (sizeof(h) ==
1893                 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1894         ok = ok && (size ==
1895                 drbd_send(mdev, mdev->data.socket, data, size, 0));
1896
1897         drbd_put_data_sock(mdev);
1898
1899         return ok;
1900 }
1901
1902 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1903 {
1904         struct p_rs_param_95 *p;
1905         struct socket *sock;
1906         int size, rv;
1907         const int apv = mdev->agreed_pro_version;
1908
1909         size = apv <= 87 ? sizeof(struct p_rs_param)
1910                 : apv == 88 ? sizeof(struct p_rs_param)
1911                         + strlen(mdev->sync_conf.verify_alg) + 1
1912                 : apv <= 94 ? sizeof(struct p_rs_param_89)
1913                 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
1914
1915         /* used from admin command context and receiver/worker context.
1916          * to avoid kmalloc, grab the socket right here,
1917          * then use the pre-allocated sbuf there */
1918         mutex_lock(&mdev->data.mutex);
1919         sock = mdev->data.socket;
1920
1921         if (likely(sock != NULL)) {
1922                 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1923
1924                 p = &mdev->data.sbuf.rs_param_95;
1925
1926                 /* initialize verify_alg and csums_alg */
1927                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1928
1929                 p->rate = cpu_to_be32(sc->rate);
1930                 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1931                 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1932                 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1933                 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
1934
1935                 if (apv >= 88)
1936                         strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1937                 if (apv >= 89)
1938                         strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1939
1940                 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1941         } else
1942                 rv = 0; /* not ok */
1943
1944         mutex_unlock(&mdev->data.mutex);
1945
1946         return rv;
1947 }
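/*
 * The on-the-wire size of the sync parameter packet chosen above depends on
 * the agreed protocol version (apv); summarized from the code:
 *
 *	apv <= 87:  sizeof(struct p_rs_param)
 *	apv == 88:  sizeof(struct p_rs_param) + strlen(verify_alg) + 1
 *	apv <= 94:  sizeof(struct p_rs_param_89)
 *	apv >= 95:  sizeof(struct p_rs_param_95)
 *
 * The command code is P_SYNC_PARAM89 for apv >= 89 and P_SYNC_PARAM before
 * that; verify_alg is sent from apv 88 on, csums_alg from apv 89 on.
 */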
1948
1949 int drbd_send_protocol(struct drbd_conf *mdev)
1950 {
1951         struct p_protocol *p;
1952         int size, cf, rv;
1953
1954         size = sizeof(struct p_protocol);
1955
1956         if (mdev->agreed_pro_version >= 87)
1957                 size += strlen(mdev->net_conf->integrity_alg) + 1;
1958
1959         /* we must not recurse into our own queue,
1960          * as that is blocked during handshake */
1961         p = kmalloc(size, GFP_NOIO);
1962         if (p == NULL)
1963                 return 0;
1964
1965         p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
1966         p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
1967         p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
1968         p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
1969         p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1970
1971         cf = 0;
1972         if (mdev->net_conf->want_lose)
1973                 cf |= CF_WANT_LOSE;
1974         if (mdev->net_conf->dry_run) {
1975                 if (mdev->agreed_pro_version >= 92)
1976                         cf |= CF_DRY_RUN;
1977                 else {
1978                         dev_err(DEV, "--dry-run is not supported by peer");
1979                         kfree(p);
1980                         return -1;
1981                 }
1982         }
1983         p->conn_flags    = cpu_to_be32(cf);
1984
1985         if (mdev->agreed_pro_version >= 87)
1986                 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1987
1988         rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1989                            (struct p_header80 *)p, size);
1990         kfree(p);
1991         return rv;
1992 }
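/*
 * conn_flags bits sent in P_PROTOCOL, as assembled above:
 *
 *	CF_WANT_LOSE  set when our net_conf->want_lose (discard-my-data) is set
 *	CF_DRY_RUN    set for a dry-run connect; only valid for apv >= 92,
 *	              otherwise drbd_send_protocol() gives up and returns -1
 */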
1993
1994 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1995 {
1996         struct p_uuids p;
1997         int i;
1998
1999         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2000                 return 1;
2001
2002         for (i = UI_CURRENT; i < UI_SIZE; i++)
2003                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2004
2005         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2006         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2007         uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2008         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2009         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2010         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2011
2012         put_ldev(mdev);
2013
2014         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
2015                              (struct p_header80 *)&p, sizeof(p));
2016 }
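/*
 * Meaning of the uuid_flags bits, as far as this file shows:
 *
 *	1  our net_conf->want_lose (discard-my-data) is set
 *	2  we are/were a crashed primary; the receiving side clears this bit
 *	   from its copy of the peer's flags once C_CONNECTED is reached
 *	   (see after_state_ch() above)
 *	4  our disk was D_INCONSISTENT while negotiating
 *	8  skip the initial sync (see drbd_send_uuids_skip_initial_sync() below)
 */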
2017
2018 int drbd_send_uuids(struct drbd_conf *mdev)
2019 {
2020         return _drbd_send_uuids(mdev, 0);
2021 }
2022
2023 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2024 {
2025         return _drbd_send_uuids(mdev, 8);
2026 }
2027
2028 int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
2029 {
2030         struct p_rs_uuid p;
2031         u64 uuid;
2032
2033         D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2034
2035         uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
2036         drbd_uuid_set(mdev, UI_BITMAP, uuid);
2037         drbd_md_sync(mdev);
2038         p.uuid = cpu_to_be64(uuid);
2039
2040         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
2041                              (struct p_header80 *)&p, sizeof(p));
2042 }
2043
2044 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
2045 {
2046         struct p_sizes p;
2047         sector_t d_size, u_size;
2048         int q_order_type;
2049         int ok;
2050
2051         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2052                 D_ASSERT(mdev->ldev->backing_bdev);
2053                 d_size = drbd_get_max_capacity(mdev->ldev);
2054                 u_size = mdev->ldev->dc.disk_size;
2055                 q_order_type = drbd_queue_order_type(mdev);
2056                 put_ldev(mdev);
2057         } else {
2058                 d_size = 0;
2059                 u_size = 0;
2060                 q_order_type = QUEUE_ORDERED_NONE;
2061         }
2062
2063         p.d_size = cpu_to_be64(d_size);
2064         p.u_size = cpu_to_be64(u_size);
2065         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2066         p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
2067         p.queue_order_type = cpu_to_be16(q_order_type);
2068         p.dds_flags = cpu_to_be16(flags);
2069
2070         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
2071                            (struct p_header80 *)&p, sizeof(p));
2072         return ok;
2073 }
2074
2075 /**
2076  * drbd_send_state() - Sends the drbd state to the peer
2077  * @mdev:       DRBD device.
2078  */
2079 int drbd_send_state(struct drbd_conf *mdev)
2080 {
2081         struct socket *sock;
2082         struct p_state p;
2083         int ok = 0;
2084
2085         /* Grab state lock so we won't send state if we're in the middle
2086          * of a cluster wide state change on another thread */
2087         drbd_state_lock(mdev);
2088
2089         mutex_lock(&mdev->data.mutex);
2090
2091         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2092         sock = mdev->data.socket;
2093
2094         if (likely(sock != NULL)) {
2095                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2096                                     (struct p_header80 *)&p, sizeof(p), 0);
2097         }
2098
2099         mutex_unlock(&mdev->data.mutex);
2100
2101         drbd_state_unlock(mdev);
2102         return ok;
2103 }
2104
2105 int drbd_send_state_req(struct drbd_conf *mdev,
2106         union drbd_state mask, union drbd_state val)
2107 {
2108         struct p_req_state p;
2109
2110         p.mask    = cpu_to_be32(mask.i);
2111         p.val     = cpu_to_be32(val.i);
2112
2113         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
2114                              (struct p_header80 *)&p, sizeof(p));
2115 }
2116
2117 int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
2118 {
2119         struct p_req_state_reply p;
2120
2121         p.retcode    = cpu_to_be32(retcode);
2122
2123         return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
2124                              (struct p_header80 *)&p, sizeof(p));
2125 }
2126
2127 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2128         struct p_compressed_bm *p,
2129         struct bm_xfer_ctx *c)
2130 {
2131         struct bitstream bs;
2132         unsigned long plain_bits;
2133         unsigned long tmp;
2134         unsigned long rl;
2135         unsigned len;
2136         unsigned toggle;
2137         int bits;
2138
2139         /* may we use this feature? */
2140         if ((mdev->sync_conf.use_rle == 0) ||
2141                 (mdev->agreed_pro_version < 90))
2142                         return 0;
2143
2144         if (c->bit_offset >= c->bm_bits)
2145                 return 0; /* nothing to do. */
2146
2147         /* use at most this many bytes */
2148         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2149         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2150         /* plain bits covered in this code string */
2151         plain_bits = 0;
2152
2153         /* p->encoding & 0x80 stores whether the first run length is set.
2154          * bit offset is implicit.
2155          * start with toggle == 2 to be able to tell the first iteration */
2156         toggle = 2;
2157
2158         /* see how many plain bits we can stuff into one packet
2159          * using RLE and VLI. */
2160         do {
2161                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2162                                     : _drbd_bm_find_next(mdev, c->bit_offset);
2163                 if (tmp == -1UL)
2164                         tmp = c->bm_bits;
2165                 rl = tmp - c->bit_offset;
2166
2167                 if (toggle == 2) { /* first iteration */
2168                         if (rl == 0) {
2169                                 /* the first checked bit was set,
2170                                  * store start value, */
2171                                 DCBP_set_start(p, 1);
2172                                 /* but skip encoding of zero run length */
2173                                 toggle = !toggle;
2174                                 continue;
2175                         }
2176                         DCBP_set_start(p, 0);
2177                 }
2178
2179                 /* paranoia: catch zero runlength.
2180                  * can only happen if bitmap is modified while we scan it. */
2181                 if (rl == 0) {
2182                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2183                             "t:%u bo:%lu\n", toggle, c->bit_offset);
2184                         return -1;
2185                 }
2186
2187                 bits = vli_encode_bits(&bs, rl);
2188                 if (bits == -ENOBUFS) /* buffer full */
2189                         break;
2190                 if (bits <= 0) {
2191                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2192                         return 0;
2193                 }
2194
2195                 toggle = !toggle;
2196                 plain_bits += rl;
2197                 c->bit_offset = tmp;
2198         } while (c->bit_offset < c->bm_bits);
2199
2200         len = bs.cur.b - p->code + !!bs.cur.bit;
2201
2202         if (plain_bits < (len << 3)) {
2203                 /* incompressible with this method.
2204                  * we need to rewind both word and bit position. */
2205                 c->bit_offset -= plain_bits;
2206                 bm_xfer_ctx_bit_to_word_offset(c);
2207                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2208                 return 0;
2209         }
2210
2211         /* RLE + VLI was able to compress it just fine.
2212          * update c->word_offset. */
2213         bm_xfer_ctx_bit_to_word_offset(c);
2214
2215         /* store pad_bits */
2216         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2217
2218         return len;
2219 }
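/*
 * A small worked example of the run-length encoding above (illustrative
 * only).  For the bit string 0001111011, scanned from bit offset 0:
 *
 *	run of three 0-bits -> DCBP_set_start(p, 0), vli_encode_bits(&bs, 3)
 *	run of four  1-bits -> vli_encode_bits(&bs, 4)
 *	run of one   0-bit  -> vli_encode_bits(&bs, 1)
 *	run of two   1-bits -> vli_encode_bits(&bs, 2)
 *
 * Had the string started with a set bit, DCBP_set_start(p, 1) would be used
 * and the leading zero-length run would be skipped.  If the code string ends
 * up needing more bits than the plain bits it covers, the offsets are rewound
 * and 0 is returned, so the caller falls back to a plain P_BITMAP packet.
 */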
2220
2221 /**
2222  * send_bitmap_rle_or_plain
2223  *
2224  * Return 0 when done, 1 when another iteration is needed, and a negative error
2225  * code upon failure.
2226  */
2227 static int
2228 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2229                          struct p_header80 *h, struct bm_xfer_ctx *c)
2230 {
2231         struct p_compressed_bm *p = (void*)h;
2232         unsigned long num_words;
2233         int len;
2234         int ok;
2235
2236         len = fill_bitmap_rle_bits(mdev, p, c);
2237
2238         if (len < 0)
2239                 return -EIO;
2240
2241         if (len) {
2242                 DCBP_set_code(p, RLE_VLI_Bits);
2243                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2244                         sizeof(*p) + len, 0);
2245
2246                 c->packets[0]++;
2247                 c->bytes[0] += sizeof(*p) + len;
2248
2249                 if (c->bit_offset >= c->bm_bits)
2250                         len = 0; /* DONE */
2251         } else {
2252                 /* was not compressible.
2253                  * send a buffer full of plain text bits instead. */
2254                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2255                 len = num_words * sizeof(long);
2256                 if (len)
2257                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2258                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2259                                    h, sizeof(struct p_header80) + len, 0);
2260                 c->word_offset += num_words;
2261                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2262
2263                 c->packets[1]++;
2264                 c->bytes[1] += sizeof(struct p_header80) + len;
2265
2266                 if (c->bit_offset > c->bm_bits)
2267                         c->bit_offset = c->bm_bits;
2268         }
2269         if (ok) {
2270                 if (len == 0) {
2271                         INFO_bm_xfer_stats(mdev, "send", c);
2272                         return 0;
2273                 } else
2274                         return 1;
2275         }
2276         return -EIO;
2277 }
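/*
 * The caller drives this in a simple loop, reusing one scratch page until the
 * whole bitmap has been sent (see _drbd_send_bitmap() below):
 *
 *	do {
 *		err = send_bitmap_rle_or_plain(mdev, p, &c);
 *	} while (err > 0);
 *
 * c->packets[0]/bytes[0] count compressed packets, c->packets[1]/bytes[1]
 * plain ones; INFO_bm_xfer_stats() prints the totals once the transfer is
 * complete.
 */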
2278
2279 /* See the comment at receive_bitmap() */
2280 int _drbd_send_bitmap(struct drbd_conf *mdev)
2281 {
2282         struct bm_xfer_ctx c;
2283         struct p_header80 *p;
2284         int err;
2285
2286         ERR_IF(!mdev->bitmap) return false;
2287
2288         /* maybe we should use some per thread scratch page,
2289          * and allocate that during initial device creation? */
2290         p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2291         if (!p) {
2292                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2293                 return false;
2294         }
2295
2296         if (get_ldev(mdev)) {
2297                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2298                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2299                         drbd_bm_set_all(mdev);
2300                         if (drbd_bm_write(mdev)) {
2301                                 /* write_bm did fail! Leave the full sync flag set in the
2302                                  * meta data, but otherwise proceed as normal - we still need
2303                                  * to tell the other side that a full resync is required! */
2304                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
2305                         } else {
2306                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2307                                 drbd_md_sync(mdev);
2308                         }
2309                 }
2310                 put_ldev(mdev);
2311         }
2312
2313         c = (struct bm_xfer_ctx) {
2314                 .bm_bits = drbd_bm_bits(mdev),
2315                 .bm_words = drbd_bm_words(mdev),
2316         };
2317
2318         do {
2319                 err = send_bitmap_rle_or_plain(mdev, p, &c);
2320         } while (err > 0);
2321
2322         free_page((unsigned long) p);
2323         return err == 0;
2324 }
2325
2326 int drbd_send_bitmap(struct drbd_conf *mdev)
2327 {
2328         int err;
2329
2330         if (!drbd_get_data_sock(mdev))
2331                 return -1;
2332         err = !_drbd_send_bitmap(mdev);
2333         drbd_put_data_sock(mdev);
2334         return err;
2335 }
2336
2337 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2338 {
2339         int ok;
2340         struct p_barrier_ack p;
2341
2342         p.barrier  = barrier_nr;
2343         p.set_size = cpu_to_be32(set_size);
2344
2345         if (mdev->state.conn < C_CONNECTED)
2346                 return false;
2347         ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2348                         (struct p_header80 *)&p, sizeof(p));
2349         return ok;
2350 }
2351
2352 /**
2353  * _drbd_send_ack() - Sends an ack packet
2354  * @mdev:       DRBD device.
2355  * @cmd:        Packet command code.
2356  * @sector:     sector, needs to be in big endian byte order
2357  * @blksize:    size in bytes, needs to be in big endian byte order
2358  * @block_id:   Id, big endian byte order
2359  */
2360 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2361                           u64 sector,
2362                           u32 blksize,
2363                           u64 block_id)
2364 {
2365         int ok;
2366         struct p_block_ack p;
2367
2368         p.sector   = sector;
2369         p.block_id = block_id;
2370         p.blksize  = blksize;
2371         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2372
2373         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2374                 return false;
2375         ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2376                                 (struct p_header80 *)&p, sizeof(p));
2377         return ok;
2378 }
2379
2380 /* dp->sector and dp->block_id already/still in network byte order,
2381  * data_size is payload size according to dp->head,
2382  * and may need to be corrected for digest size. */
2383 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2384                      struct p_data *dp, int data_size)
2385 {
2386         data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2387                 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2388         return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2389                               dp->block_id);
2390 }
2391
2392 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2393                      struct p_block_req *rp)
2394 {
2395         return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2396 }
2397
2398 /**
2399  * drbd_send_ack() - Sends an ack packet
2400  * @mdev:       DRBD device.
2401  * @cmd:        Packet command code.
2402  * @e:          Epoch entry.
2403  */
2404 int drbd_send_ack(struct drbd_conf *mdev,
2405         enum drbd_packets cmd, struct drbd_epoch_entry *e)
2406 {
2407         return _drbd_send_ack(mdev, cmd,
2408                               cpu_to_be64(e->sector),
2409                               cpu_to_be32(e->size),
2410                               e->block_id);
2411 }
2412
2413 /* This function misuses the block_id field to signal if the blocks
2414  * are in sync or not. */
2415 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2416                      sector_t sector, int blksize, u64 block_id)
2417 {
2418         return _drbd_send_ack(mdev, cmd,
2419                               cpu_to_be64(sector),
2420                               cpu_to_be32(blksize),
2421                               cpu_to_be64(block_id));
2422 }
2423
2424 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2425                        sector_t sector, int size, u64 block_id)
2426 {
2427         int ok;
2428         struct p_block_req p;
2429
2430         p.sector   = cpu_to_be64(sector);
2431         p.block_id = block_id;
2432         p.blksize  = cpu_to_be32(size);
2433
2434         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2435                                 (struct p_header80 *)&p, sizeof(p));
2436         return ok;
2437 }
2438
2439 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2440                             sector_t sector, int size,
2441                             void *digest, int digest_size,
2442                             enum drbd_packets cmd)
2443 {
2444         int ok;
2445         struct p_block_req p;
2446
2447         p.sector   = cpu_to_be64(sector);
2448         p.block_id = BE_DRBD_MAGIC + 0xbeef;
2449         p.blksize  = cpu_to_be32(size);
2450
2451         p.head.magic   = BE_DRBD_MAGIC;
2452         p.head.command = cpu_to_be16(cmd);
2453         p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2454
2455         mutex_lock(&mdev->data.mutex);
2456
2457         ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2458         ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2459
2460         mutex_unlock(&mdev->data.mutex);
2461
2462         return ok;
2463 }
2464
2465 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2466 {
2467         int ok;
2468         struct p_block_req p;
2469
2470         p.sector   = cpu_to_be64(sector);
2471         p.block_id = BE_DRBD_MAGIC + 0xbabe;
2472         p.blksize  = cpu_to_be32(size);
2473
2474         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2475                            (struct p_header80 *)&p, sizeof(p));
2476         return ok;
2477 }
2478
2479 /* called on sndtimeo
2480  * returns false if we should retry,
2481  * true if we think the connection is dead
2482  */
2483 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2484 {
2485         int drop_it;
2486         /* long elapsed = (long)(jiffies - mdev->last_received); */
2487
2488         drop_it =   mdev->meta.socket == sock
2489                 || !mdev->asender.task
2490                 || get_t_state(&mdev->asender) != Running
2491                 || mdev->state.conn < C_CONNECTED;
2492
2493         if (drop_it)
2494                 return true;
2495
2496         drop_it = !--mdev->ko_count;
2497         if (!drop_it) {
2498                 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2499                        current->comm, current->pid, mdev->ko_count);
2500                 request_ping(mdev);
2501         }
2502
2503         return drop_it; /* && (mdev->state == R_PRIMARY) */;
2504 }
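/*
 * ko_count (the "ko-count" net option) is re-armed to net_conf->ko_count at
 * the start of every drbd_send() on the data socket (see drbd_send() below).
 * Each send timeout then ends up here: while the counter has not reached
 * zero we log, request a ping over the meta socket and let the caller retry;
 * once it hits zero the connection is considered dead.
 */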
2505
2506 /* The idea of sendpage seems to be to put some kind of reference
2507  * to the page into the skb, and to hand it over to the NIC. In
2508  * this process get_page() gets called.
2509  *
2510  * As soon as the page was really sent over the network put_page()
2511  * gets called by some part of the network layer. [ NIC driver? ]
2512  *
2513  * [ get_page() / put_page() increment/decrement the count. If count
2514  *   reaches 0 the page will be freed. ]
2515  *
2516  * This works nicely with pages from FSs.
2517  * But this means that in protocol A we might signal IO completion too early!
2518  *
2519  * In order not to corrupt data during a resync we must make sure
2520  * that we do not reuse our own buffer pages (EEs) too early, therefore
2521  * we have the net_ee list.
2522  *
2523  * XFS seems to have problems, still, it submits pages with page_count == 0!
2524  * As a workaround, we disable sendpage on pages
2525  * with page_count == 0 or PageSlab.
2526  */
2527 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2528                    int offset, size_t size, unsigned msg_flags)
2529 {
2530         int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2531         kunmap(page);
2532         if (sent == size)
2533                 mdev->send_cnt += size>>9;
2534         return sent == size;
2535 }
2536
2537 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2538                     int offset, size_t size, unsigned msg_flags)
2539 {
2540         mm_segment_t oldfs = get_fs();
2541         int sent, ok;
2542         int len = size;
2543
2544         /* e.g. XFS meta- & log-data is in slab pages, which have a
2545          * page_count of 0 and/or have PageSlab() set.
2546          * we cannot use send_page for those, as that does get_page();
2547          * put_page(); and would cause either a VM_BUG directly, or
2548          * __page_cache_release a page that would actually still be referenced
2549          * by someone, leading to some obscure delayed Oops somewhere else. */
2550         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2551                 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2552
2553         msg_flags |= MSG_NOSIGNAL;
2554         drbd_update_congested(mdev);
2555         set_fs(KERNEL_DS);
2556         do {
2557                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2558                                                         offset, len,
2559                                                         msg_flags);
2560                 if (sent == -EAGAIN) {
2561                         if (we_should_drop_the_connection(mdev,
2562                                                           mdev->data.socket))
2563                                 break;
2564                         else
2565                                 continue;
2566                 }
2567                 if (sent <= 0) {
2568                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2569                              __func__, (int)size, len, sent);
2570                         break;
2571                 }
2572                 len    -= sent;
2573                 offset += sent;
2574         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2575         set_fs(oldfs);
2576         clear_bit(NET_CONGESTED, &mdev->flags);
2577
2578         ok = (len == 0);
2579         if (likely(ok))
2580                 mdev->send_cnt += size>>9;
2581         return ok;
2582 }
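/*
 * Decision between the zero-copy and the copying path, as implemented above
 * (sketch):
 *
 *	if (disable_sendpage || page_count(page) < 1 || PageSlab(page))
 *		_drbd_no_send_page(...); /* kmap() + drbd_send(): data is copied */
 *	else
 *		->sendpage(...);         /* zero copy: the page stays referenced
 *					  * by the network stack until sent */
 *
 * The copying path is also what protocol A and the data-integrity digest case
 * use for whole bios, see drbd_send_dblock() below.
 */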
2583
2584 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2585 {
2586         struct bio_vec *bvec;
2587         int i;
2588         /* hint all but last page with MSG_MORE */
2589         __bio_for_each_segment(bvec, bio, i, 0) {
2590                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2591                                      bvec->bv_offset, bvec->bv_len,
2592                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2593                         return 0;
2594         }
2595         return 1;
2596 }
2597
2598 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2599 {
2600         struct bio_vec *bvec;
2601         int i;
2602         /* hint all but last page with MSG_MORE */
2603         __bio_for_each_segment(bvec, bio, i, 0) {
2604                 if (!_drbd_send_page(mdev, bvec->bv_page,
2605                                      bvec->bv_offset, bvec->bv_len,
2606                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2607                         return 0;
2608         }
2609         return 1;
2610 }
2611
2612 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2613 {
2614         struct page *page = e->pages;
2615         unsigned len = e->size;
2616         /* hint all but last page with MSG_MORE */
2617         page_chain_for_each(page) {
2618                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2619                 if (!_drbd_send_page(mdev, page, 0, l,
2620                                 page_chain_next(page) ? MSG_MORE : 0))
2621                         return 0;
2622                 len -= l;
2623         }
2624         return 1;
2625 }
2626
2627 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2628 {
2629         if (mdev->agreed_pro_version >= 95)
2630                 return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2631                         (bi_rw & REQ_FUA ? DP_FUA : 0) |
2632                         (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2633                         (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2634         else
2635                 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
2636 }
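/*
 * Mapping of bio flags to wire flags, as implemented above:
 *
 *	REQ_SYNC    -> DP_RW_SYNC
 *	REQ_FUA     -> DP_FUA      (apv >= 95 only)
 *	REQ_FLUSH   -> DP_FLUSH    (apv >= 95 only)
 *	REQ_DISCARD -> DP_DISCARD  (apv >= 95 only)
 *
 * Peers older than protocol version 95 only understand the sync hint.
 */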
2637
2638 /* Used to send write requests
2639  * R_PRIMARY -> Peer    (P_DATA)
2640  */
2641 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2642 {
2643         int ok = 1;
2644         struct p_data p;
2645         unsigned int dp_flags = 0;
2646         void *dgb;
2647         int dgs;
2648
2649         if (!drbd_get_data_sock(mdev))
2650                 return 0;
2651
2652         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2653                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2654
2655         if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2656                 p.head.h80.magic   = BE_DRBD_MAGIC;
2657                 p.head.h80.command = cpu_to_be16(P_DATA);
2658                 p.head.h80.length  =
2659                         cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2660         } else {
2661                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2662                 p.head.h95.command = cpu_to_be16(P_DATA);
2663                 p.head.h95.length  =
2664                         cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2665         }
2666
2667         p.sector   = cpu_to_be64(req->sector);
2668         p.block_id = (unsigned long)req;
2669         p.seq_num  = cpu_to_be32(req->seq_num =
2670                                  atomic_add_return(1, &mdev->packet_seq));
2671
2672         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2673
2674         if (mdev->state.conn >= C_SYNC_SOURCE &&
2675             mdev->state.conn <= C_PAUSED_SYNC_T)
2676                 dp_flags |= DP_MAY_SET_IN_SYNC;
2677
2678         p.dp_flags = cpu_to_be32(dp_flags);
2679         set_bit(UNPLUG_REMOTE, &mdev->flags);
2680         ok = (sizeof(p) ==
2681                 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2682         if (ok && dgs) {
2683                 dgb = mdev->int_dig_out;
2684                 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2685                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2686         }
2687         if (ok) {
2688                 /* For protocol A, we have to memcpy the payload into
2689                  * socket buffers, as we may complete right away
2690                  * as soon as we handed it over to tcp, at which point the data
2691                  * pages may become invalid.
2692                  *
2693                  * For data-integrity enabled, we copy it as well, so we can be
2694                  * sure that even if the bio pages may still be modified, it
2695                  * won't change the data on the wire, thus if the digest checks
2696                  * out ok after sending on this side, but does not fit on the
2697                  * receiving side, we sure have detected corruption elsewhere.
2698                  */
2699                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
2700                         ok = _drbd_send_bio(mdev, req->master_bio);
2701                 else
2702                         ok = _drbd_send_zc_bio(mdev, req->master_bio);
2703
2704                 /* double check digest, sometimes buffers have been modified in flight. */
2705                 if (dgs > 0 && dgs <= 64) {
2706                         /* 64 bytes (512 bits) is the largest digest size
2707                          * currently supported by the kernel crypto API. */
2708                         unsigned char digest[64];
2709                         drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2710                         if (memcmp(mdev->int_dig_out, digest, dgs)) {
2711                                 dev_warn(DEV,
2712                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2713                                         (unsigned long long)req->sector, req->size);
2714                         }
2715                 } /* else if (dgs > 64) {
2716                      ... Be noisy about digest too large ...
2717                 } */
2718         }
2719
2720         drbd_put_data_sock(mdev);
2721
2722         return ok;
2723 }
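/*
 * On the wire a write request produced above looks roughly like this
 * (sketch; field order as assigned in the code, exact layout defined by
 * struct p_data):
 *
 *	[p_header80 or p_header95][sector][block_id][seq_num][dp_flags]
 *	[integrity digest, dgs bytes, if configured][payload, req->size bytes]
 *
 * The h95 header with its 32-bit length field is used only when req->size
 * exceeds DRBD_MAX_SIZE_H80_PACKET; the length covers everything after the
 * header, including digest and payload.
 */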
2724
2725 /* answer packet, used to send data back for read requests:
2726  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
2727  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
2728  */
2729 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2730                     struct drbd_epoch_entry *e)
2731 {
2732         int ok;
2733         struct p_data p;
2734         void *dgb;
2735         int dgs;
2736
2737         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2738                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2739
2740         if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2741                 p.head.h80.magic   = BE_DRBD_MAGIC;
2742                 p.head.h80.command = cpu_to_be16(cmd);
2743                 p.head.h80.length  =
2744                         cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2745         } else {
2746                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2747                 p.head.h95.command = cpu_to_be16(cmd);
2748                 p.head.h95.length  =
2749                         cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2750         }
2751
2752         p.sector   = cpu_to_be64(e->sector);
2753         p.block_id = e->block_id;
2754         /* p.seq_num  = 0;    No sequence numbers here.. */
2755
2756         /* Only called by our kernel thread.
2757          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2758          * in response to an admin command or module unload.
2759          */
2760         if (!drbd_get_data_sock(mdev))
2761                 return 0;
2762
2763         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2764         if (ok && dgs) {
2765                 dgb = mdev->int_dig_out;
2766                 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2767                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2768         }
2769         if (ok)
2770                 ok = _drbd_send_zc_ee(mdev, e);
2771
2772         drbd_put_data_sock(mdev);
2773
2774         return ok;
2775 }
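
/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * this file): the header-format decision used in drbd_send_block() above.
 * Payloads that still fit the 16 bit length field of struct p_header80 use
 * the old h80 header; anything larger needs the h95 header with its 32 bit
 * length field.
 */
#if 0
static bool drbd_needs_big_header(unsigned int payload_size)
{
	return payload_size > DRBD_MAX_SIZE_H80_PACKET;
}
#endif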
2776
2777 int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2778 {
2779         struct p_block_desc p;
2780
2781         p.sector  = cpu_to_be64(req->sector);
2782         p.blksize = cpu_to_be32(req->size);
2783
2784         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2785 }
2786
2787 /*
2788   drbd_send distinguishes two cases:
2789
2790   Packets sent via the data socket "sock"
2791   and packets sent via the meta data socket "msock"
2792
2793                     sock                      msock
2794   -----------------+-------------------------+------------------------------
2795   timeout           conf.timeout / 2          conf.timeout / 2
2796   timeout action    send a ping via msock     Abort communication
2797                                               and close all sockets
2798 */
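
/*
 * Sketch only (the names below are hypothetical, not DRBD API): the timeout
 * policy from the table above expressed as a decision helper.  A stalled
 * data socket is first probed with a ping over the meta-data socket; a
 * stalled meta-data socket means the peer is unreachable, so all sockets
 * get closed.
 */
#if 0
enum timeout_action { TA_SEND_PING, TA_ABORT_CONNECTION };

static enum timeout_action timeout_action_for(bool is_data_sock)
{
	return is_data_sock ? TA_SEND_PING : TA_ABORT_CONNECTION;
}
#endif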
2799
2800 /*
2801  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2802  */
2803 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2804               void *buf, size_t size, unsigned msg_flags)
2805 {
2806         struct kvec iov;
2807         struct msghdr msg;
2808         int rv, sent = 0;
2809
2810         if (!sock)
2811                 return -1000;
2812
2813         /* THINK  if (signal_pending) return ... ? */
2814
2815         iov.iov_base = buf;
2816         iov.iov_len  = size;
2817
2818         msg.msg_name       = NULL;
2819         msg.msg_namelen    = 0;
2820         msg.msg_control    = NULL;
2821         msg.msg_controllen = 0;
2822         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
2823
2824         if (sock == mdev->data.socket) {
2825                 mdev->ko_count = mdev->net_conf->ko_count;
2826                 drbd_update_congested(mdev);
2827         }
2828         do {
2829                 /* STRANGE
2830                  * tcp_sendmsg does _not_ use its size parameter at all ?
2831                  *
2832                  * -EAGAIN on timeout, -EINTR on signal.
2833                  */
2834 /* THINK
2835  * do we need to block DRBD_SIG if sock == &meta.socket ??
2836  * otherwise wake_asender() might interrupt some send_*Ack !
2837  */
2838                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2839                 if (rv == -EAGAIN) {
2840                         if (we_should_drop_the_connection(mdev, sock))
2841                                 break;
2842                         else
2843                                 continue;
2844                 }
2845                 D_ASSERT(rv != 0);
2846                 if (rv == -EINTR) {
2847                         flush_signals(current);
2848                         rv = 0;
2849                 }
2850                 if (rv < 0)
2851                         break;
2852                 sent += rv;
2853                 iov.iov_base += rv;
2854                 iov.iov_len  -= rv;
2855         } while (sent < size);
2856
2857         if (sock == mdev->data.socket)
2858                 clear_bit(NET_CONGESTED, &mdev->flags);
2859
2860         if (rv <= 0) {
2861                 if (rv != -EAGAIN) {
2862                         dev_err(DEV, "%s_sendmsg returned %d\n",
2863                             sock == mdev->meta.socket ? "msock" : "sock",
2864                             rv);
2865                         drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2866                 } else
2867                         drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2868         }
2869
2870         return sent;
2871 }
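
/*
 * Userspace analogue (sketch, not part of the driver): the same
 * "retry until everything is sent" loop as drbd_send() above, with
 * send(2) standing in for kernel_sendmsg() and the error handling
 * reduced to the essentials.
 */
#if 0
#include <sys/socket.h>
#include <errno.h>

static ssize_t send_all(int fd, const void *buf, size_t size)
{
	size_t sent = 0;

	while (sent < size) {
		ssize_t rv = send(fd, (const char *)buf + sent,
				  size - sent, MSG_NOSIGNAL);
		if (rv < 0) {
			if (errno == EINTR)
				continue;	/* a real caller may also give up on EAGAIN */
			return -1;
		}
		sent += rv;
	}
	return (ssize_t)sent;
}
#endif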
2872
2873 static int drbd_open(struct block_device *bdev, fmode_t mode)
2874 {
2875         struct drbd_conf *mdev = bdev->bd_disk->private_data;
2876         unsigned long flags;
2877         int rv = 0;
2878
2879         mutex_lock(&drbd_main_mutex);
2880         spin_lock_irqsave(&mdev->req_lock, flags);
2881         /* to have a stable mdev->state.role
2882          * and no race with updating open_cnt */
2883
2884         if (mdev->state.role != R_PRIMARY) {
2885                 if (mode & FMODE_WRITE)
2886                         rv = -EROFS;
2887                 else if (!allow_oos)
2888                         rv = -EMEDIUMTYPE;
2889         }
2890
2891         if (!rv)
2892                 mdev->open_cnt++;
2893         spin_unlock_irqrestore(&mdev->req_lock, flags);
2894         mutex_unlock(&drbd_main_mutex);
2895
2896         return rv;
2897 }
2898
2899 static int drbd_release(struct gendisk *gd, fmode_t mode)
2900 {
2901         struct drbd_conf *mdev = gd->private_data;
2902         mutex_lock(&drbd_main_mutex);
2903         mdev->open_cnt--;
2904         mutex_unlock(&drbd_main_mutex);
2905         return 0;
2906 }
2907
2908 static void drbd_set_defaults(struct drbd_conf *mdev)
2909 {
2910         /* This way we get a compile error when sync_conf grows
2911            and we forget to initialize it here */
2912         mdev->sync_conf = (struct syncer_conf) {
2913                 /* .rate = */           DRBD_RATE_DEF,
2914                 /* .after = */          DRBD_AFTER_DEF,
2915                 /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
2916                 /* .verify_alg = */     {}, 0,
2917                 /* .cpu_mask = */       {}, 0,
2918                 /* .csums_alg = */      {}, 0,
2919                 /* .use_rle = */        0,
2920                 /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
2921                 /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
2922                 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2923                 /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
2924                 /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
2925                 /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
2926         };
2927
2928         /* We have to do it this way, because the layout differs between
2929            big endian and little endian */
2930         mdev->state = (union drbd_state) {
2931                 { .role = R_SECONDARY,
2932                   .peer = R_UNKNOWN,
2933                   .conn = C_STANDALONE,
2934                   .disk = D_DISKLESS,
2935                   .pdsk = D_UNKNOWN,
2936                   .susp = 0,
2937                   .susp_nod = 0,
2938                   .susp_fen = 0
2939                 } };
2940 }
2941
2942 void drbd_init_set_defaults(struct drbd_conf *mdev)
2943 {
2944         /* the memset(,0,) did most of this.
2945          * note: only assignments, no allocation in here */
2946
2947         drbd_set_defaults(mdev);
2948
2949         atomic_set(&mdev->ap_bio_cnt, 0);
2950         atomic_set(&mdev->ap_pending_cnt, 0);
2951         atomic_set(&mdev->rs_pending_cnt, 0);
2952         atomic_set(&mdev->unacked_cnt, 0);
2953         atomic_set(&mdev->local_cnt, 0);
2954         atomic_set(&mdev->net_cnt, 0);
2955         atomic_set(&mdev->packet_seq, 0);
2956         atomic_set(&mdev->pp_in_use, 0);
2957         atomic_set(&mdev->pp_in_use_by_net, 0);
2958         atomic_set(&mdev->rs_sect_in, 0);
2959         atomic_set(&mdev->rs_sect_ev, 0);
2960         atomic_set(&mdev->ap_in_flight, 0);
2961
2962         mutex_init(&mdev->md_io_mutex);
2963         mutex_init(&mdev->data.mutex);
2964         mutex_init(&mdev->meta.mutex);
2965         sema_init(&mdev->data.work.s, 0);
2966         sema_init(&mdev->meta.work.s, 0);
2967         mutex_init(&mdev->state_mutex);
2968
2969         spin_lock_init(&mdev->data.work.q_lock);
2970         spin_lock_init(&mdev->meta.work.q_lock);
2971
2972         spin_lock_init(&mdev->al_lock);
2973         spin_lock_init(&mdev->req_lock);
2974         spin_lock_init(&mdev->peer_seq_lock);
2975         spin_lock_init(&mdev->epoch_lock);
2976
2977         INIT_LIST_HEAD(&mdev->active_ee);
2978         INIT_LIST_HEAD(&mdev->sync_ee);
2979         INIT_LIST_HEAD(&mdev->done_ee);
2980         INIT_LIST_HEAD(&mdev->read_ee);
2981         INIT_LIST_HEAD(&mdev->net_ee);
2982         INIT_LIST_HEAD(&mdev->resync_reads);
2983         INIT_LIST_HEAD(&mdev->data.work.q);
2984         INIT_LIST_HEAD(&mdev->meta.work.q);
2985         INIT_LIST_HEAD(&mdev->resync_work.list);
2986         INIT_LIST_HEAD(&mdev->unplug_work.list);
2987         INIT_LIST_HEAD(&mdev->go_diskless.list);
2988         INIT_LIST_HEAD(&mdev->md_sync_work.list);
2989         INIT_LIST_HEAD(&mdev->start_resync_work.list);
2990         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2991
2992         mdev->resync_work.cb  = w_resync_timer;
2993         mdev->unplug_work.cb  = w_send_write_hint;
2994         mdev->go_diskless.cb  = w_go_diskless;
2995         mdev->md_sync_work.cb = w_md_sync;
2996         mdev->bm_io_work.w.cb = w_bitmap_io;
2997         mdev->start_resync_work.cb = w_start_resync;
2998         init_timer(&mdev->resync_timer);
2999         init_timer(&mdev->md_sync_timer);
3000         init_timer(&mdev->start_resync_timer);
3001         mdev->resync_timer.function = resync_timer_fn;
3002         mdev->resync_timer.data = (unsigned long) mdev;
3003         mdev->md_sync_timer.function = md_sync_timer_fn;
3004         mdev->md_sync_timer.data = (unsigned long) mdev;
3005         mdev->start_resync_timer.function = start_resync_timer_fn;
3006         mdev->start_resync_timer.data = (unsigned long) mdev;
3007
3008         init_waitqueue_head(&mdev->misc_wait);
3009         init_waitqueue_head(&mdev->state_wait);
3010         init_waitqueue_head(&mdev->net_cnt_wait);
3011         init_waitqueue_head(&mdev->ee_wait);
3012         init_waitqueue_head(&mdev->al_wait);
3013         init_waitqueue_head(&mdev->seq_wait);
3014
3015         drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3016         drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3017         drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3018
3019         mdev->agreed_pro_version = PRO_VERSION_MAX;
3020         mdev->write_ordering = WO_bdev_flush;
3021         mdev->resync_wenr = LC_FREE;
3022 }
3023
3024 void drbd_mdev_cleanup(struct drbd_conf *mdev)
3025 {
3026         int i;
3027         if (mdev->receiver.t_state != None)
3028                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3029                                 mdev->receiver.t_state);
3030
3031         /* no need to lock it, I'm the only thread alive */
3032         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
3033                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3034         mdev->al_writ_cnt  =
3035         mdev->bm_writ_cnt  =
3036         mdev->read_cnt     =
3037         mdev->recv_cnt     =
3038         mdev->send_cnt     =
3039         mdev->writ_cnt     =
3040         mdev->p_size       =
3041         mdev->rs_start     =
3042         mdev->rs_total     =
3043         mdev->rs_failed    = 0;
3044         mdev->rs_last_events = 0;
3045         mdev->rs_last_sect_ev = 0;
3046         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3047                 mdev->rs_mark_left[i] = 0;
3048                 mdev->rs_mark_time[i] = 0;
3049         }
3050         D_ASSERT(mdev->net_conf == NULL);
3051
3052         drbd_set_my_capacity(mdev, 0);
3053         if (mdev->bitmap) {
3054                 /* maybe never allocated. */
3055                 drbd_bm_resize(mdev, 0, 1);
3056                 drbd_bm_cleanup(mdev);
3057         }
3058
3059         drbd_free_resources(mdev);
3060         clear_bit(AL_SUSPENDED, &mdev->flags);
3061
3062         /*
3063          * currently we call drbd_init_ee only on module load, so
3064          * we may call drbd_release_ee only on module unload!
3065          */
3066         D_ASSERT(list_empty(&mdev->active_ee));
3067         D_ASSERT(list_empty(&mdev->sync_ee));
3068         D_ASSERT(list_empty(&mdev->done_ee));
3069         D_ASSERT(list_empty(&mdev->read_ee));
3070         D_ASSERT(list_empty(&mdev->net_ee));
3071         D_ASSERT(list_empty(&mdev->resync_reads));
3072         D_ASSERT(list_empty(&mdev->data.work.q));
3073         D_ASSERT(list_empty(&mdev->meta.work.q));
3074         D_ASSERT(list_empty(&mdev->resync_work.list));
3075         D_ASSERT(list_empty(&mdev->unplug_work.list));
3076         D_ASSERT(list_empty(&mdev->go_diskless.list));
3077
3078         drbd_set_defaults(mdev);
3079 }
3080
3081
3082 static void drbd_destroy_mempools(void)
3083 {
3084         struct page *page;
3085
3086         while (drbd_pp_pool) {
3087                 page = drbd_pp_pool;
3088                 drbd_pp_pool = (struct page *)page_private(page);
3089                 __free_page(page);
3090                 drbd_pp_vacant--;
3091         }
3092
3093         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3094
3095         if (drbd_ee_mempool)
3096                 mempool_destroy(drbd_ee_mempool);
3097         if (drbd_request_mempool)
3098                 mempool_destroy(drbd_request_mempool);
3099         if (drbd_ee_cache)
3100                 kmem_cache_destroy(drbd_ee_cache);
3101         if (drbd_request_cache)
3102                 kmem_cache_destroy(drbd_request_cache);
3103         if (drbd_bm_ext_cache)
3104                 kmem_cache_destroy(drbd_bm_ext_cache);
3105         if (drbd_al_ext_cache)
3106                 kmem_cache_destroy(drbd_al_ext_cache);
3107
3108         drbd_ee_mempool      = NULL;
3109         drbd_request_mempool = NULL;
3110         drbd_ee_cache        = NULL;
3111         drbd_request_cache   = NULL;
3112         drbd_bm_ext_cache    = NULL;
3113         drbd_al_ext_cache    = NULL;
3114
3115         return;
3116 }
3117
3118 static int drbd_create_mempools(void)
3119 {
3120         struct page *page;
3121         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
3122         int i;
3123
3124         /* prepare our caches and mempools */
3125         drbd_request_mempool = NULL;
3126         drbd_ee_cache        = NULL;
3127         drbd_request_cache   = NULL;
3128         drbd_bm_ext_cache    = NULL;
3129         drbd_al_ext_cache    = NULL;
3130         drbd_pp_pool         = NULL;
3131
3132         /* caches */
3133         drbd_request_cache = kmem_cache_create(
3134                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3135         if (drbd_request_cache == NULL)
3136                 goto Enomem;
3137
3138         drbd_ee_cache = kmem_cache_create(
3139                 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3140         if (drbd_ee_cache == NULL)
3141                 goto Enomem;
3142
3143         drbd_bm_ext_cache = kmem_cache_create(
3144                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3145         if (drbd_bm_ext_cache == NULL)
3146                 goto Enomem;
3147
3148         drbd_al_ext_cache = kmem_cache_create(
3149                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3150         if (drbd_al_ext_cache == NULL)
3151                 goto Enomem;
3152
3153         /* mempools */
3154         drbd_request_mempool = mempool_create(number,
3155                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3156         if (drbd_request_mempool == NULL)
3157                 goto Enomem;
3158
3159         drbd_ee_mempool = mempool_create(number,
3160                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
3161         if (drbd_ee_mempool == NULL)
3162                 goto Enomem;
3163
3164         /* drbd's page pool */
3165         spin_lock_init(&drbd_pp_lock);
3166
3167         for (i = 0; i < number; i++) {
3168                 page = alloc_page(GFP_HIGHUSER);
3169                 if (!page)
3170                         goto Enomem;
3171                 set_page_private(page, (unsigned long)drbd_pp_pool);
3172                 drbd_pp_pool = page;
3173         }
3174         drbd_pp_vacant = number;
3175
3176         return 0;
3177
3178 Enomem:
3179         drbd_destroy_mempools(); /* in case we allocated some */
3180         return -ENOMEM;
3181 }
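
/*
 * Sketch (the helpers below are hypothetical): how pages are pushed onto
 * and popped off the drbd_pp_pool list built above, chained through the
 * page_private field.  Locking via drbd_pp_lock is omitted for brevity;
 * the real page allocator takes it.
 */
#if 0
static void drbd_pp_push(struct page *page)
{
	set_page_private(page, (unsigned long)drbd_pp_pool);
	drbd_pp_pool = page;
	drbd_pp_vacant++;
}

static struct page *drbd_pp_pop(void)
{
	struct page *page = drbd_pp_pool;

	if (page) {
		drbd_pp_pool = (struct page *)page_private(page);
		drbd_pp_vacant--;
	}
	return page;
}
#endif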
3182
3183 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3184         void *unused)
3185 {
3186         /* just so we have it.  you never know what interesting things we
3187          * might want to do here some day...
3188          */
3189
3190         return NOTIFY_DONE;
3191 }
3192
3193 static struct notifier_block drbd_notifier = {
3194         .notifier_call = drbd_notify_sys,
3195 };
3196
3197 static void drbd_release_ee_lists(struct drbd_conf *mdev)
3198 {
3199         int rr;
3200
3201         rr = drbd_release_ee(mdev, &mdev->active_ee);
3202         if (rr)
3203                 dev_err(DEV, "%d EEs in active list found!\n", rr);
3204
3205         rr = drbd_release_ee(mdev, &mdev->sync_ee);
3206         if (rr)
3207                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3208
3209         rr = drbd_release_ee(mdev, &mdev->read_ee);
3210         if (rr)
3211                 dev_err(DEV, "%d EEs in read list found!\n", rr);
3212
3213         rr = drbd_release_ee(mdev, &mdev->done_ee);
3214         if (rr)
3215                 dev_err(DEV, "%d EEs in done list found!\n", rr);
3216
3217         rr = drbd_release_ee(mdev, &mdev->net_ee);
3218         if (rr)
3219                 dev_err(DEV, "%d EEs in net list found!\n", rr);
3220 }
3221
3222 /* caution. no locking.
3223  * currently only used from module cleanup code. */
3224 static void drbd_delete_device(unsigned int minor)
3225 {
3226         struct drbd_conf *mdev = minor_to_mdev(minor);
3227
3228         if (!mdev)
3229                 return;
3230
3231         /* paranoia asserts */
3232         if (mdev->open_cnt != 0)
3233                 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3234                                 __FILE__ , __LINE__);
3235
3236         ERR_IF (!list_empty(&mdev->data.work.q)) {
3237                 struct list_head *lp;
3238                 list_for_each(lp, &mdev->data.work.q) {
3239                         dev_err(DEV, "lp = %p\n", lp);
3240                 }
3241         };
3242         /* end paranoia asserts */
3243
3244         del_gendisk(mdev->vdisk);
3245
3246         /* cleanup stuff that may have been allocated during
3247          * device (re-)configuration or state changes */
3248
3249         if (mdev->this_bdev)
3250                 bdput(mdev->this_bdev);
3251
3252         drbd_free_resources(mdev);
3253
3254         drbd_release_ee_lists(mdev);
3255
3256         /* should be free'd on disconnect? */
3257         kfree(mdev->ee_hash);
3258         /*
3259         mdev->ee_hash_s = 0;
3260         mdev->ee_hash = NULL;
3261         */
3262
3263         lc_destroy(mdev->act_log);
3264         lc_destroy(mdev->resync);
3265
3266         kfree(mdev->p_uuid);
3267         /* mdev->p_uuid = NULL; */
3268
3269         kfree(mdev->int_dig_out);
3270         kfree(mdev->int_dig_in);
3271         kfree(mdev->int_dig_vv);
3272
3273         /* cleanup the rest that has been
3274          * allocated from drbd_new_device
3275          * and actually free the mdev itself */
3276         drbd_free_mdev(mdev);
3277 }
3278
3279 static void drbd_cleanup(void)
3280 {
3281         unsigned int i;
3282
3283         unregister_reboot_notifier(&drbd_notifier);
3284
3285         /* first remove proc,
3286          * drbdsetup uses its presence to detect
3287          * whether DRBD is loaded.
3288          * If we got stuck in proc removal,
3289          * but have netlink already deregistered,
3290          * some drbdsetup commands may wait forever
3291          * for an answer.
3292          */
3293         if (drbd_proc)
3294                 remove_proc_entry("drbd", NULL);
3295
3296         drbd_nl_cleanup();
3297
3298         if (minor_table) {
3299                 i = minor_count;
3300                 while (i--)
3301                         drbd_delete_device(i);
3302                 drbd_destroy_mempools();
3303         }
3304
3305         kfree(minor_table);
3306
3307         unregister_blkdev(DRBD_MAJOR, "drbd");
3308
3309         printk(KERN_INFO "drbd: module cleanup done.\n");
3310 }
3311
3312 /**
3313  * drbd_congested() - Callback for pdflush
3314  * @congested_data:     User data
3315  * @bdi_bits:           Bits pdflush is currently interested in
3316  *
3317  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3318  */
3319 static int drbd_congested(void *congested_data, int bdi_bits)
3320 {
3321         struct drbd_conf *mdev = congested_data;
3322         struct request_queue *q;
3323         char reason = '-';
3324         int r = 0;
3325
3326         if (!may_inc_ap_bio(mdev)) {
3327                 /* DRBD has frozen IO */
3328                 r = bdi_bits;
3329                 reason = 'd';
3330                 goto out;
3331         }
3332
3333         if (get_ldev(mdev)) {
3334                 q = bdev_get_queue(mdev->ldev->backing_bdev);
3335                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3336                 put_ldev(mdev);
3337                 if (r)
3338                         reason = 'b';
3339         }
3340
3341         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3342                 r |= (1 << BDI_async_congested);
3343                 reason = reason == 'b' ? 'a' : 'n';
3344         }
3345
3346 out:
3347         mdev->congestion_reason = reason;
3348         return r;
3349 }
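
/*
 * Sketch (hypothetical helper): the meaning of the single-character
 * congestion_reason values set by drbd_congested() above.
 */
#if 0
static const char *congestion_reason_str(char reason)
{
	switch (reason) {
	case 'd': return "DRBD has frozen IO";
	case 'b': return "backing device congested";
	case 'n': return "network send buffer congested";
	case 'a': return "backing device and network congested";
	default:  return "not congested";
	}
}
#endif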
3350
3351 struct drbd_conf *drbd_new_device(unsigned int minor)
3352 {
3353         struct drbd_conf *mdev;
3354         struct gendisk *disk;
3355         struct request_queue *q;
3356
3357         /* GFP_KERNEL, we are outside of all write-out paths */
3358         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3359         if (!mdev)
3360                 return NULL;
3361         if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3362                 goto out_no_cpumask;
3363
3364         mdev->minor = minor;
3365
3366         drbd_init_set_defaults(mdev);
3367
3368         q = blk_alloc_queue(GFP_KERNEL);
3369         if (!q)
3370                 goto out_no_q;
3371         mdev->rq_queue = q;
3372         q->queuedata   = mdev;
3373
3374         disk = alloc_disk(1);
3375         if (!disk)
3376                 goto out_no_disk;
3377         mdev->vdisk = disk;
3378
3379         set_disk_ro(disk, true);
3380
3381         disk->queue = q;
3382         disk->major = DRBD_MAJOR;
3383         disk->first_minor = minor;
3384         disk->fops = &drbd_ops;
3385         sprintf(disk->disk_name, "drbd%d", minor);
3386         disk->private_data = mdev;
3387
3388         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3389         /* we have no partitions. we contain only ourselves. */
3390         mdev->this_bdev->bd_contains = mdev->this_bdev;
3391
3392         q->backing_dev_info.congested_fn = drbd_congested;
3393         q->backing_dev_info.congested_data = mdev;
3394
3395         blk_queue_make_request(q, drbd_make_request);
3396         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
3397         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3398         blk_queue_merge_bvec(q, drbd_merge_bvec);
3399         q->queue_lock = &mdev->req_lock;
3400
3401         mdev->md_io_page = alloc_page(GFP_KERNEL);
3402         if (!mdev->md_io_page)
3403                 goto out_no_io_page;
3404
3405         if (drbd_bm_init(mdev))
3406                 goto out_no_bitmap;
3407         /* no need to lock access, we are still initializing this minor device. */
3408         if (!tl_init(mdev))
3409                 goto out_no_tl;
3410
3411         mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3412         if (!mdev->app_reads_hash)
3413                 goto out_no_app_reads;
3414
3415         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3416         if (!mdev->current_epoch)
3417                 goto out_no_epoch;
3418
3419         INIT_LIST_HEAD(&mdev->current_epoch->list);
3420         mdev->epochs = 1;
3421
3422         return mdev;
3423
3424 /* out_whatever_else:
3425         kfree(mdev->current_epoch); */
3426 out_no_epoch:
3427         kfree(mdev->app_reads_hash);
3428 out_no_app_reads:
3429         tl_cleanup(mdev);
3430 out_no_tl:
3431         drbd_bm_cleanup(mdev);
3432 out_no_bitmap:
3433         __free_page(mdev->md_io_page);
3434 out_no_io_page:
3435         put_disk(disk);
3436 out_no_disk:
3437         blk_cleanup_queue(q);
3438 out_no_q:
3439         free_cpumask_var(mdev->cpu_mask);
3440 out_no_cpumask:
3441         kfree(mdev);
3442         return NULL;
3443 }
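
/*
 * Sketch of the unwind idiom used by drbd_new_device() above: every
 * allocation gets an error label, and a failure jumps to the label that
 * releases everything allocated so far, in reverse order.  The names
 * below are placeholders, not DRBD objects.
 */
#if 0
static struct foo *alloc_foo(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	f->bar = kzalloc(sizeof(*f->bar), GFP_KERNEL);
	if (!f->bar)
		goto out_no_bar;
	f->baz = kzalloc(sizeof(*f->baz), GFP_KERNEL);
	if (!f->baz)
		goto out_no_baz;
	return f;

out_no_baz:
	kfree(f->bar);
out_no_bar:
	kfree(f);
	return NULL;
}
#endif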
3444
3445 /* counterpart of drbd_new_device.
3446  * last part of drbd_delete_device. */
3447 void drbd_free_mdev(struct drbd_conf *mdev)
3448 {
3449         kfree(mdev->current_epoch);
3450         kfree(mdev->app_reads_hash);
3451         tl_cleanup(mdev);
3452         if (mdev->bitmap) /* should no longer be there. */
3453                 drbd_bm_cleanup(mdev);
3454         __free_page(mdev->md_io_page);
3455         put_disk(mdev->vdisk);
3456         blk_cleanup_queue(mdev->rq_queue);
3457         free_cpumask_var(mdev->cpu_mask);
3458         drbd_free_tl_hash(mdev);
3459         kfree(mdev);
3460 }
3461
3462
3463 int __init drbd_init(void)
3464 {
3465         int err;
3466
3467         if (sizeof(struct p_handshake) != 80) {
3468                 printk(KERN_ERR
3469                        "drbd: never change the size or layout "
3470                        "of the HandShake packet.\n");
3471                 return -EINVAL;
3472         }
3473
3474         if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
3475                 printk(KERN_ERR
3476                         "drbd: invalid minor_count (%d)\n", minor_count);
3477 #ifdef MODULE
3478                 return -EINVAL;
3479 #else
3480                 minor_count = 8;
3481 #endif
3482         }
3483
3484         err = drbd_nl_init();
3485         if (err)
3486                 return err;
3487
3488         err = register_blkdev(DRBD_MAJOR, "drbd");
3489         if (err) {
3490                 printk(KERN_ERR
3491                        "drbd: unable to register block device major %d\n",
3492                        DRBD_MAJOR);
3493                 return err;
3494         }
3495
3496         register_reboot_notifier(&drbd_notifier);
3497
3498         /*
3499          * allocate all necessary structs
3500          */
3501         err = -ENOMEM;
3502
3503         init_waitqueue_head(&drbd_pp_wait);
3504
3505         drbd_proc = NULL; /* play safe for drbd_cleanup */
3506         minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3507                                 GFP_KERNEL);
3508         if (!minor_table)
3509                 goto Enomem;
3510
3511         err = drbd_create_mempools();
3512         if (err)
3513                 goto Enomem;
3514
3515         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3516         if (!drbd_proc) {
3517                 printk(KERN_ERR "drbd: unable to register proc file\n");
3518                 goto Enomem;
3519         }
3520
3521         rwlock_init(&global_state_lock);
3522
3523         printk(KERN_INFO "drbd: initialized. "
3524                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3525                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3526         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3527         printk(KERN_INFO "drbd: registered as block device major %d\n",
3528                 DRBD_MAJOR);
3529         printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3530
3531         return 0; /* Success! */
3532
3533 Enomem:
3534         drbd_cleanup();
3535         if (err == -ENOMEM)
3536                 /* currently always the case */
3537                 printk(KERN_ERR "drbd: ran out of memory\n");
3538         else
3539                 printk(KERN_ERR "drbd: initialization failure\n");
3540         return err;
3541 }
3542
3543 void drbd_free_bc(struct drbd_backing_dev *ldev)
3544 {
3545         if (ldev == NULL)
3546                 return;
3547
3548         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3549         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3550
3551         kfree(ldev);
3552 }
3553
3554 void drbd_free_sock(struct drbd_conf *mdev)
3555 {
3556         if (mdev->data.socket) {
3557                 mutex_lock(&mdev->data.mutex);
3558                 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3559                 sock_release(mdev->data.socket);
3560                 mdev->data.socket = NULL;
3561                 mutex_unlock(&mdev->data.mutex);
3562         }
3563         if (mdev->meta.socket) {
3564                 mutex_lock(&mdev->meta.mutex);
3565                 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3566                 sock_release(mdev->meta.socket);
3567                 mdev->meta.socket = NULL;
3568                 mutex_unlock(&mdev->meta.mutex);
3569         }
3570 }
3571
3572
3573 void drbd_free_resources(struct drbd_conf *mdev)
3574 {
3575         crypto_free_hash(mdev->csums_tfm);
3576         mdev->csums_tfm = NULL;
3577         crypto_free_hash(mdev->verify_tfm);
3578         mdev->verify_tfm = NULL;
3579         crypto_free_hash(mdev->cram_hmac_tfm);
3580         mdev->cram_hmac_tfm = NULL;
3581         crypto_free_hash(mdev->integrity_w_tfm);
3582         mdev->integrity_w_tfm = NULL;
3583         crypto_free_hash(mdev->integrity_r_tfm);
3584         mdev->integrity_r_tfm = NULL;
3585
3586         drbd_free_sock(mdev);
3587
3588         __no_warn(local,
3589                   drbd_free_bc(mdev->ldev);
3590                   mdev->ldev = NULL;);
3591 }
3592
3593 /* meta data management */
3594
3595 struct meta_data_on_disk {
3596         u64 la_size;           /* last agreed size. */
3597         u64 uuid[UI_SIZE];   /* UUIDs. */
3598         u64 device_uuid;
3599         u64 reserved_u64_1;
3600         u32 flags;             /* MDF */
3601         u32 magic;
3602         u32 md_size_sect;
3603         u32 al_offset;         /* offset to this block */
3604         u32 al_nr_extents;     /* important for restoring the AL */
3605               /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3606         u32 bm_offset;         /* offset to the bitmap, from here */
3607         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3608         u32 reserved_u32[4];
3609
3610 } __packed;
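
/*
 * Illustrative compile-time check (not in the original source): the
 * superblock layout above is zeroed and filled in as a 512 byte buffer by
 * drbd_md_sync() below, so the struct must never outgrow one sector.
 */
#if 0
static inline void meta_data_on_disk_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) > 512);
}
#endif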
3611
3612 /**
3613  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3614  * @mdev:       DRBD device.
3615  */
3616 void drbd_md_sync(struct drbd_conf *mdev)
3617 {
3618         struct meta_data_on_disk *buffer;
3619         sector_t sector;
3620         int i;
3621
3622         del_timer(&mdev->md_sync_timer);
3623         /* timer may be rearmed by drbd_md_mark_dirty() now. */
3624         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3625                 return;
3626
3627         /* We use D_FAILED here, and not D_ATTACHING, because we try to write
3628          * metadata even if we detach due to a disk failure! */
3629         if (!get_ldev_if_state(mdev, D_FAILED))
3630                 return;
3631
3632         mutex_lock(&mdev->md_io_mutex);
3633         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3634         memset(buffer, 0, 512);
3635
3636         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3637         for (i = UI_CURRENT; i < UI_SIZE; i++)
3638                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3639         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3640         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3641
3642         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
3643         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
3644         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3645         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3646         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3647
3648         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3649
3650         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3651         sector = mdev->ldev->md.md_offset;
3652
3653         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3654                 /* this was only a try anyway ... */
3655                 dev_err(DEV, "meta data update failed!\n");
3656                 drbd_chk_io_error(mdev, 1, true);
3657         }
3658
3659         /* Update mdev->ldev->md.la_size_sect,
3660          * since we updated it on metadata. */
3661         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3662
3663         mutex_unlock(&mdev->md_io_mutex);
3664         put_ldev(mdev);
3665 }
3666
3667 /**
3668  * drbd_md_read() - Reads in the meta data super block
3669  * @mdev:       DRBD device.
3670  * @bdev:       Device from which the meta data should be read in.
3671  *
3672  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3673  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3674  */
3675 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3676 {
3677         struct meta_data_on_disk *buffer;
3678         int i, rv = NO_ERROR;
3679
3680         if (!get_ldev_if_state(mdev, D_ATTACHING))
3681                 return ERR_IO_MD_DISK;
3682
3683         mutex_lock(&mdev->md_io_mutex);
3684         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3685
3686         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3687                 /* NOTE: can't do normal error processing here, as this is
3688                    called BEFORE the disk is attached */
3689                 dev_err(DEV, "Error while reading metadata.\n");
3690                 rv = ERR_IO_MD_DISK;
3691                 goto err;
3692         }
3693
3694         if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3695                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3696                 rv = ERR_MD_INVALID;
3697                 goto err;
3698         }
3699         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3700                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3701                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3702                 rv = ERR_MD_INVALID;
3703                 goto err;
3704         }
3705         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3706                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3707                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3708                 rv = ERR_MD_INVALID;
3709                 goto err;
3710         }
3711         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3712                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3713                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3714                 rv = ERR_MD_INVALID;
3715                 goto err;
3716         }
3717
3718         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3719                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3720                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3721                 rv = ERR_MD_INVALID;
3722                 goto err;
3723         }
3724
3725         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3726         for (i = UI_CURRENT; i < UI_SIZE; i++)
3727                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3728         bdev->md.flags = be32_to_cpu(buffer->flags);
3729         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3730         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3731
3732         if (mdev->sync_conf.al_extents < 7)
3733                 mdev->sync_conf.al_extents = 127;
3734
3735  err:
3736         mutex_unlock(&mdev->md_io_mutex);
3737         put_ldev(mdev);
3738
3739         return rv;
3740 }
3741
3742 static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
3743 {
3744         static char *uuid_str[UI_EXTENDED_SIZE] = {
3745                 [UI_CURRENT] = "CURRENT",
3746                 [UI_BITMAP] = "BITMAP",
3747                 [UI_HISTORY_START] = "HISTORY_START",
3748                 [UI_HISTORY_END] = "HISTORY_END",
3749                 [UI_SIZE] = "SIZE",
3750                 [UI_FLAGS] = "FLAGS",
3751         };
3752
3753         if (index >= UI_EXTENDED_SIZE) {
3754                 dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
3755                 return;
3756         }
3757
3758         dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
3759                  uuid_str[index],
3760                  (unsigned long long)mdev->ldev->md.uuid[index]);
3761 }
3762
3763
3764 /**
3765  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3766  * @mdev:       DRBD device.
3767  *
3768  * Call this function if you change anything that should be written to
3769  * the meta-data super block. This function sets MD_DIRTY, and starts a
3770  * timer that ensures drbd_md_sync() gets called within five seconds.
3771  */
3772 #ifdef DEBUG
3773 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3774 {
3775         if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3776                 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3777                 mdev->last_md_mark_dirty.line = line;
3778                 mdev->last_md_mark_dirty.func = func;
3779         }
3780 }
3781 #else
3782 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3783 {
3784         if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3785                 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3786 }
3787 #endif
3788
3789 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3790 {
3791         int i;
3792
3793         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
3794                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3795                 debug_drbd_uuid(mdev, i+1);
3796         }
3797 }
3798
3799 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3800 {
3801         if (idx == UI_CURRENT) {
3802                 if (mdev->state.role == R_PRIMARY)
3803                         val |= 1;
3804                 else
3805                         val &= ~((u64)1);
3806
3807                 drbd_set_ed_uuid(mdev, val);
3808         }
3809
3810         mdev->ldev->md.uuid[idx] = val;
3811         debug_drbd_uuid(mdev, idx);
3812         drbd_md_mark_dirty(mdev);
3813 }
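
/*
 * Sketch (hypothetical helper): bit 0 of the current UUID encodes the role
 * at the time it was set (1 = primary), as done in _drbd_uuid_set() above.
 * A comparison that ignores the role bit could look like this.
 */
#if 0
static inline int uuid_equal_ignoring_role_bit(u64 a, u64 b)
{
	return (a & ~(u64)1) == (b & ~(u64)1);
}
#endif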
3814
3815
3816 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3817 {
3818         if (mdev->ldev->md.uuid[idx]) {
3819                 drbd_uuid_move_history(mdev);
3820                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3821                 debug_drbd_uuid(mdev, UI_HISTORY_START);
3822         }
3823         _drbd_uuid_set(mdev, idx, val);
3824 }
3825
3826 /**
3827  * drbd_uuid_new_current() - Creates a new current UUID
3828  * @mdev:       DRBD device.
3829  *
3830  * Creates a new current UUID, and rotates the old current UUID into
3831  * the bitmap slot. Causes an incremental resync upon next connect.
3832  */
3833 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3834 {
3835         u64 val;
3836
3837         dev_info(DEV, "Creating new current UUID\n");
3838         D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3839         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3840         debug_drbd_uuid(mdev, UI_BITMAP);
3841
3842         get_random_bytes(&val, sizeof(u64));
3843         _drbd_uuid_set(mdev, UI_CURRENT, val);
3844         /* get it to stable storage _now_ */
3845         drbd_md_sync(mdev);
3846 }
3847
3848 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3849 {
3850         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3851                 return;
3852
3853         if (val == 0) {
3854                 drbd_uuid_move_history(mdev);
3855                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3856                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3857                 debug_drbd_uuid(mdev, UI_HISTORY_START);
3858                 debug_drbd_uuid(mdev, UI_BITMAP);
3859         } else {
3860                 if (mdev->ldev->md.uuid[UI_BITMAP])
3861                         dev_warn(DEV, "bm UUID already set");
3862
3863                 mdev->ldev->md.uuid[UI_BITMAP] = val;
3864                 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3865
3866                 debug_drbd_uuid(mdev, UI_BITMAP);
3867         }
3868         drbd_md_mark_dirty(mdev);
3869 }
3870
3871 /**
3872  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3873  * @mdev:       DRBD device.
3874  *
3875  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3876  */
3877 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3878 {
3879         int rv = -EIO;
3880
3881         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3882                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3883                 drbd_md_sync(mdev);
3884                 drbd_bm_set_all(mdev);
3885
3886                 rv = drbd_bm_write(mdev);
3887
3888                 if (!rv) {
3889                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3890                         drbd_md_sync(mdev);
3891                 }
3892
3893                 put_ldev(mdev);
3894         }
3895
3896         return rv;
3897 }
3898
3899 /**
3900  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3901  * @mdev:       DRBD device.
3902  *
3903  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3904  */
3905 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3906 {
3907         int rv = -EIO;
3908
3909         drbd_resume_al(mdev);
3910         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3911                 drbd_bm_clear_all(mdev);
3912                 rv = drbd_bm_write(mdev);
3913                 put_ldev(mdev);
3914         }
3915
3916         return rv;
3917 }
3918
3919 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3920 {
3921         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3922         int rv = -EIO;
3923
3924         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3925
3926         if (get_ldev(mdev)) {
3927                 drbd_bm_lock(mdev, work->why);
3928                 rv = work->io_fn(mdev);
3929                 drbd_bm_unlock(mdev);
3930                 put_ldev(mdev);
3931         }
3932
3933         clear_bit(BITMAP_IO, &mdev->flags);
3934         smp_mb__after_clear_bit();
3935         wake_up(&mdev->misc_wait);
3936
3937         if (work->done)
3938                 work->done(mdev, rv);
3939
3940         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3941         work->why = NULL;
3942
3943         return 1;
3944 }
3945
3946 void drbd_ldev_destroy(struct drbd_conf *mdev)
3947 {
3948         lc_destroy(mdev->resync);
3949         mdev->resync = NULL;
3950         lc_destroy(mdev->act_log);
3951         mdev->act_log = NULL;
3952         __no_warn(local,
3953                 drbd_free_bc(mdev->ldev);
3954                 mdev->ldev = NULL;);
3955
3956         if (mdev->md_io_tmpp) {
3957                 __free_page(mdev->md_io_tmpp);
3958                 mdev->md_io_tmpp = NULL;
3959         }
3960         clear_bit(GO_DISKLESS, &mdev->flags);
3961 }
3962
3963 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3964 {
3965         D_ASSERT(mdev->state.disk == D_FAILED);
3966         /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3967          * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3968          * the protected members anymore, though, so once put_ldev reaches zero
3969          * again, it will be safe to free them. */
3970         drbd_force_state(mdev, NS(disk, D_DISKLESS));
3971         return 1;
3972 }
3973
3974 void drbd_go_diskless(struct drbd_conf *mdev)
3975 {
3976         D_ASSERT(mdev->state.disk == D_FAILED);
3977         if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
3978                 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3979 }
3980
3981 /**
3982  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3983  * @mdev:       DRBD device.
3984  * @io_fn:      IO callback to be called when bitmap IO is possible
3985  * @done:       callback to be called after the bitmap IO was performed
3986  * @why:        Descriptive text of the reason for doing the IO
3987  *
3988  * While IO on the bitmap happens we freeze application IO, thus ensuring
3989  * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3990  * called from worker context. It MUST NOT be used while a previous such
3991  * work is still pending!
3992  */
3993 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3994                           int (*io_fn)(struct drbd_conf *),
3995                           void (*done)(struct drbd_conf *, int),
3996                           char *why)
3997 {
3998         D_ASSERT(current == mdev->worker.task);
3999
4000         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4001         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4002         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4003         if (mdev->bm_io_work.why)
4004                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4005                         why, mdev->bm_io_work.why);
4006
4007         mdev->bm_io_work.io_fn = io_fn;
4008         mdev->bm_io_work.done = done;
4009         mdev->bm_io_work.why = why;
4010
4011         spin_lock_irq(&mdev->req_lock);
4012         set_bit(BITMAP_IO, &mdev->flags);
4013         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
4014                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
4015                         drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
4016         }
4017         spin_unlock_irq(&mdev->req_lock);
4018 }
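
/*
 * Usage sketch (hypothetical call site, not taken from this file): queue a
 * full "set all bits and write out" bitmap IO from worker context, using
 * drbd_bmio_set_n_write() defined above; the done callback is optional.
 */
#if 0
static void full_bm_write_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "full bitmap write failed\n");
}

static void example_queue_full_bm_write(struct drbd_conf *mdev)
{
	/* must run in worker context, see the kernel-doc above */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
			     &full_bm_write_done, "example: full sync");
}
#endif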
4019
4020 /**
4021  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
4022  * @mdev:       DRBD device.
4023  * @io_fn:      IO callback to be called when bitmap IO is possible
4024  * @why:        Descriptive text of the reason for doing the IO
4025  *
4026  * Freezes application IO while the actual IO operation runs. This
4027  * function MAY NOT be called from worker context.
4028  */
4029 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
4030 {
4031         int rv;
4032
4033         D_ASSERT(current != mdev->worker.task);
4034
4035         drbd_suspend_io(mdev);
4036
4037         drbd_bm_lock(mdev, why);
4038         rv = io_fn(mdev);
4039         drbd_bm_unlock(mdev);
4040
4041         drbd_resume_io(mdev);
4042
4043         return rv;
4044 }
4045
4046 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4047 {
4048         if ((mdev->ldev->md.flags & flag) != flag) {
4049                 drbd_md_mark_dirty(mdev);
4050                 mdev->ldev->md.flags |= flag;
4051         }
4052 }
4053
4054 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4055 {
4056         if ((mdev->ldev->md.flags & flag) != 0) {
4057                 drbd_md_mark_dirty(mdev);
4058                 mdev->ldev->md.flags &= ~flag;
4059         }
4060 }
4061 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4062 {
4063         return (bdev->md.flags & flag) != 0;
4064 }
4065
4066 static void md_sync_timer_fn(unsigned long data)
4067 {
4068         struct drbd_conf *mdev = (struct drbd_conf *) data;
4069
4070         drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4071 }
4072
4073 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4074 {
4075         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4076 #ifdef DEBUG
4077         dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4078                 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4079 #endif
4080         drbd_md_sync(mdev);
4081         return 1;
4082 }
4083
4084 #ifdef CONFIG_DRBD_FAULT_INJECTION
4085 /* Fault insertion support including random number generator shamelessly
4086  * stolen from kernel/rcutorture.c */
4087 struct fault_random_state {
4088         unsigned long state;
4089         unsigned long count;
4090 };
4091
4092 #define FAULT_RANDOM_MULT 39916801  /* prime */
4093 #define FAULT_RANDOM_ADD        479001701 /* prime */
4094 #define FAULT_RANDOM_REFRESH 10000
4095
4096 /*
4097  * Crude but fast random-number generator.  Uses a linear congruential
4098  * generator, with occasional help from get_random_bytes().
4099  */
4100 static unsigned long
4101 _drbd_fault_random(struct fault_random_state *rsp)
4102 {
4103         long refresh;
4104
4105         if (!rsp->count--) {
4106                 get_random_bytes(&refresh, sizeof(refresh));
4107                 rsp->state += refresh;
4108                 rsp->count = FAULT_RANDOM_REFRESH;
4109         }
4110         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4111         return swahw32(rsp->state);
4112 }
4113
4114 static char *
4115 _drbd_fault_str(unsigned int type) {
4116         static char *_faults[] = {
4117                 [DRBD_FAULT_MD_WR] = "Meta-data write",
4118                 [DRBD_FAULT_MD_RD] = "Meta-data read",
4119                 [DRBD_FAULT_RS_WR] = "Resync write",
4120                 [DRBD_FAULT_RS_RD] = "Resync read",
4121                 [DRBD_FAULT_DT_WR] = "Data write",
4122                 [DRBD_FAULT_DT_RD] = "Data read",
4123                 [DRBD_FAULT_DT_RA] = "Data read ahead",
4124                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4125                 [DRBD_FAULT_AL_EE] = "EE allocation",
4126                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4127         };
4128
4129         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4130 }
4131
4132 unsigned int
4133 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4134 {
4135         static struct fault_random_state rrs = {0, 0};
4136
4137         unsigned int ret = (
4138                 (fault_devs == 0 ||
4139                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4140                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4141
4142         if (ret) {
4143                 fault_count++;
4144
4145                 if (__ratelimit(&drbd_ratelimit_state))
4146                         dev_warn(DEV, "***Simulating %s failure\n",
4147                                 _drbd_fault_str(type));
4148         }
4149
4150         return ret;
4151 }
4152 #endif
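
/*
 * Hypothetical call-site sketch: how a fault-injection check around a
 * metadata write request might look.  drbd_insert_fault() is assumed to be
 * the wrapper declared in drbd_int.h; the exact call below is illustrative
 * only.
 */
#if 0
static void example_md_write(struct drbd_conf *mdev, struct bio *bio)
{
	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
		bio_endio(bio, -EIO);	/* pretend the write failed */
	else
		submit_bio(WRITE, bio);
}
#endif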
4153
4154 const char *drbd_buildtag(void)
4155 {
4156         /* DRBD built from external sources carries a reference to the
4157            git hash of the source code here. */
4158
4159         static char buildtag[38] = "\0uilt-in";
4160
4161         if (buildtag[0] == 0) {
4162 #ifdef CONFIG_MODULES
4163                 if (THIS_MODULE != NULL)
4164                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4165                 else
4166 #endif
4167                         buildtag[0] = 'b';
4168         }
4169
4170         return buildtag;
4171 }
4172
4173 module_init(drbd_init)
4174 module_exit(drbd_cleanup)
4175
4176 EXPORT_SYMBOL(drbd_conn_str);
4177 EXPORT_SYMBOL(drbd_role_str);
4178 EXPORT_SYMBOL(drbd_disk_str);
4179 EXPORT_SYMBOL(drbd_set_st_err_str);