drivers/block/drbd/drbd_main.c  (karo-tx-linux.git)
drbd: remove now unused seq_num member from struct drbd_request
1 /*
2    drbd.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27  */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 struct after_state_chg_work {
60         struct drbd_work w;
61         union drbd_state os;
62         union drbd_state ns;
63         enum chg_state_flags flags;
64         struct completion *done;
65 };
66
67 static DEFINE_MUTEX(drbd_main_mutex);
68 int drbdd_init(struct drbd_thread *);
69 int drbd_worker(struct drbd_thread *);
70 int drbd_asender(struct drbd_thread *);
71
72 int drbd_init(void);
73 static int drbd_open(struct block_device *bdev, fmode_t mode);
74 static int drbd_release(struct gendisk *gd, fmode_t mode);
75 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77                            union drbd_state ns, enum chg_state_flags flags);
78 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79 static void md_sync_timer_fn(unsigned long data);
80 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84               "Lars Ellenberg <lars@linbit.com>");
85 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86 MODULE_VERSION(REL_VERSION);
87 MODULE_LICENSE("GPL");
88 MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89                  __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92 #include <linux/moduleparam.h>
93 /* allow_open_on_secondary */
94 MODULE_PARM_DESC(allow_oos, "DONT USE!");
95 /* thanks to these macros, if compiled into the kernel (not-module),
96  * this becomes the boot parameter drbd.minor_count */
97 module_param(minor_count, uint, 0444);
98 module_param(disable_sendpage, bool, 0644);
99 module_param(allow_oos, bool, 0);
100 module_param(cn_idx, uint, 0444);
101 module_param(proc_details, int, 0644);
102
103 #ifdef CONFIG_DRBD_FAULT_INJECTION
104 int enable_faults;
105 int fault_rate;
106 static int fault_count;
107 int fault_devs;
108 /* bitmap of enabled faults */
109 module_param(enable_faults, int, 0664);
110 /* fault rate % value - applies to all enabled faults */
111 module_param(fault_rate, int, 0664);
112 /* count of faults inserted */
113 module_param(fault_count, int, 0664);
114 /* bitmap of devices to insert faults on */
115 module_param(fault_devs, int, 0644);
116 #endif
117
118 /* module parameter, defined */
119 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120 bool disable_sendpage;
121 bool allow_oos;
122 unsigned int cn_idx = CN_IDX_DRBD;
123 int proc_details;       /* Detail level in proc drbd*/
124
125 /* Module parameter for setting the user mode helper program
126  * to run. Default is /sbin/drbdadm */
127 char usermode_helper[80] = "/sbin/drbdadm";
128
129 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
132  * as member "struct gendisk *vdisk;"
133  */
134 struct drbd_conf **minor_table;
135
136 struct kmem_cache *drbd_request_cache;
137 struct kmem_cache *drbd_ee_cache;       /* epoch entries */
138 struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
139 struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
140 mempool_t *drbd_request_mempool;
141 mempool_t *drbd_ee_mempool;
142
143 /* I do not use a standard mempool, because:
144    1) I want to hand out the pre-allocated objects first.
145    2) I want to be able to interrupt sleeping allocation with a signal.
146    Note: This is a single linked list, the next pointer is the private
147          member of struct page.
148  */
149 struct page *drbd_pp_pool;
150 spinlock_t   drbd_pp_lock;
151 int          drbd_pp_vacant;
152 wait_queue_head_t drbd_pp_wait;
153
154 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156 static const struct block_device_operations drbd_ops = {
157         .owner =   THIS_MODULE,
158         .open =    drbd_open,
159         .release = drbd_release,
160 };
161
162 #define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164 #ifdef __CHECKER__
165 /* When checking with sparse, and this is an inline function, sparse will
166    give tons of false positives. When this is a real function, sparse works.
167  */
168 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169 {
170         int io_allowed;
171
172         atomic_inc(&mdev->local_cnt);
173         io_allowed = (mdev->state.disk >= mins);
174         if (!io_allowed) {
175                 if (atomic_dec_and_test(&mdev->local_cnt))
176                         wake_up(&mdev->misc_wait);
177         }
178         return io_allowed;
179 }
180
181 #endif
182
183 /**
184  * DOC: The transfer log
185  *
186  * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
187  * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188  * of the list. There is always at least one &struct drbd_tl_epoch object.
189  *
190  * Each &struct drbd_tl_epoch has a circular, doubly linked list of requests
191  * attached.
192  */
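/*
 * Illustrative sketch only (not part of the driver): given the invariants
 * described above, walking the whole transfer log under req_lock follows the
 * same pattern used by _tl_restart() and tl_abort_disk_io() below, i.e. an
 * outer walk over the singly linked epoch list and an inner walk over each
 * epoch's request ring:
 *
 *	struct drbd_tl_epoch *b;
 *	struct list_head *le, *tle;
 *	struct drbd_request *req;
 *
 *	spin_lock_irq(&mdev->req_lock);
 *	for (b = mdev->oldest_tle; b != NULL; b = b->next) {
 *		list_for_each_safe(le, tle, &b->requests) {
 *			req = list_entry(le, struct drbd_request, tl_requests);
 *			(inspect req here)
 *		}
 *	}
 *	spin_unlock_irq(&mdev->req_lock);
 */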
193 static int tl_init(struct drbd_conf *mdev)
194 {
195         struct drbd_tl_epoch *b;
196
197         /* during device minor initialization, we may well use GFP_KERNEL */
198         b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199         if (!b)
200                 return 0;
201         INIT_LIST_HEAD(&b->requests);
202         INIT_LIST_HEAD(&b->w.list);
203         b->next = NULL;
204         b->br_number = 4711;
205         b->n_writes = 0;
206         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208         mdev->oldest_tle = b;
209         mdev->newest_tle = b;
210         INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211         INIT_LIST_HEAD(&mdev->barrier_acked_requests);
212
213         mdev->tl_hash = NULL;
214         mdev->tl_hash_s = 0;
215
216         return 1;
217 }
218
219 static void tl_cleanup(struct drbd_conf *mdev)
220 {
221         D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
222         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
223         kfree(mdev->oldest_tle);
224         mdev->oldest_tle = NULL;
225         kfree(mdev->unused_spare_tle);
226         mdev->unused_spare_tle = NULL;
227         kfree(mdev->tl_hash);
228         mdev->tl_hash = NULL;
229         mdev->tl_hash_s = 0;
230 }
231
232 /**
233  * _tl_add_barrier() - Adds a barrier to the transfer log
234  * @mdev:       DRBD device.
235  * @new:        Barrier to be added before the current head of the TL.
236  *
237  * The caller must hold the req_lock.
238  */
239 void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
240 {
241         struct drbd_tl_epoch *newest_before;
242
243         INIT_LIST_HEAD(&new->requests);
244         INIT_LIST_HEAD(&new->w.list);
245         new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
246         new->next = NULL;
247         new->n_writes = 0;
248
249         newest_before = mdev->newest_tle;
250         /* never send a barrier number == 0, because that is special-cased
251          * when using TCQ for our write ordering code */
252         new->br_number = (newest_before->br_number+1) ?: 1;
253         if (mdev->newest_tle != new) {
254                 mdev->newest_tle->next = new;
255                 mdev->newest_tle = new;
256         }
257 }
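/*
 * Worked example (sketch, not driver code) of the wrap-around above, assuming
 * br_number is the usual 32-bit unsigned counter: once it reaches 0xffffffff,
 * (br_number + 1) evaluates to 0 and the GNU "?:" shorthand substitutes 1, so
 * a barrier number of 0 is never handed out:
 *
 *	unsigned int prev = 0xffffffffU;
 *	unsigned int next = (prev + 1) ?: 1;	(next == 1, not 0)
 *	prev = 41;
 *	next = (prev + 1) ?: 1;			(next == 42)
 */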
258
259 /**
260  * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
261  * @mdev:       DRBD device.
262  * @barrier_nr: Expected identifier of the DRBD write barrier packet.
263  * @set_size:   Expected number of requests before that barrier.
264  *
265  * In case the passed barrier_nr or set_size does not match the oldest
266  * &struct drbd_tl_epoch objects this function will cause a termination
267  * of the connection.
268  */
269 void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
270                        unsigned int set_size)
271 {
272         struct drbd_tl_epoch *b, *nob; /* next old barrier */
273         struct list_head *le, *tle;
274         struct drbd_request *r;
275
276         spin_lock_irq(&mdev->req_lock);
277
278         b = mdev->oldest_tle;
279
280         /* first some paranoia code */
281         if (b == NULL) {
282                 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
283                         barrier_nr);
284                 goto bail;
285         }
286         if (b->br_number != barrier_nr) {
287                 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
288                         barrier_nr, b->br_number);
289                 goto bail;
290         }
291         if (b->n_writes != set_size) {
292                 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
293                         barrier_nr, set_size, b->n_writes);
294                 goto bail;
295         }
296
297         /* Clean up list of requests processed during current epoch */
298         list_for_each_safe(le, tle, &b->requests) {
299                 r = list_entry(le, struct drbd_request, tl_requests);
300                 _req_mod(r, barrier_acked);
301         }
302         /* There could be requests on the list waiting for completion
303            of the write to the local disk. To avoid corruption of the
304            slab's data structures we have to remove the list's head.
305
306            Also there could have been a barrier ack out of sequence, overtaking
307            the write acks - which would be a bug and violating write ordering.
308            To not deadlock in case we lose connection while such requests are
309            still pending, we need some way to find them for the
310            _req_mod(connection_lost_while_pending).
311
312            These have been list_move'd to the out_of_sequence_requests list in
313            _req_mod(, barrier_acked) above.
314            */
315         list_splice_init(&b->requests, &mdev->barrier_acked_requests);
316
317         nob = b->next;
318         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
319                 _tl_add_barrier(mdev, b);
320                 if (nob)
321                         mdev->oldest_tle = nob;
322                 /* if nob == NULL b was the only barrier, and becomes the new
323                    barrier. Therefore mdev->oldest_tle points already to b */
324         } else {
325                 D_ASSERT(nob != NULL);
326                 mdev->oldest_tle = nob;
327                 kfree(b);
328         }
329
330         spin_unlock_irq(&mdev->req_lock);
331         dec_ap_pending(mdev);
332
333         return;
334
335 bail:
336         spin_unlock_irq(&mdev->req_lock);
337         drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
338 }
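/*
 * Hypothetical caller sketch (the real caller is the barrier-ack handler in
 * the receiver code, not shown here): barrier_nr and set_size come from the
 * peer's barrier-ack packet (P_BARRIER_ACK), and any mismatch with the oldest
 * epoch makes tl_release() force the connection into C_PROTOCOL_ERROR via the
 * bail path above:
 *
 *	static void example_got_barrier_ack(struct drbd_conf *mdev,
 *					    unsigned int barrier_nr,
 *					    unsigned int set_size)
 *	{
 *		tl_release(mdev, barrier_nr, set_size);
 *	}
 */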
339
340
341 /**
342  * _tl_restart() - Walks the transfer log, and applies an action to all requests
343  * @mdev:       DRBD device.
344  * @what:       The action/event to perform with all request objects
345  *
346  * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
347  * restart_frozen_disk_io.
348  */
349 static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
350 {
351         struct drbd_tl_epoch *b, *tmp, **pn;
352         struct list_head *le, *tle, carry_reads;
353         struct drbd_request *req;
354         int rv, n_writes, n_reads;
355
356         b = mdev->oldest_tle;
357         pn = &mdev->oldest_tle;
358         while (b) {
359                 n_writes = 0;
360                 n_reads = 0;
361                 INIT_LIST_HEAD(&carry_reads);
362                 list_for_each_safe(le, tle, &b->requests) {
363                         req = list_entry(le, struct drbd_request, tl_requests);
364                         rv = _req_mod(req, what);
365
366                         n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
367                         n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
368                 }
369                 tmp = b->next;
370
371                 if (n_writes) {
372                         if (what == resend) {
373                                 b->n_writes = n_writes;
374                                 if (b->w.cb == NULL) {
375                                         b->w.cb = w_send_barrier;
376                                         inc_ap_pending(mdev);
377                                         set_bit(CREATE_BARRIER, &mdev->flags);
378                                 }
379
380                                 drbd_queue_work(&mdev->data.work, &b->w);
381                         }
382                         pn = &b->next;
383                 } else {
384                         if (n_reads)
385                                 list_add(&carry_reads, &b->requests);
386                         /* there could still be requests on that ring list,
387                          * in case local io is still pending */
388                         list_del(&b->requests);
389
390                         /* dec_ap_pending corresponding to queue_barrier.
391                          * the newest barrier may not have been queued yet,
392                          * in which case w.cb is still NULL. */
393                         if (b->w.cb != NULL)
394                                 dec_ap_pending(mdev);
395
396                         if (b == mdev->newest_tle) {
397                                 /* recycle, but reinit! */
398                                 D_ASSERT(tmp == NULL);
399                                 INIT_LIST_HEAD(&b->requests);
400                                 list_splice(&carry_reads, &b->requests);
401                                 INIT_LIST_HEAD(&b->w.list);
402                                 b->w.cb = NULL;
403                                 b->br_number = net_random();
404                                 b->n_writes = 0;
405
406                                 *pn = b;
407                                 break;
408                         }
409                         *pn = tmp;
410                         kfree(b);
411                 }
412                 b = tmp;
413                 list_splice(&carry_reads, &b->requests);
414         }
415
416         /* Actions operating on the disk state, also want to work on
417            requests that got barrier acked. */
418         switch (what) {
419         case fail_frozen_disk_io:
420         case restart_frozen_disk_io:
421                 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
422                         req = list_entry(le, struct drbd_request, tl_requests);
423                         _req_mod(req, what);
424                 }
425
426         case connection_lost_while_pending:
427         case resend:
428                 break;
429         default:
430                 dev_err(DEV, "what = %d in _tl_restart()\n", what);
431         }
432 }
433
434
435 /**
436  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
437  * @mdev:       DRBD device.
438  *
439  * This is called after the connection to the peer was lost. The storage covered
440  * by the requests on the transfer log gets marked as out of sync. Called from the
441  * receiver thread and the worker thread.
442  */
443 void tl_clear(struct drbd_conf *mdev)
444 {
445         struct list_head *le, *tle;
446         struct drbd_request *r;
447
448         spin_lock_irq(&mdev->req_lock);
449
450         _tl_restart(mdev, connection_lost_while_pending);
451
452         /* we expect this list to be empty. */
453         D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
454
455         /* but just in case, clean it up anyways! */
456         list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
457                 r = list_entry(le, struct drbd_request, tl_requests);
458                 /* It would be nice to complete outside of spinlock.
459                  * But this is easier for now. */
460                 _req_mod(r, connection_lost_while_pending);
461         }
462
463         /* ensure bit indicating barrier is required is clear */
464         clear_bit(CREATE_BARRIER, &mdev->flags);
465
466         memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
467
468         spin_unlock_irq(&mdev->req_lock);
469 }
470
471 void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
472 {
473         spin_lock_irq(&mdev->req_lock);
474         _tl_restart(mdev, what);
475         spin_unlock_irq(&mdev->req_lock);
476 }
477
478 /**
479  * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
480  * @mdev:       DRBD device.
481  */
482 void tl_abort_disk_io(struct drbd_conf *mdev)
483 {
484         struct drbd_tl_epoch *b;
485         struct list_head *le, *tle;
486         struct drbd_request *req;
487
488         spin_lock_irq(&mdev->req_lock);
489         b = mdev->oldest_tle;
490         while (b) {
491                 list_for_each_safe(le, tle, &b->requests) {
492                         req = list_entry(le, struct drbd_request, tl_requests);
493                         if (!(req->rq_state & RQ_LOCAL_PENDING))
494                                 continue;
495                         _req_mod(req, abort_disk_io);
496                 }
497                 b = b->next;
498         }
499
500         list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
501                 req = list_entry(le, struct drbd_request, tl_requests);
502                 if (!(req->rq_state & RQ_LOCAL_PENDING))
503                         continue;
504                 _req_mod(req, abort_disk_io);
505         }
506
507         spin_unlock_irq(&mdev->req_lock);
508 }
509
510 /**
511  * cl_wide_st_chg() - true if the state change is a cluster wide one
512  * @mdev:       DRBD device.
513  * @os:         old (current) state.
514  * @ns:         new (wanted) state.
515  */
516 static int cl_wide_st_chg(struct drbd_conf *mdev,
517                           union drbd_state os, union drbd_state ns)
518 {
519         return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
520                  ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
521                   (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
522                   (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
523                   (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
524                 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
525                 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
526 }
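/*
 * Example (sketch only) of what counts as "cluster wide" here: promoting this
 * node while connected requires asking the peer first, whereas the same
 * promotion in C_STANDALONE is a purely local decision:
 *
 *	union drbd_state os = mdev->state;
 *	union drbd_state ns = os;
 *	ns.role = R_PRIMARY;
 *	cl_wide_st_chg(mdev, os, ns);	(true  if os.conn >= C_CONNECTED)
 *					(false if os.conn == C_STANDALONE)
 */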
527
528 enum drbd_state_rv
529 drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
530                   union drbd_state mask, union drbd_state val)
531 {
532         unsigned long flags;
533         union drbd_state os, ns;
534         enum drbd_state_rv rv;
535
536         spin_lock_irqsave(&mdev->req_lock, flags);
537         os = mdev->state;
538         ns.i = (os.i & ~mask.i) | val.i;
539         rv = _drbd_set_state(mdev, ns, f, NULL);
540         ns = mdev->state;
541         spin_unlock_irqrestore(&mdev->req_lock, flags);
542
543         return rv;
544 }
545
546 /**
547  * drbd_force_state() - Impose a change which happens outside our control on our state
548  * @mdev:       DRBD device.
549  * @mask:       mask of state bits to change.
550  * @val:        value of new state bits.
551  */
552 void drbd_force_state(struct drbd_conf *mdev,
553         union drbd_state mask, union drbd_state val)
554 {
555         drbd_change_state(mdev, CS_HARD, mask, val);
556 }
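/*
 * Usage sketch: the NS() helper from drbd_int.h builds the mask/val pair for
 * a single state field, which is how this file itself calls
 * drbd_force_state(), e.g. in the bail path of tl_release() above:
 *
 *	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 *
 * CS_HARD means the change is applied even if is_valid_state() or
 * is_valid_state_transition() would normally reject it.
 */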
557
558 static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
559 static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
560                                                     union drbd_state,
561                                                     union drbd_state);
562 enum sanitize_state_warnings {
563         NO_WARNING,
564         ABORTED_ONLINE_VERIFY,
565         ABORTED_RESYNC,
566         CONNECTION_LOST_NEGOTIATING,
567         IMPLICITLY_UPGRADED_DISK,
568         IMPLICITLY_UPGRADED_PDSK,
569 };
570 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
571                                        union drbd_state ns, enum sanitize_state_warnings *warn);
572 int drbd_send_state_req(struct drbd_conf *,
573                         union drbd_state, union drbd_state);
574
575 static enum drbd_state_rv
576 _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
577              union drbd_state val)
578 {
579         union drbd_state os, ns;
580         unsigned long flags;
581         enum drbd_state_rv rv;
582
583         if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
584                 return SS_CW_SUCCESS;
585
586         if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
587                 return SS_CW_FAILED_BY_PEER;
588
589         rv = 0;
590         spin_lock_irqsave(&mdev->req_lock, flags);
591         os = mdev->state;
592         ns.i = (os.i & ~mask.i) | val.i;
593         ns = sanitize_state(mdev, os, ns, NULL);
594
595         if (!cl_wide_st_chg(mdev, os, ns))
596                 rv = SS_CW_NO_NEED;
597         if (!rv) {
598                 rv = is_valid_state(mdev, ns);
599                 if (rv == SS_SUCCESS) {
600                         rv = is_valid_state_transition(mdev, ns, os);
601                         if (rv == SS_SUCCESS)
602                                 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
603                 }
604         }
605         spin_unlock_irqrestore(&mdev->req_lock, flags);
606
607         return rv;
608 }
609
610 /**
611  * drbd_req_state() - Perform a possibly cluster-wide state change
612  * @mdev:       DRBD device.
613  * @mask:       mask of state bits to change.
614  * @val:        value of new state bits.
615  * @f:          flags
616  *
617  * Should not be called directly, use drbd_request_state() or
618  * _drbd_request_state().
619  */
620 static enum drbd_state_rv
621 drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
622                union drbd_state val, enum chg_state_flags f)
623 {
624         struct completion done;
625         unsigned long flags;
626         union drbd_state os, ns;
627         enum drbd_state_rv rv;
628
629         init_completion(&done);
630
631         if (f & CS_SERIALIZE)
632                 mutex_lock(&mdev->state_mutex);
633
634         spin_lock_irqsave(&mdev->req_lock, flags);
635         os = mdev->state;
636         ns.i = (os.i & ~mask.i) | val.i;
637         ns = sanitize_state(mdev, os, ns, NULL);
638
639         if (cl_wide_st_chg(mdev, os, ns)) {
640                 rv = is_valid_state(mdev, ns);
641                 if (rv == SS_SUCCESS)
642                         rv = is_valid_state_transition(mdev, ns, os);
643                 spin_unlock_irqrestore(&mdev->req_lock, flags);
644
645                 if (rv < SS_SUCCESS) {
646                         if (f & CS_VERBOSE)
647                                 print_st_err(mdev, os, ns, rv);
648                         goto abort;
649                 }
650
651                 drbd_state_lock(mdev);
652                 if (!drbd_send_state_req(mdev, mask, val)) {
653                         drbd_state_unlock(mdev);
654                         rv = SS_CW_FAILED_BY_PEER;
655                         if (f & CS_VERBOSE)
656                                 print_st_err(mdev, os, ns, rv);
657                         goto abort;
658                 }
659
660                 wait_event(mdev->state_wait,
661                         (rv = _req_st_cond(mdev, mask, val)));
662
663                 if (rv < SS_SUCCESS) {
664                         drbd_state_unlock(mdev);
665                         if (f & CS_VERBOSE)
666                                 print_st_err(mdev, os, ns, rv);
667                         goto abort;
668                 }
669                 spin_lock_irqsave(&mdev->req_lock, flags);
670                 os = mdev->state;
671                 ns.i = (os.i & ~mask.i) | val.i;
672                 rv = _drbd_set_state(mdev, ns, f, &done);
673                 drbd_state_unlock(mdev);
674         } else {
675                 rv = _drbd_set_state(mdev, ns, f, &done);
676         }
677
678         spin_unlock_irqrestore(&mdev->req_lock, flags);
679
680         if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
681                 D_ASSERT(current != mdev->worker.task);
682                 wait_for_completion(&done);
683         }
684
685 abort:
686         if (f & CS_SERIALIZE)
687                 mutex_unlock(&mdev->state_mutex);
688
689         return rv;
690 }
691
692 /**
693  * _drbd_request_state() - Request a state change (with flags)
694  * @mdev:       DRBD device.
695  * @mask:       mask of state bits to change.
696  * @val:        value of new state bits.
697  * @f:          flags
698  *
699  * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
700  * flag, or when logging of failed state change requests is not desired.
701  */
702 enum drbd_state_rv
703 _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
704                     union drbd_state val, enum chg_state_flags f)
705 {
706         enum drbd_state_rv rv;
707
708         wait_event(mdev->state_wait,
709                    (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
710
711         return rv;
712 }
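/*
 * Caller sketch, mirroring how this file itself uses the helper (e.g. in
 * abw_start_sync() further below): request a single-field change and let the
 * state machine decide whether the peer has to be asked:
 *
 *	enum drbd_state_rv rv;
 *
 *	rv = _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
 *	if (rv < SS_SUCCESS)
 *		(the request was refused; rv tells why)
 *
 * drbd_request_state() (in drbd_int.h) is the common wrapper that adds the
 * usual flag set for ordered, verbose requests.
 */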
713
714 static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
715 {
716         dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
717             name,
718             drbd_conn_str(ns.conn),
719             drbd_role_str(ns.role),
720             drbd_role_str(ns.peer),
721             drbd_disk_str(ns.disk),
722             drbd_disk_str(ns.pdsk),
723             is_susp(ns) ? 's' : 'r',
724             ns.aftr_isp ? 'a' : '-',
725             ns.peer_isp ? 'p' : '-',
726             ns.user_isp ? 'u' : '-'
727             );
728 }
729
730 void print_st_err(struct drbd_conf *mdev, union drbd_state os,
731                   union drbd_state ns, enum drbd_state_rv err)
732 {
733         if (err == SS_IN_TRANSIENT_STATE)
734                 return;
735         dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
736         print_st(mdev, " state", os);
737         print_st(mdev, "wanted", ns);
738 }
739
740
741 /**
742  * is_valid_state() - Returns an SS_ error code if ns is not valid
743  * @mdev:       DRBD device.
744  * @ns:         State to consider.
745  */
746 static enum drbd_state_rv
747 is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
748 {
749         /* See drbd_state_sw_errors in drbd_strings.c */
750
751         enum drbd_fencing_p fp;
752         enum drbd_state_rv rv = SS_SUCCESS;
753
754         fp = FP_DONT_CARE;
755         if (get_ldev(mdev)) {
756                 fp = mdev->ldev->dc.fencing;
757                 put_ldev(mdev);
758         }
759
760         if (get_net_conf(mdev)) {
761                 if (!mdev->net_conf->two_primaries &&
762                     ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
763                         rv = SS_TWO_PRIMARIES;
764                 put_net_conf(mdev);
765         }
766
767         if (rv <= 0)
768                 /* already found a reason to abort */;
769         else if (ns.role == R_SECONDARY && mdev->open_cnt)
770                 rv = SS_DEVICE_IN_USE;
771
772         else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
773                 rv = SS_NO_UP_TO_DATE_DISK;
774
775         else if (fp >= FP_RESOURCE &&
776                  ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
777                 rv = SS_PRIMARY_NOP;
778
779         else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
780                 rv = SS_NO_UP_TO_DATE_DISK;
781
782         else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
783                 rv = SS_NO_LOCAL_DISK;
784
785         else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
786                 rv = SS_NO_REMOTE_DISK;
787
788         else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
789                 rv = SS_NO_UP_TO_DATE_DISK;
790
791         else if ((ns.conn == C_CONNECTED ||
792                   ns.conn == C_WF_BITMAP_S ||
793                   ns.conn == C_SYNC_SOURCE ||
794                   ns.conn == C_PAUSED_SYNC_S) &&
795                   ns.disk == D_OUTDATED)
796                 rv = SS_CONNECTED_OUTDATES;
797
798         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
799                  (mdev->sync_conf.verify_alg[0] == 0))
800                 rv = SS_NO_VERIFY_ALG;
801
802         else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
803                   mdev->agreed_pro_version < 88)
804                 rv = SS_NOT_SUPPORTED;
805
806         else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
807                 rv = SS_CONNECTED_OUTDATES;
808
809         return rv;
810 }
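/*
 * Examples (sketch) of verdicts produced above, assuming default config:
 *
 *	ns.role == R_PRIMARY && ns.peer == R_PRIMARY
 *		-> SS_TWO_PRIMARIES	(unless net_conf->two_primaries)
 *	ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE
 *		-> SS_NO_UP_TO_DATE_DISK
 *	ns.conn == C_VERIFY_S && sync_conf.verify_alg[0] == 0
 *		-> SS_NO_VERIFY_ALG
 */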
811
812 /**
813  * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
814  * @mdev:       DRBD device.
815  * @ns:         new state.
816  * @os:         old state.
817  */
818 static enum drbd_state_rv
819 is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
820                           union drbd_state os)
821 {
822         enum drbd_state_rv rv = SS_SUCCESS;
823
824         if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
825             os.conn > C_CONNECTED)
826                 rv = SS_RESYNC_RUNNING;
827
828         if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
829                 rv = SS_ALREADY_STANDALONE;
830
831         if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
832                 rv = SS_IS_DISKLESS;
833
834         if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
835                 rv = SS_NO_NET_CONFIG;
836
837         if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
838                 rv = SS_LOWER_THAN_OUTDATED;
839
840         if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
841                 rv = SS_IN_TRANSIENT_STATE;
842
843         if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
844                 rv = SS_IN_TRANSIENT_STATE;
845
846         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
847                 rv = SS_NEED_CONNECTION;
848
849         if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
850             ns.conn != os.conn && os.conn > C_CONNECTED)
851                 rv = SS_RESYNC_RUNNING;
852
853         if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
854             os.conn < C_CONNECTED)
855                 rv = SS_NEED_CONNECTION;
856
857         if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
858             && os.conn < C_WF_REPORT_PARAMS)
859                 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
860
861         return rv;
862 }
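/*
 * Transition examples (sketch) for the checks above:
 *
 *	os.conn == C_STANDALONE,  ns.conn == C_DISCONNECTING
 *		-> SS_ALREADY_STANDALONE
 *	os.disk == D_DISKLESS,    ns.disk  > D_ATTACHING
 *		-> SS_IS_DISKLESS
 *	os.conn  < C_CONNECTED,   ns.conn == C_VERIFY_S
 *		-> SS_NEED_CONNECTION
 */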
863
864 static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
865 {
866         static const char *msg_table[] = {
867                 [NO_WARNING] = "",
868                 [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
869                 [ABORTED_RESYNC] = "Resync aborted.",
870                 [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
871                 [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
872                 [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
873         };
874
875         if (warn != NO_WARNING)
876                 dev_warn(DEV, "%s\n", msg_table[warn]);
877 }
878
879 /**
880  * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
881  * @mdev:       DRBD device.
882  * @os:         old state.
883  * @ns:         new state.
884  * @warn:       if not NULL, receives one of the sanitize_state_warnings codes
885  *
886  * When we lose connection, we have to set the state of the peer's disk (pdsk)
887  * to D_UNKNOWN. This rule and many more along those lines are in this function.
888  */
889 static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
890                                        union drbd_state ns, enum sanitize_state_warnings *warn)
891 {
892         enum drbd_fencing_p fp;
893         enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
894
895         if (warn)
896                 *warn = NO_WARNING;
897
898         fp = FP_DONT_CARE;
899         if (get_ldev(mdev)) {
900                 fp = mdev->ldev->dc.fencing;
901                 put_ldev(mdev);
902         }
903
904         /* Disallow Network errors to configure a device's network part */
905         if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
906             os.conn <= C_DISCONNECTING)
907                 ns.conn = os.conn;
908
909         /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
910          * If you try to go into some Sync* state, that shall fail (elsewhere). */
911         if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
912             ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
913                 ns.conn = os.conn;
914
915         /* we cannot fail (again) if we already detached */
916         if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
917                 ns.disk = D_DISKLESS;
918
919         /* After C_DISCONNECTING only C_STANDALONE may follow */
920         if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
921                 ns.conn = os.conn;
922
923         if (ns.conn < C_CONNECTED) {
924                 ns.peer_isp = 0;
925                 ns.peer = R_UNKNOWN;
926                 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
927                         ns.pdsk = D_UNKNOWN;
928         }
929
930         /* Clear the aftr_isp when becoming unconfigured */
931         if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
932                 ns.aftr_isp = 0;
933
934         /* Abort resync if a disk fails/detaches */
935         if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
936             (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
937                 if (warn)
938                         *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
939                                 ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
940                 ns.conn = C_CONNECTED;
941         }
942
943         /* Connection breaks down before we finished "Negotiating" */
944         if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
945             get_ldev_if_state(mdev, D_NEGOTIATING)) {
946                 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
947                         ns.disk = mdev->new_state_tmp.disk;
948                         ns.pdsk = mdev->new_state_tmp.pdsk;
949                 } else {
950                         if (warn)
951                                 *warn = CONNECTION_LOST_NEGOTIATING;
952                         ns.disk = D_DISKLESS;
953                         ns.pdsk = D_UNKNOWN;
954                 }
955                 put_ldev(mdev);
956         }
957
958         /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
959         if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
960                 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
961                         ns.disk = D_UP_TO_DATE;
962                 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
963                         ns.pdsk = D_UP_TO_DATE;
964         }
965
966         /* Implications of the connection state on the disk states */
967         disk_min = D_DISKLESS;
968         disk_max = D_UP_TO_DATE;
969         pdsk_min = D_INCONSISTENT;
970         pdsk_max = D_UNKNOWN;
971         switch ((enum drbd_conns)ns.conn) {
972         case C_WF_BITMAP_T:
973         case C_PAUSED_SYNC_T:
974         case C_STARTING_SYNC_T:
975         case C_WF_SYNC_UUID:
976         case C_BEHIND:
977                 disk_min = D_INCONSISTENT;
978                 disk_max = D_OUTDATED;
979                 pdsk_min = D_UP_TO_DATE;
980                 pdsk_max = D_UP_TO_DATE;
981                 break;
982         case C_VERIFY_S:
983         case C_VERIFY_T:
984                 disk_min = D_UP_TO_DATE;
985                 disk_max = D_UP_TO_DATE;
986                 pdsk_min = D_UP_TO_DATE;
987                 pdsk_max = D_UP_TO_DATE;
988                 break;
989         case C_CONNECTED:
990                 disk_min = D_DISKLESS;
991                 disk_max = D_UP_TO_DATE;
992                 pdsk_min = D_DISKLESS;
993                 pdsk_max = D_UP_TO_DATE;
994                 break;
995         case C_WF_BITMAP_S:
996         case C_PAUSED_SYNC_S:
997         case C_STARTING_SYNC_S:
998         case C_AHEAD:
999                 disk_min = D_UP_TO_DATE;
1000                 disk_max = D_UP_TO_DATE;
1001                 pdsk_min = D_INCONSISTENT;
1002                 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
1003                 break;
1004         case C_SYNC_TARGET:
1005                 disk_min = D_INCONSISTENT;
1006                 disk_max = D_INCONSISTENT;
1007                 pdsk_min = D_UP_TO_DATE;
1008                 pdsk_max = D_UP_TO_DATE;
1009                 break;
1010         case C_SYNC_SOURCE:
1011                 disk_min = D_UP_TO_DATE;
1012                 disk_max = D_UP_TO_DATE;
1013                 pdsk_min = D_INCONSISTENT;
1014                 pdsk_max = D_INCONSISTENT;
1015                 break;
1016         case C_STANDALONE:
1017         case C_DISCONNECTING:
1018         case C_UNCONNECTED:
1019         case C_TIMEOUT:
1020         case C_BROKEN_PIPE:
1021         case C_NETWORK_FAILURE:
1022         case C_PROTOCOL_ERROR:
1023         case C_TEAR_DOWN:
1024         case C_WF_CONNECTION:
1025         case C_WF_REPORT_PARAMS:
1026         case C_MASK:
1027                 break;
1028         }
1029         if (ns.disk > disk_max)
1030                 ns.disk = disk_max;
1031
1032         if (ns.disk < disk_min) {
1033                 if (warn)
1034                         *warn = IMPLICITLY_UPGRADED_DISK;
1035                 ns.disk = disk_min;
1036         }
1037         if (ns.pdsk > pdsk_max)
1038                 ns.pdsk = pdsk_max;
1039
1040         if (ns.pdsk < pdsk_min) {
1041                 if (warn)
1042                         *warn = IMPLICITLY_UPGRADED_PDSK;
1043                 ns.pdsk = pdsk_min;
1044         }
1045
1046         if (fp == FP_STONITH &&
1047             (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
1048             !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
1049                 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
1050
1051         if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
1052             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
1053             !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
1054                 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
1055
1056         if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1057                 if (ns.conn == C_SYNC_SOURCE)
1058                         ns.conn = C_PAUSED_SYNC_S;
1059                 if (ns.conn == C_SYNC_TARGET)
1060                         ns.conn = C_PAUSED_SYNC_T;
1061         } else {
1062                 if (ns.conn == C_PAUSED_SYNC_S)
1063                         ns.conn = C_SYNC_SOURCE;
1064                 if (ns.conn == C_PAUSED_SYNC_T)
1065                         ns.conn = C_SYNC_TARGET;
1066         }
1067
1068         return ns;
1069 }
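/*
 * Example (sketch) of the implicit adjustments above: suppose the peer dies
 * while we are Connected/UpToDate/UpToDate and the requested change is only
 * ns.conn = C_NETWORK_FAILURE.  Because ns.conn < C_CONNECTED,
 * sanitize_state() additionally sets
 *
 *	ns.peer     = R_UNKNOWN;
 *	ns.peer_isp = 0;
 *	ns.pdsk     = D_UNKNOWN;	(was D_UP_TO_DATE, which is > D_UNKNOWN)
 *
 * so callers never see a disconnected state that still claims to know the
 * peer's disk.
 */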
1070
1071 /* helper for __drbd_set_state */
1072 static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1073 {
1074         if (mdev->agreed_pro_version < 90)
1075                 mdev->ov_start_sector = 0;
1076         mdev->rs_total = drbd_bm_bits(mdev);
1077         mdev->ov_position = 0;
1078         if (cs == C_VERIFY_T) {
1079                 /* starting online verify from an arbitrary position
1080                  * does not fit well into the existing protocol.
1081                  * on C_VERIFY_T, we initialize ov_left and friends
1082                  * implicitly in receive_DataRequest once the
1083                  * first P_OV_REQUEST is received */
1084                 mdev->ov_start_sector = ~(sector_t)0;
1085         } else {
1086                 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1087                 if (bit >= mdev->rs_total) {
1088                         mdev->ov_start_sector =
1089                                 BM_BIT_TO_SECT(mdev->rs_total - 1);
1090                         mdev->rs_total = 1;
1091                 } else
1092                         mdev->rs_total -= bit;
1093                 mdev->ov_position = mdev->ov_start_sector;
1094         }
1095         mdev->ov_left = mdev->rs_total;
1096 }
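/*
 * Worked example (sketch), assuming the usual 4 KiB-per-bit bitmap
 * granularity (8 sectors per bit) and agreed_pro_version >= 90:
 *
 *	drbd_bm_bits(mdev)    = 1000000		(rs_total before adjustment)
 *	mdev->ov_start_sector = 2000000
 *	bit = BM_SECT_TO_BIT(2000000) = 250000
 *
 * Since bit < rs_total, this yields rs_total = 1000000 - 250000 = 750000,
 * ov_position = sector 2000000, and ov_left = 750000 bits still to verify.
 */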
1097
1098 static void drbd_resume_al(struct drbd_conf *mdev)
1099 {
1100         if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1101                 dev_info(DEV, "Resumed AL updates\n");
1102 }
1103
1104 /**
1105  * __drbd_set_state() - Set a new DRBD state
1106  * @mdev:       DRBD device.
1107  * @ns:         new state.
1108  * @flags:      Flags
1109  * @done:       Optional completion that will be completed after after_state_ch() has finished
1110  *
1111  * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1112  */
1113 enum drbd_state_rv
1114 __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1115                  enum chg_state_flags flags, struct completion *done)
1116 {
1117         union drbd_state os;
1118         enum drbd_state_rv rv = SS_SUCCESS;
1119         enum sanitize_state_warnings ssw;
1120         struct after_state_chg_work *ascw;
1121
1122         os = mdev->state;
1123
1124         ns = sanitize_state(mdev, os, ns, &ssw);
1125
1126         if (ns.i == os.i)
1127                 return SS_NOTHING_TO_DO;
1128
1129         if (!(flags & CS_HARD)) {
1130                 /*  pre-state-change checks ; only look at ns  */
1131                 /* See drbd_state_sw_errors in drbd_strings.c */
1132
1133                 rv = is_valid_state(mdev, ns);
1134                 if (rv < SS_SUCCESS) {
1135                         /* If the old state was illegal as well, then let
1136                            this happen...*/
1137
1138                         if (is_valid_state(mdev, os) == rv)
1139                                 rv = is_valid_state_transition(mdev, ns, os);
1140                 } else
1141                         rv = is_valid_state_transition(mdev, ns, os);
1142         }
1143
1144         if (rv < SS_SUCCESS) {
1145                 if (flags & CS_VERBOSE)
1146                         print_st_err(mdev, os, ns, rv);
1147                 return rv;
1148         }
1149
1150         print_sanitize_warnings(mdev, ssw);
1151
1152         {
1153         char *pbp, pb[300];
1154         pbp = pb;
1155         *pbp = 0;
1156         if (ns.role != os.role)
1157                 pbp += sprintf(pbp, "role( %s -> %s ) ",
1158                                drbd_role_str(os.role),
1159                                drbd_role_str(ns.role));
1160         if (ns.peer != os.peer)
1161                 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1162                                drbd_role_str(os.peer),
1163                                drbd_role_str(ns.peer));
1164         if (ns.conn != os.conn)
1165                 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1166                                drbd_conn_str(os.conn),
1167                                drbd_conn_str(ns.conn));
1168         if (ns.disk != os.disk)
1169                 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1170                                drbd_disk_str(os.disk),
1171                                drbd_disk_str(ns.disk));
1172         if (ns.pdsk != os.pdsk)
1173                 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1174                                drbd_disk_str(os.pdsk),
1175                                drbd_disk_str(ns.pdsk));
1176         if (is_susp(ns) != is_susp(os))
1177                 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1178                                is_susp(os),
1179                                is_susp(ns));
1180         if (ns.aftr_isp != os.aftr_isp)
1181                 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1182                                os.aftr_isp,
1183                                ns.aftr_isp);
1184         if (ns.peer_isp != os.peer_isp)
1185                 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1186                                os.peer_isp,
1187                                ns.peer_isp);
1188         if (ns.user_isp != os.user_isp)
1189                 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1190                                os.user_isp,
1191                                ns.user_isp);
1192         dev_info(DEV, "%s\n", pb);
1193         }
1194
1195         /* solve the race between becoming unconfigured,
1196          * worker doing the cleanup, and
1197          * admin reconfiguring us:
1198          * on (re)configure, first set CONFIG_PENDING,
1199          * then wait for a potentially exiting worker,
1200          * start the worker, and schedule one no_op.
1201          * then proceed with configuration.
1202          */
1203         if (ns.disk == D_DISKLESS &&
1204             ns.conn == C_STANDALONE &&
1205             ns.role == R_SECONDARY &&
1206             !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1207                 set_bit(DEVICE_DYING, &mdev->flags);
1208
1209         /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1210          * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1211          * drbd_ldev_destroy() won't happen before our corresponding
1212          * after_state_ch works run, where we put_ldev again. */
1213         if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1214             (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1215                 atomic_inc(&mdev->local_cnt);
1216
1217         mdev->state = ns;
1218
1219         if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1220                 drbd_print_uuids(mdev, "attached to UUIDs");
1221
1222         wake_up(&mdev->misc_wait);
1223         wake_up(&mdev->state_wait);
1224
1225         /* aborted verify run. log the last position */
1226         if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1227             ns.conn < C_CONNECTED) {
1228                 mdev->ov_start_sector =
1229                         BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1230                 dev_info(DEV, "Online Verify reached sector %llu\n",
1231                         (unsigned long long)mdev->ov_start_sector);
1232         }
1233
1234         if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1235             (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
1236                 dev_info(DEV, "Syncer continues.\n");
1237                 mdev->rs_paused += (long)jiffies
1238                                   -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1239                 if (ns.conn == C_SYNC_TARGET)
1240                         mod_timer(&mdev->resync_timer, jiffies);
1241         }
1242
1243         if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
1244             (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1245                 dev_info(DEV, "Resync suspended\n");
1246                 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1247         }
1248
1249         if (os.conn == C_CONNECTED &&
1250             (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1251                 unsigned long now = jiffies;
1252                 int i;
1253
1254                 set_ov_position(mdev, ns.conn);
1255                 mdev->rs_start = now;
1256                 mdev->rs_last_events = 0;
1257                 mdev->rs_last_sect_ev = 0;
1258                 mdev->ov_last_oos_size = 0;
1259                 mdev->ov_last_oos_start = 0;
1260
1261                 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1262                         mdev->rs_mark_left[i] = mdev->ov_left;
1263                         mdev->rs_mark_time[i] = now;
1264                 }
1265
1266                 drbd_rs_controller_reset(mdev);
1267
1268                 if (ns.conn == C_VERIFY_S) {
1269                         dev_info(DEV, "Starting Online Verify from sector %llu\n",
1270                                         (unsigned long long)mdev->ov_position);
1271                         mod_timer(&mdev->resync_timer, jiffies);
1272                 }
1273         }
1274
1275         if (get_ldev(mdev)) {
1276                 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1277                                                  MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1278                                                  MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1279
1280                 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1281                         mdf |= MDF_CRASHED_PRIMARY;
1282                 if (mdev->state.role == R_PRIMARY ||
1283                     (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1284                         mdf |= MDF_PRIMARY_IND;
1285                 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1286                         mdf |= MDF_CONNECTED_IND;
1287                 if (mdev->state.disk > D_INCONSISTENT)
1288                         mdf |= MDF_CONSISTENT;
1289                 if (mdev->state.disk > D_OUTDATED)
1290                         mdf |= MDF_WAS_UP_TO_DATE;
1291                 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1292                         mdf |= MDF_PEER_OUT_DATED;
1293                 if (mdf != mdev->ldev->md.flags) {
1294                         mdev->ldev->md.flags = mdf;
1295                         drbd_md_mark_dirty(mdev);
1296                 }
1297                 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1298                         drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1299                 put_ldev(mdev);
1300         }
1301
1302         /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1303         if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1304             os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1305                 set_bit(CONSIDER_RESYNC, &mdev->flags);
1306
1307         /* Receiver should clean up itself */
1308         if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1309                 drbd_thread_stop_nowait(&mdev->receiver);
1310
1311         /* Now the receiver finished cleaning up itself, it should die */
1312         if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1313                 drbd_thread_stop_nowait(&mdev->receiver);
1314
1315         /* Upon network failure, we need to restart the receiver. */
1316         if (os.conn > C_WF_CONNECTION &&
1317             ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1318                 drbd_thread_restart_nowait(&mdev->receiver);
1319
1320         /* Resume AL writing if we get a connection */
1321         if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1322                 drbd_resume_al(mdev);
1323
1324         ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1325         if (ascw) {
1326                 ascw->os = os;
1327                 ascw->ns = ns;
1328                 ascw->flags = flags;
1329                 ascw->w.cb = w_after_state_ch;
1330                 ascw->done = done;
1331                 drbd_queue_work(&mdev->data.work, &ascw->w);
1332         } else {
1333                 dev_warn(DEV, "Could not kmalloc an ascw\n");
1334         }
1335
1336         return rv;
1337 }
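/*
 * For reference (sketch), the pb[] line assembled above ends up in the log
 * looking roughly like this for a promotion that also outdates the peer:
 *
 *	role( Secondary -> Primary ) pdsk( UpToDate -> Outdated )
 *
 * Only the fields that actually changed between os and ns are printed.
 */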
1338
1339 static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1340 {
1341         struct after_state_chg_work *ascw =
1342                 container_of(w, struct after_state_chg_work, w);
1343         after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1344         if (ascw->flags & CS_WAIT_COMPLETE) {
1345                 D_ASSERT(ascw->done != NULL);
1346                 complete(ascw->done);
1347         }
1348         kfree(ascw);
1349
1350         return 1;
1351 }
1352
1353 static void abw_start_sync(struct drbd_conf *mdev, int rv)
1354 {
1355         if (rv) {
1356                 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1357                 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1358                 return;
1359         }
1360
1361         switch (mdev->state.conn) {
1362         case C_STARTING_SYNC_T:
1363                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1364                 break;
1365         case C_STARTING_SYNC_S:
1366                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1367                 break;
1368         }
1369 }
1370
1371 int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
1372                 int (*io_fn)(struct drbd_conf *),
1373                 char *why, enum bm_flag flags)
1374 {
1375         int rv;
1376
1377         D_ASSERT(current == mdev->worker.task);
1378
1379         /* open coded non-blocking drbd_suspend_io(mdev); */
1380         set_bit(SUSPEND_IO, &mdev->flags);
1381
1382         drbd_bm_lock(mdev, why, flags);
1383         rv = io_fn(mdev);
1384         drbd_bm_unlock(mdev);
1385
1386         drbd_resume_io(mdev);
1387
1388         return rv;
1389 }
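/*
 * Usage sketch; my_bm_io_fn and the chosen bm_flag value are placeholders,
 * only the calling convention of the helper above is real.  It must run in
 * worker context (see the D_ASSERT), suspends application I/O, and holds the
 * bitmap lock around the callback:
 *
 *	static int my_bm_io_fn(struct drbd_conf *mdev)
 *	{
 *		return 0;	(do the actual bitmap I/O here)
 *	}
 *
 *	rv = drbd_bitmap_io_from_worker(mdev, my_bm_io_fn,
 *					"example bitmap I/O", flags);
 */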
1390
1391 /**
1392  * after_state_ch() - Perform after state change actions that may sleep
1393  * @mdev:       DRBD device.
1394  * @os:         old state.
1395  * @ns:         new state.
1396  * @flags:      Flags
1397  */
1398 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1399                            union drbd_state ns, enum chg_state_flags flags)
1400 {
1401         enum drbd_fencing_p fp;
1402         enum drbd_req_event what = nothing;
1403         union drbd_state nsm = (union drbd_state){ .i = -1 };
1404
1405         if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1406                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1407                 if (mdev->p_uuid)
1408                         mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1409         }
1410
1411         fp = FP_DONT_CARE;
1412         if (get_ldev(mdev)) {
1413                 fp = mdev->ldev->dc.fencing;
1414                 put_ldev(mdev);
1415         }
1416
1417         /* Inform userspace about the change... */
1418         drbd_bcast_state(mdev, ns);
1419
1420         if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1421             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1422                 drbd_khelper(mdev, "pri-on-incon-degr");
1423
1424         /* Here we have the actions that are performed after a
1425            state change. This function might sleep */
1426
1427         if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
1428                 mod_timer(&mdev->request_timer, jiffies + HZ);
1429
1430         nsm.i = -1;
1431         if (ns.susp_nod) {
1432                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1433                         what = resend;
1434
1435                 if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
1436                     ns.disk > D_NEGOTIATING)
1437                         what = restart_frozen_disk_io;
1438
1439                 if (what != nothing)
1440                         nsm.susp_nod = 0;
1441         }
1442
1443         if (ns.susp_fen) {
1444                 /* case1: The outdate peer handler is successful: */
1445                 if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
1446                         tl_clear(mdev);
1447                         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1448                                 drbd_uuid_new_current(mdev);
1449                                 clear_bit(NEW_CUR_UUID, &mdev->flags);
1450                         }
1451                         spin_lock_irq(&mdev->req_lock);
1452                         _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1453                         spin_unlock_irq(&mdev->req_lock);
1454                 }
1455                 /* case2: The connection was established again: */
1456                 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1457                         clear_bit(NEW_CUR_UUID, &mdev->flags);
1458                         what = resend;
1459                         nsm.susp_fen = 0;
1460                 }
1461         }
1462
1463         if (what != nothing) {
1464                 spin_lock_irq(&mdev->req_lock);
1465                 _tl_restart(mdev, what);
1466                 nsm.i &= mdev->state.i;
1467                 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1468                 spin_unlock_irq(&mdev->req_lock);
1469         }
1470
1471         /* Became sync source.  With protocol >= 96, we still need to send out
1472          * the sync uuid now. Need to do that before any drbd_send_state, or
1473          * the other side may go "paused sync" before receiving the sync uuids,
1474          * which is unexpected. */
1475         if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1476             (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1477             mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1478                 drbd_gen_and_send_sync_uuid(mdev);
1479                 put_ldev(mdev);
1480         }
1481
1482         /* Do not change the order of the if above and the two below... */
1483         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
1484                 drbd_send_uuids(mdev);
1485                 drbd_send_state(mdev, ns);
1486         }
1487         /* No point in queuing send_bitmap if we don't have a connection
1488          * anymore, so check also the _current_ state, not only the new state
1489          * at the time this work was queued. */
1490         if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1491             mdev->state.conn == C_WF_BITMAP_S)
1492                 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1493                                 "send_bitmap (WFBitMapS)",
1494                                 BM_LOCKED_TEST_ALLOWED);
1495
1496         /* Lost contact to peer's copy of the data */
1497         if ((os.pdsk >= D_INCONSISTENT &&
1498              os.pdsk != D_UNKNOWN &&
1499              os.pdsk != D_OUTDATED)
1500         &&  (ns.pdsk < D_INCONSISTENT ||
1501              ns.pdsk == D_UNKNOWN ||
1502              ns.pdsk == D_OUTDATED)) {
1503                 if (get_ldev(mdev)) {
1504                         if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1505                             mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1506                                 if (is_susp(mdev->state)) {
1507                                         set_bit(NEW_CUR_UUID, &mdev->flags);
1508                                 } else {
1509                                         drbd_uuid_new_current(mdev);
1510                                         drbd_send_uuids(mdev);
1511                                 }
1512                         }
1513                         put_ldev(mdev);
1514                 }
1515         }
1516
1517         if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1518                 if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
1519                     mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1520                         drbd_uuid_new_current(mdev);
1521                         drbd_send_uuids(mdev);
1522                 }
1523                 /* D_DISKLESS Peer becomes secondary */
1524                 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1525                         /* We may still be Primary ourselves.
1526                          * No harm done if the bitmap still changes,
1527                          * redirtied pages will follow later. */
1528                         drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1529                                 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
1530                 put_ldev(mdev);
1531         }
1532
1533         /* Write out all changed bits on demote.
1534          * Though, there is no need to do that just yet
1535          * if a resync is still going on. */
1536         if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1537                 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1538                 /* No changes to the bitmap expected this time, so assert that,
1539                  * even though no harm was done if it did change. */
1540                 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1541                                 "demote", BM_LOCKED_TEST_ALLOWED);
1542                 put_ldev(mdev);
1543         }
1544
1545         /* Last part of the attaching process ... */
1546         if (ns.conn >= C_CONNECTED &&
1547             os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1548                 drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
1549                 drbd_send_uuids(mdev);
1550                 drbd_send_state(mdev, ns);
1551         }
1552
1553         /* We want to pause/continue resync, tell peer. */
1554         if (ns.conn >= C_CONNECTED &&
1555              ((os.aftr_isp != ns.aftr_isp) ||
1556               (os.user_isp != ns.user_isp)))
1557                 drbd_send_state(mdev, ns);
1558
1559         /* In case one of the isp bits got set, suspend other devices. */
1560         if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1561             (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1562                 suspend_other_sg(mdev);
1563
1564         /* Make sure the peer gets informed about possible state
1565            changes (ISP bits) that happened while we were in WFReportParams. */
1566         if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1567                 drbd_send_state(mdev, ns);
1568
1569         if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1570                 drbd_send_state(mdev, ns);
1571
1572         /* We are in the process of starting a full sync... */
1573         if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1574             (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1575                 /* no other bitmap changes expected during this phase */
1576                 drbd_queue_bitmap_io(mdev,
1577                         &drbd_bmio_set_n_write, &abw_start_sync,
1578                         "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
1579
1580         /* We are invalidating ourselves... */
1581         if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1582             os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1583                 /* other bitmap operations are expected during this phase */
1584                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1585                         "set_n_write from invalidate", BM_LOCKED_MASK);
1586
1587         /* first half of local IO error, failure to attach,
1588          * or administrative detach */
1589         if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1590                 enum drbd_io_error_p eh = EP_PASS_ON;
1591                 int was_io_error = 0;
1592                 /* corresponding get_ldev was in __drbd_set_state, to serialize
1593                  * our cleanup here with the transition to D_DISKLESS.
1594                  * But it is still not safe to dereference ldev here, since
1595                  * we might come from a failed Attach before ldev was set. */
1596                 if (mdev->ldev) {
1597                         eh = mdev->ldev->dc.on_io_error;
1598                         was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1599
1600                         /* Immediately allow completion of all application IO that is
1601                            waiting for completion from the local disk. */
1602                         tl_abort_disk_io(mdev);
1603
1604                         /* current state still has to be D_FAILED,
1605                          * there is only one way out: to D_DISKLESS,
1606                          * and that may only happen after our put_ldev below. */
1607                         if (mdev->state.disk != D_FAILED)
1608                                 dev_err(DEV,
1609                                         "ASSERT FAILED: disk is %s during detach\n",
1610                                         drbd_disk_str(mdev->state.disk));
1611
1612                         if (ns.conn >= C_CONNECTED)
1613                                 drbd_send_state(mdev, ns);
1614
1615                         drbd_rs_cancel_all(mdev);
1616
1617                         /* In case we want to get something to stable storage still,
1618                          * this may be the last chance.
1619                          * Following put_ldev may transition to D_DISKLESS. */
1620                         drbd_md_sync(mdev);
1621                 }
1622                 put_ldev(mdev);
1623
1624                 if (was_io_error && eh == EP_CALL_HELPER)
1625                         drbd_khelper(mdev, "local-io-error");
1626         }
1627
1628         /* second half of local IO error, failure to attach,
1629          * or administrative detach,
1630          * after local_cnt references have reached zero again */
1631         if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1632                 /* We must still be diskless,
1633                  * re-attach has to be serialized with this! */
1634                 if (mdev->state.disk != D_DISKLESS)
1635                         dev_err(DEV,
1636                                 "ASSERT FAILED: disk is %s while going diskless\n",
1637                                 drbd_disk_str(mdev->state.disk));
1638
1639                 mdev->rs_total = 0;
1640                 mdev->rs_failed = 0;
1641                 atomic_set(&mdev->rs_pending_cnt, 0);
1642
1643                 if (ns.conn >= C_CONNECTED)
1644                         drbd_send_state(mdev, ns);
1645
1646                 /* corresponding get_ldev in __drbd_set_state;
1647                  * this may finally trigger drbd_ldev_destroy. */
1648                 put_ldev(mdev);
1649         }
1650
1651         /* Notify peer that I had a local IO error and did not detach. */
1652         if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
1653                 drbd_send_state(mdev, ns);
1654
1655         /* Disks got bigger while they were detached */
1656         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1657             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1658                 if (ns.conn == C_CONNECTED)
1659                         resync_after_online_grow(mdev);
1660         }
1661
1662         /* A resync finished or aborted, wake paused devices... */
1663         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1664             (os.peer_isp && !ns.peer_isp) ||
1665             (os.user_isp && !ns.user_isp))
1666                 resume_next_sg(mdev);
1667
1668         /* sync target done with resync.  Explicitly notify peer, even though
1669          * it should (at least for non-empty resyncs) already know itself. */
1670         if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1671                 drbd_send_state(mdev, ns);
1672
1673         /* This triggers bitmap writeout of potentially still unwritten pages
1674          * if the resync finished cleanly, or aborted because of peer disk
1675          * failure, or because of connection loss.
1676          * For resync aborted because of local disk failure, we cannot do
1677          * any bitmap writeout anymore.
1678          * No harm done if some bits change during this phase.
1679          */
1680         if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1681                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1682                         "write from resync_finished", BM_LOCKED_SET_ALLOWED);
1683                 put_ldev(mdev);
1684         }
1685
1686         /* free tl_hash if we got thawed and are C_STANDALONE */
1687         if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1688                 drbd_free_tl_hash(mdev);
1689
1690         /* Upon network connection, we need to start the receiver */
1691         if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1692                 drbd_thread_start(&mdev->receiver);
1693
1694         /* Terminate worker thread if we are unconfigured - it will be
1695            restarted as needed... */
1696         if (ns.disk == D_DISKLESS &&
1697             ns.conn == C_STANDALONE &&
1698             ns.role == R_SECONDARY) {
1699                 if (os.aftr_isp != ns.aftr_isp)
1700                         resume_next_sg(mdev);
1701                 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1702                 if (test_bit(DEVICE_DYING, &mdev->flags))
1703                         drbd_thread_stop_nowait(&mdev->worker);
1704         }
1705
1706         drbd_md_sync(mdev);
1707 }
1708
1709
1710 static int drbd_thread_setup(void *arg)
1711 {
1712         struct drbd_thread *thi = (struct drbd_thread *) arg;
1713         struct drbd_conf *mdev = thi->mdev;
1714         unsigned long flags;
1715         int retval;
1716
1717 restart:
1718         retval = thi->function(thi);
1719
1720         spin_lock_irqsave(&thi->t_lock, flags);
1721
1722         /* If the receiver has been "Exiting", the last thing it did
1723          * was set the conn state to "StandAlone".
1724          * If a re-connect request now comes in, the conn state goes to
1725          * C_UNCONNECTED, and the receiver thread will be "started" again.
1726          * drbd_thread_start needs to set "Restarting" in that case.
1727          * The t_state check and assignment need to be within the same spinlock,
1728          * so either thread_start sees Exiting, and can remap to Restarting,
1729          * or thread_start sees None, and can proceed as normal.
1730          */
1731
1732         if (thi->t_state == Restarting) {
1733                 dev_info(DEV, "Restarting %s\n", current->comm);
1734                 thi->t_state = Running;
1735                 spin_unlock_irqrestore(&thi->t_lock, flags);
1736                 goto restart;
1737         }
1738
1739         thi->task = NULL;
1740         thi->t_state = None;
1741         smp_mb();
1742         complete(&thi->stop);
1743         spin_unlock_irqrestore(&thi->t_lock, flags);
1744
1745         dev_info(DEV, "Terminating %s\n", current->comm);
1746
1747         /* Release mod reference taken when thread was started */
1748         module_put(THIS_MODULE);
1749         return retval;
1750 }
1751
1752 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1753                       int (*func) (struct drbd_thread *))
1754 {
1755         spin_lock_init(&thi->t_lock);
1756         thi->task    = NULL;
1757         thi->t_state = None;
1758         thi->function = func;
1759         thi->mdev = mdev;
1760 }
1761
1762 int drbd_thread_start(struct drbd_thread *thi)
1763 {
1764         struct drbd_conf *mdev = thi->mdev;
1765         struct task_struct *nt;
1766         unsigned long flags;
1767
1768         const char *me =
1769                 thi == &mdev->receiver ? "receiver" :
1770                 thi == &mdev->asender  ? "asender"  :
1771                 thi == &mdev->worker   ? "worker"   : "NONSENSE";
1772
1773         /* is used from state engine doing drbd_thread_stop_nowait,
1774          * while holding the req lock irqsave */
1775         spin_lock_irqsave(&thi->t_lock, flags);
1776
1777         switch (thi->t_state) {
1778         case None:
1779                 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1780                                 me, current->comm, current->pid);
1781
1782                 /* Get ref on module for thread - this is released when thread exits */
1783                 if (!try_module_get(THIS_MODULE)) {
1784                         dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1785                         spin_unlock_irqrestore(&thi->t_lock, flags);
1786                         return false;
1787                 }
1788
1789                 init_completion(&thi->stop);
1790                 D_ASSERT(thi->task == NULL);
1791                 thi->reset_cpu_mask = 1;
1792                 thi->t_state = Running;
1793                 spin_unlock_irqrestore(&thi->t_lock, flags);
1794                 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1795
1796                 nt = kthread_create(drbd_thread_setup, (void *) thi,
1797                                     "drbd%d_%s", mdev_to_minor(mdev), me);
1798
1799                 if (IS_ERR(nt)) {
1800                         dev_err(DEV, "Couldn't start thread\n");
1801
1802                         module_put(THIS_MODULE);
1803                         return false;
1804                 }
1805                 spin_lock_irqsave(&thi->t_lock, flags);
1806                 thi->task = nt;
1807                 thi->t_state = Running;
1808                 spin_unlock_irqrestore(&thi->t_lock, flags);
1809                 wake_up_process(nt);
1810                 break;
1811         case Exiting:
1812                 thi->t_state = Restarting;
1813                 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1814                                 me, current->comm, current->pid);
1815                 /* fall through */
1816         case Running:
1817         case Restarting:
1818         default:
1819                 spin_unlock_irqrestore(&thi->t_lock, flags);
1820                 break;
1821         }
1822
1823         return true;
1824 }
1825
1826
1827 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1828 {
1829         unsigned long flags;
1830
1831         enum drbd_thread_state ns = restart ? Restarting : Exiting;
1832
1833         /* may be called from state engine, holding the req lock irqsave */
1834         spin_lock_irqsave(&thi->t_lock, flags);
1835
1836         if (thi->t_state == None) {
1837                 spin_unlock_irqrestore(&thi->t_lock, flags);
1838                 if (restart)
1839                         drbd_thread_start(thi);
1840                 return;
1841         }
1842
1843         if (thi->t_state != ns) {
1844                 if (thi->task == NULL) {
1845                         spin_unlock_irqrestore(&thi->t_lock, flags);
1846                         return;
1847                 }
1848
1849                 thi->t_state = ns;
1850                 smp_mb();
1851                 init_completion(&thi->stop);
1852                 if (thi->task != current)
1853                         force_sig(DRBD_SIGKILL, thi->task);
1854
1855         }
1856
1857         spin_unlock_irqrestore(&thi->t_lock, flags);
1858
1859         if (wait)
1860                 wait_for_completion(&thi->stop);
1861 }
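
/* For orientation, a sketch of how the stop/restart helpers used by the state
 * machine above map onto _drbd_thread_stop(), assuming the usual wrappers
 * from drbd_int.h:
 *
 *	drbd_thread_stop(thi);            -> _drbd_thread_stop(thi, false, true);
 *	drbd_thread_stop_nowait(thi);     -> _drbd_thread_stop(thi, false, false);
 *	drbd_thread_restart_nowait(thi);  -> _drbd_thread_stop(thi, true, false);
 */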
1862
1863 #ifdef CONFIG_SMP
1864 /**
1865  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1866  * @mdev:       DRBD device.
1867  *
1868  * Forces all threads of a device onto the same CPU. This is beneficial for
1869  * DRBD's performance. May be overridden by the user's configuration.
1870  */
1871 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1872 {
1873         int ord, cpu;
1874
1875         /* user override. */
1876         if (cpumask_weight(mdev->cpu_mask))
1877                 return;
1878
1879         ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1880         for_each_online_cpu(cpu) {
1881                 if (ord-- == 0) {
1882                         cpumask_set_cpu(cpu, mdev->cpu_mask);
1883                         return;
1884                 }
1885         }
1886         /* should not be reached */
1887         cpumask_setall(mdev->cpu_mask);
1888 }
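
/* Worked example (illustrative only): with 4 CPUs online and no user override,
 * ord = mdev_to_minor(mdev) % 4, so minors 0..3 pin their threads to CPUs
 * 0..3 respectively, and minor 4 wraps around to CPU 0 again.
 */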
1889
1890 /**
1891  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1892  * @mdev:       DRBD device.
1893  *
1894  * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1895  * prematurely.
1896  */
1897 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1898 {
1899         struct task_struct *p = current;
1900         struct drbd_thread *thi =
1901                 p == mdev->asender.task  ? &mdev->asender  :
1902                 p == mdev->receiver.task ? &mdev->receiver :
1903                 p == mdev->worker.task   ? &mdev->worker   :
1904                 NULL;
1905         ERR_IF(thi == NULL)
1906                 return;
1907         if (!thi->reset_cpu_mask)
1908                 return;
1909         thi->reset_cpu_mask = 0;
1910         set_cpus_allowed_ptr(p, mdev->cpu_mask);
1911 }
1912 #endif
1913
1914 /* the appropriate socket mutex must be held already */
1915 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1916                           enum drbd_packets cmd, struct p_header80 *h,
1917                           size_t size, unsigned msg_flags)
1918 {
1919         int sent, ok;
1920
1921         ERR_IF(!h) return false;
1922         ERR_IF(!size) return false;
1923
1924         h->magic   = BE_DRBD_MAGIC;
1925         h->command = cpu_to_be16(cmd);
1926         h->length  = cpu_to_be16(size-sizeof(struct p_header80));
1927
1928         sent = drbd_send(mdev, sock, h, size, msg_flags);
1929
1930         ok = (sent == size);
1931         if (!ok && !signal_pending(current))
1932                 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
1933                     cmdname(cmd), (int)size, sent);
1934         return ok;
1935 }
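
/* Usage sketch (illustrative only): callers typically embed a struct p_header80
 * as the first member of their packet and let _drbd_send_cmd() fill in
 * magic/command/length, where length counts only the payload after the header:
 *
 *	struct p_header80 h;
 *	drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
 *	// header-only packet: h.length is sent as 0
 */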
1936
1937 /* don't pass the socket. we may only look at it
1938  * when we hold the appropriate socket mutex.
1939  */
1940 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1941                   enum drbd_packets cmd, struct p_header80 *h, size_t size)
1942 {
1943         int ok = 0;
1944         struct socket *sock;
1945
1946         if (use_data_socket) {
1947                 mutex_lock(&mdev->data.mutex);
1948                 sock = mdev->data.socket;
1949         } else {
1950                 mutex_lock(&mdev->meta.mutex);
1951                 sock = mdev->meta.socket;
1952         }
1953
1954         /* drbd_disconnect() could have called drbd_free_sock()
1955          * while we were waiting in down()... */
1956         if (likely(sock != NULL))
1957                 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1958
1959         if (use_data_socket)
1960                 mutex_unlock(&mdev->data.mutex);
1961         else
1962                 mutex_unlock(&mdev->meta.mutex);
1963         return ok;
1964 }
1965
1966 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1967                    size_t size)
1968 {
1969         struct p_header80 h;
1970         int ok;
1971
1972         h.magic   = BE_DRBD_MAGIC;
1973         h.command = cpu_to_be16(cmd);
1974         h.length  = cpu_to_be16(size);
1975
1976         if (!drbd_get_data_sock(mdev))
1977                 return 0;
1978
1979         ok = (sizeof(h) ==
1980                 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1981         ok = ok && (size ==
1982                 drbd_send(mdev, mdev->data.socket, data, size, 0));
1983
1984         drbd_put_data_sock(mdev);
1985
1986         return ok;
1987 }
1988
1989 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1990 {
1991         struct p_rs_param_95 *p;
1992         struct socket *sock;
1993         int size, rv;
1994         const int apv = mdev->agreed_pro_version;
1995
1996         size = apv <= 87 ? sizeof(struct p_rs_param)
1997                 : apv == 88 ? sizeof(struct p_rs_param)
1998                         + strlen(mdev->sync_conf.verify_alg) + 1
1999                 : apv <= 94 ? sizeof(struct p_rs_param_89)
2000                 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2001
2002         /* used from admin command context and receiver/worker context.
2003          * to avoid kmalloc, grab the socket right here,
2004          * then use the pre-allocated sbuf there */
2005         mutex_lock(&mdev->data.mutex);
2006         sock = mdev->data.socket;
2007
2008         if (likely(sock != NULL)) {
2009                 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
2010
2011                 p = &mdev->data.sbuf.rs_param_95;
2012
2013                 /* initialize verify_alg and csums_alg */
2014                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2015
2016                 p->rate = cpu_to_be32(sc->rate);
2017                 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
2018                 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
2019                 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
2020                 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
2021
2022                 if (apv >= 88)
2023                         strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
2024                 if (apv >= 89)
2025                         strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
2026
2027                 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
2028         } else
2029                 rv = 0; /* not ok */
2030
2031         mutex_unlock(&mdev->data.mutex);
2032
2033         return rv;
2034 }
2035
2036 int drbd_send_protocol(struct drbd_conf *mdev)
2037 {
2038         struct p_protocol *p;
2039         int size, cf, rv;
2040
2041         size = sizeof(struct p_protocol);
2042
2043         if (mdev->agreed_pro_version >= 87)
2044                 size += strlen(mdev->net_conf->integrity_alg) + 1;
2045
2046         /* we must not recurse into our own queue,
2047          * as that is blocked during handshake */
2048         p = kmalloc(size, GFP_NOIO);
2049         if (p == NULL)
2050                 return 0;
2051
2052         p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
2053         p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
2054         p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
2055         p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
2056         p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
2057
2058         cf = 0;
2059         if (mdev->net_conf->want_lose)
2060                 cf |= CF_WANT_LOSE;
2061         if (mdev->net_conf->dry_run) {
2062                 if (mdev->agreed_pro_version >= 92)
2063                         cf |= CF_DRY_RUN;
2064                 else {
2065                         dev_err(DEV, "--dry-run is not supported by peer\n");
2066                         kfree(p);
2067                         return -1;
2068                 }
2069         }
2070         p->conn_flags    = cpu_to_be32(cf);
2071
2072         if (mdev->agreed_pro_version >= 87)
2073                 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
2074
2075         rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
2076                            (struct p_header80 *)p, size);
2077         kfree(p);
2078         return rv;
2079 }
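
/* Worked example (illustrative only): a node configured with want_lose and
 * dry_run, talking to a peer with protocol version >= 92, sends
 * conn_flags = CF_WANT_LOSE | CF_DRY_RUN; against an older peer the dry_run
 * case bails out with -1 instead.
 */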
2080
2081 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2082 {
2083         struct p_uuids p;
2084         int i;
2085
2086         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2087                 return 1;
2088
2089         for (i = UI_CURRENT; i < UI_SIZE; i++)
2090                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2091
2092         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2093         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2094         uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2095         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2096         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2097         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2098
2099         put_ldev(mdev);
2100
2101         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
2102                              (struct p_header80 *)&p, sizeof(p));
2103 }
2104
2105 int drbd_send_uuids(struct drbd_conf *mdev)
2106 {
2107         return _drbd_send_uuids(mdev, 0);
2108 }
2109
2110 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2111 {
2112         return _drbd_send_uuids(mdev, 8);
2113 }
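
/* For reference, derived from _drbd_send_uuids() above: the uuid_flags value
 * sent in p.uuid[UI_FLAGS] is a bit field:
 *	1 = want_lose is set, 2 = we were a crashed primary,
 *	4 = our disk is about to become D_INCONSISTENT,
 *	8 = skip the initial sync (used by drbd_send_uuids_skip_initial_sync()).
 */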
2114
2115 void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2116 {
2117         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2118                 u64 *uuid = mdev->ldev->md.uuid;
2119                 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2120                      text,
2121                      (unsigned long long)uuid[UI_CURRENT],
2122                      (unsigned long long)uuid[UI_BITMAP],
2123                      (unsigned long long)uuid[UI_HISTORY_START],
2124                      (unsigned long long)uuid[UI_HISTORY_END]);
2125                 put_ldev(mdev);
2126         } else {
2127                 dev_info(DEV, "%s effective data uuid: %016llX\n",
2128                                 text,
2129                                 (unsigned long long)mdev->ed_uuid);
2130         }
2131 }
2132
2133 int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
2134 {
2135         struct p_rs_uuid p;
2136         u64 uuid;
2137
2138         D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
2139
2140         uuid = mdev->ldev->md.uuid[UI_BITMAP];
2141         if (uuid && uuid != UUID_JUST_CREATED)
2142                 uuid = uuid + UUID_NEW_BM_OFFSET;
2143         else
2144                 get_random_bytes(&uuid, sizeof(u64));
2145         drbd_uuid_set(mdev, UI_BITMAP, uuid);
2146         drbd_print_uuids(mdev, "updated sync UUID");
2147         drbd_md_sync(mdev);
2148         p.uuid = cpu_to_be64(uuid);
2149
2150         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
2151                              (struct p_header80 *)&p, sizeof(p));
2152 }
2153
2154 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
2155 {
2156         struct p_sizes p;
2157         sector_t d_size, u_size;
2158         int q_order_type, max_bio_size;
2159         int ok;
2160
2161         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2162                 D_ASSERT(mdev->ldev->backing_bdev);
2163                 d_size = drbd_get_max_capacity(mdev->ldev);
2164                 u_size = mdev->ldev->dc.disk_size;
2165                 q_order_type = drbd_queue_order_type(mdev);
2166                 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2167                 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
2168                 put_ldev(mdev);
2169         } else {
2170                 d_size = 0;
2171                 u_size = 0;
2172                 q_order_type = QUEUE_ORDERED_NONE;
2173                 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
2174         }
2175
2176         /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
2177         if (mdev->agreed_pro_version <= 94)
2178                 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
2179
2180         p.d_size = cpu_to_be64(d_size);
2181         p.u_size = cpu_to_be64(u_size);
2182         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
2183         p.max_bio_size = cpu_to_be32(max_bio_size);
2184         p.queue_order_type = cpu_to_be16(q_order_type);
2185         p.dds_flags = cpu_to_be16(flags);
2186
2187         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
2188                            (struct p_header80 *)&p, sizeof(p));
2189         return ok;
2190 }
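
/* Worked example (illustrative only): a backing device reporting
 * queue_max_hw_sectors() == 512 yields 512 << 9 = 256 KiB, which is then
 * limited to at most DRBD_MAX_BIO_SIZE; for peers speaking protocol <= 94
 * (drbd up to 8.3.7) it is additionally limited to DRBD_MAX_SIZE_H80_PACKET,
 * i.e. 32 KiB, per the comment above.
 */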
2191
2192 /**
2193  * drbd_send_current_state() - Sends the drbd state to the peer
2194  * @mdev:       DRBD device.
2195  */
2196 int drbd_send_current_state(struct drbd_conf *mdev)
2197 {
2198         struct socket *sock;
2199         struct p_state p;
2200         int ok = 0;
2201
2202         /* Grab the state lock so we won't send state if we're in the middle
2203          * of a cluster-wide state change on another thread */
2204         drbd_state_lock(mdev);
2205
2206         mutex_lock(&mdev->data.mutex);
2207
2208         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2209         sock = mdev->data.socket;
2210
2211         if (likely(sock != NULL)) {
2212                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2213                                     (struct p_header80 *)&p, sizeof(p), 0);
2214         }
2215
2216         mutex_unlock(&mdev->data.mutex);
2217
2218         drbd_state_unlock(mdev);
2219         return ok;
2220 }
2221
2222 /**
2223  * drbd_send_state() - After a state change, sends the new state to the peer
2224  * @mdev:       DRBD device.
2225  * @state:      the state to send, not necessarily the current state.
2226  *
2227  * Each state change queues an "after_state_ch" work, which will eventually
2228  * send the resulting new state to the peer. If more state changes happen
2229  * between queuing and processing of the after_state_ch work, we still
2230  * want to send each intermediary state in the order it occurred.
2231  */
2232 int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
2233 {
2234         struct socket *sock;
2235         struct p_state p;
2236         int ok = 0;
2237
2238         mutex_lock(&mdev->data.mutex);
2239
2240         p.state = cpu_to_be32(state.i);
2241         sock = mdev->data.socket;
2242
2243         if (likely(sock != NULL)) {
2244                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2245                                     (struct p_header80 *)&p, sizeof(p), 0);
2246         }
2247
2248         mutex_unlock(&mdev->data.mutex);
2249
2250         return ok;
2251 }
2252
2253 int drbd_send_state_req(struct drbd_conf *mdev,
2254         union drbd_state mask, union drbd_state val)
2255 {
2256         struct p_req_state p;
2257
2258         p.mask    = cpu_to_be32(mask.i);
2259         p.val     = cpu_to_be32(val.i);
2260
2261         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
2262                              (struct p_header80 *)&p, sizeof(p));
2263 }
2264
2265 int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
2266 {
2267         struct p_req_state_reply p;
2268
2269         p.retcode    = cpu_to_be32(retcode);
2270
2271         return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
2272                              (struct p_header80 *)&p, sizeof(p));
2273 }
2274
2275 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2276         struct p_compressed_bm *p,
2277         struct bm_xfer_ctx *c)
2278 {
2279         struct bitstream bs;
2280         unsigned long plain_bits;
2281         unsigned long tmp;
2282         unsigned long rl;
2283         unsigned len;
2284         unsigned toggle;
2285         int bits;
2286
2287         /* may we use this feature? */
2288         if ((mdev->sync_conf.use_rle == 0) ||
2289                 (mdev->agreed_pro_version < 90))
2290                         return 0;
2291
2292         if (c->bit_offset >= c->bm_bits)
2293                 return 0; /* nothing to do. */
2294
2295         /* use at most this many bytes */
2296         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2297         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2298         /* plain bits covered in this code string */
2299         plain_bits = 0;
2300
2301         /* p->encoding & 0x80 stores whether the first run length is set.
2302          * bit offset is implicit.
2303          * start with toggle == 2 to be able to tell the first iteration */
2304         toggle = 2;
2305
2306         /* see how many plain bits we can stuff into one packet
2307          * using RLE and VLI. */
2308         do {
2309                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2310                                     : _drbd_bm_find_next(mdev, c->bit_offset);
2311                 if (tmp == -1UL)
2312                         tmp = c->bm_bits;
2313                 rl = tmp - c->bit_offset;
2314
2315                 if (toggle == 2) { /* first iteration */
2316                         if (rl == 0) {
2317                                 /* the first checked bit was set,
2318                                  * store start value, */
2319                                 DCBP_set_start(p, 1);
2320                                 /* but skip encoding of zero run length */
2321                                 toggle = !toggle;
2322                                 continue;
2323                         }
2324                         DCBP_set_start(p, 0);
2325                 }
2326
2327                 /* paranoia: catch zero runlength.
2328                  * can only happen if bitmap is modified while we scan it. */
2329                 if (rl == 0) {
2330                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2331                             "t:%u bo:%lu\n", toggle, c->bit_offset);
2332                         return -1;
2333                 }
2334
2335                 bits = vli_encode_bits(&bs, rl);
2336                 if (bits == -ENOBUFS) /* buffer full */
2337                         break;
2338                 if (bits <= 0) {
2339                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2340                         return 0;
2341                 }
2342
2343                 toggle = !toggle;
2344                 plain_bits += rl;
2345                 c->bit_offset = tmp;
2346         } while (c->bit_offset < c->bm_bits);
2347
2348         len = bs.cur.b - p->code + !!bs.cur.bit;
2349
2350         if (plain_bits < (len << 3)) {
2351                 /* incompressible with this method.
2352                  * we need to rewind both word and bit position. */
2353                 c->bit_offset -= plain_bits;
2354                 bm_xfer_ctx_bit_to_word_offset(c);
2355                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2356                 return 0;
2357         }
2358
2359         /* RLE + VLI was able to compress it just fine.
2360          * update c->word_offset. */
2361         bm_xfer_ctx_bit_to_word_offset(c);
2362
2363         /* store pad_bits */
2364         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2365
2366         return len;
2367 }
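
/* Worked example (illustrative only): a chunk of bitmap that starts with
 * 5 clear bits, then 3 set bits, then 7 clear bits is encoded as the VLI run
 * lengths 5, 3, 7 with the start flag set to 0 (the first run describes clear
 * bits); had the chunk started with a set bit, DCBP_set_start(p, 1) would be
 * used and the leading zero-length run would be skipped.
 */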
2368
2369 /**
2370  * send_bitmap_rle_or_plain
2371  *
2372  * Return 0 when done, 1 when another iteration is needed, and a negative error
2373  * code upon failure.
2374  */
2375 static int
2376 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
2377                          struct p_header80 *h, struct bm_xfer_ctx *c)
2378 {
2379         struct p_compressed_bm *p = (void*)h;
2380         unsigned long num_words;
2381         int len;
2382         int ok;
2383
2384         len = fill_bitmap_rle_bits(mdev, p, c);
2385
2386         if (len < 0)
2387                 return -EIO;
2388
2389         if (len) {
2390                 DCBP_set_code(p, RLE_VLI_Bits);
2391                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2392                         sizeof(*p) + len, 0);
2393
2394                 c->packets[0]++;
2395                 c->bytes[0] += sizeof(*p) + len;
2396
2397                 if (c->bit_offset >= c->bm_bits)
2398                         len = 0; /* DONE */
2399         } else {
2400                 /* was not compressible.
2401                  * send a buffer full of plain text bits instead. */
2402                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2403                 len = num_words * sizeof(long);
2404                 if (len)
2405                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2406                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
2407                                    h, sizeof(struct p_header80) + len, 0);
2408                 c->word_offset += num_words;
2409                 c->bit_offset = c->word_offset * BITS_PER_LONG;
2410
2411                 c->packets[1]++;
2412                 c->bytes[1] += sizeof(struct p_header80) + len;
2413
2414                 if (c->bit_offset > c->bm_bits)
2415                         c->bit_offset = c->bm_bits;
2416         }
2417         if (ok) {
2418                 if (len == 0) {
2419                         INFO_bm_xfer_stats(mdev, "send", c);
2420                         return 0;
2421                 } else
2422                         return 1;
2423         }
2424         return -EIO;
2425 }
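
/* For reference, derived from the code above: c->packets[0]/c->bytes[0] count
 * the RLE-compressed bitmap packets, c->packets[1]/c->bytes[1] the plain
 * P_BITMAP fallback packets; INFO_bm_xfer_stats() reports both once the
 * transfer is complete.
 */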
2426
2427 /* See the comment at receive_bitmap() */
2428 int _drbd_send_bitmap(struct drbd_conf *mdev)
2429 {
2430         struct bm_xfer_ctx c;
2431         struct p_header80 *p;
2432         int err;
2433
2434         ERR_IF(!mdev->bitmap) return false;
2435
2436         /* maybe we should use some per thread scratch page,
2437          * and allocate that during initial device creation? */
2438         p = (struct p_header80 *) __get_free_page(GFP_NOIO);
2439         if (!p) {
2440                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2441                 return false;
2442         }
2443
2444         if (get_ldev(mdev)) {
2445                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2446                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2447                         drbd_bm_set_all(mdev);
2448                         if (drbd_bm_write(mdev)) {
2449                                 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2450                                  * but otherwise process as per normal - need to tell other
2451                                  * side that a full resync is required! */
2452                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
2453                         } else {
2454                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2455                                 drbd_md_sync(mdev);
2456                         }
2457                 }
2458                 put_ldev(mdev);
2459         }
2460
2461         c = (struct bm_xfer_ctx) {
2462                 .bm_bits = drbd_bm_bits(mdev),
2463                 .bm_words = drbd_bm_words(mdev),
2464         };
2465
2466         do {
2467                 err = send_bitmap_rle_or_plain(mdev, p, &c);
2468         } while (err > 0);
2469
2470         free_page((unsigned long) p);
2471         return err == 0;
2472 }
2473
2474 int drbd_send_bitmap(struct drbd_conf *mdev)
2475 {
2476         int err;
2477
2478         if (!drbd_get_data_sock(mdev))
2479                 return -1;
2480         err = !_drbd_send_bitmap(mdev);
2481         drbd_put_data_sock(mdev);
2482         return err;
2483 }
2484
2485 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2486 {
2487         int ok;
2488         struct p_barrier_ack p;
2489
2490         p.barrier  = barrier_nr;
2491         p.set_size = cpu_to_be32(set_size);
2492
2493         if (mdev->state.conn < C_CONNECTED)
2494                 return false;
2495         ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2496                         (struct p_header80 *)&p, sizeof(p));
2497         return ok;
2498 }
2499
2500 /**
2501  * _drbd_send_ack() - Sends an ack packet
2502  * @mdev:       DRBD device.
2503  * @cmd:        Packet command code.
2504  * @sector:     sector, needs to be in big endian byte order
2505  * @blksize:    size in bytes, needs to be in big endian byte order
2506  * @block_id:   Id, big endian byte order
2507  */
2508 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2509                           u64 sector,
2510                           u32 blksize,
2511                           u64 block_id)
2512 {
2513         int ok;
2514         struct p_block_ack p;
2515
2516         p.sector   = sector;
2517         p.block_id = block_id;
2518         p.blksize  = blksize;
2519         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2520
2521         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2522                 return false;
2523         ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2524                                 (struct p_header80 *)&p, sizeof(p));
2525         return ok;
2526 }
2527
2528 /* dp->sector and dp->block_id already/still in network byte order,
2529  * data_size is payload size according to dp->head,
2530  * and may need to be corrected for digest size. */
2531 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2532                      struct p_data *dp, int data_size)
2533 {
2534         data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2535                 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
2536         return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2537                               dp->block_id);
2538 }
2539
2540 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2541                      struct p_block_req *rp)
2542 {
2543         return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2544 }
2545
2546 /**
2547  * drbd_send_ack() - Sends an ack packet
2548  * @mdev:       DRBD device.
2549  * @cmd:        Packet command code.
2550  * @e:          Epoch entry.
2551  */
2552 int drbd_send_ack(struct drbd_conf *mdev,
2553         enum drbd_packets cmd, struct drbd_epoch_entry *e)
2554 {
2555         return _drbd_send_ack(mdev, cmd,
2556                               cpu_to_be64(e->sector),
2557                               cpu_to_be32(e->size),
2558                               e->block_id);
2559 }
2560
2561 /* This function misuses the block_id field to signal whether the blocks
2562  * are in sync or not. */
2563 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2564                      sector_t sector, int blksize, u64 block_id)
2565 {
2566         return _drbd_send_ack(mdev, cmd,
2567                               cpu_to_be64(sector),
2568                               cpu_to_be32(blksize),
2569                               cpu_to_be64(block_id));
2570 }
2571
2572 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2573                        sector_t sector, int size, u64 block_id)
2574 {
2575         int ok;
2576         struct p_block_req p;
2577
2578         p.sector   = cpu_to_be64(sector);
2579         p.block_id = block_id;
2580         p.blksize  = cpu_to_be32(size);
2581
2582         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2583                                 (struct p_header80 *)&p, sizeof(p));
2584         return ok;
2585 }
2586
2587 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2588                             sector_t sector, int size,
2589                             void *digest, int digest_size,
2590                             enum drbd_packets cmd)
2591 {
2592         int ok;
2593         struct p_block_req p;
2594
2595         p.sector   = cpu_to_be64(sector);
2596         p.block_id = BE_DRBD_MAGIC + 0xbeef;
2597         p.blksize  = cpu_to_be32(size);
2598
2599         p.head.magic   = BE_DRBD_MAGIC;
2600         p.head.command = cpu_to_be16(cmd);
2601         p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
2602
2603         mutex_lock(&mdev->data.mutex);
2604
2605         ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2606         ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2607
2608         mutex_unlock(&mdev->data.mutex);
2609
2610         return ok;
2611 }
2612
2613 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2614 {
2615         int ok;
2616         struct p_block_req p;
2617
2618         p.sector   = cpu_to_be64(sector);
2619         p.block_id = BE_DRBD_MAGIC + 0xbabe;
2620         p.blksize  = cpu_to_be32(size);
2621
2622         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2623                            (struct p_header80 *)&p, sizeof(p));
2624         return ok;
2625 }
2626
2627 /* called on sndtimeo
2628  * returns false if we should retry,
2629  * true if we think connection is dead
2630  */
2631 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2632 {
2633         int drop_it;
2634         /* long elapsed = (long)(jiffies - mdev->last_received); */
2635
2636         drop_it =   mdev->meta.socket == sock
2637                 || !mdev->asender.task
2638                 || get_t_state(&mdev->asender) != Running
2639                 || mdev->state.conn < C_CONNECTED;
2640
2641         if (drop_it)
2642                 return true;
2643
2644         drop_it = !--mdev->ko_count;
2645         if (!drop_it) {
2646                 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2647                        current->comm, current->pid, mdev->ko_count);
2648                 request_ping(mdev);
2649         }
2650
2651         return drop_it; /* && (mdev->state == R_PRIMARY) */;
2652 }
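
/* Worked example (illustrative only): with mdev->ko_count at 3, the first two
 * send timeouts on the data socket merely log the countdown and request_ping();
 * the third timeout makes this function return true, and the connection is
 * treated as dead.
 */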
2653
2654 /* The idea of sendpage seems to be to put some kind of reference
2655  * to the page into the skb, and to hand it over to the NIC. In
2656  * this process get_page() gets called.
2657  *
2658  * As soon as the page was really sent over the network put_page()
2659  * gets called by some part of the network layer. [ NIC driver? ]
2660  *
2661  * [ get_page() / put_page() increment/decrement the count. If count
2662  *   reaches 0 the page will be freed. ]
2663  *
2664  * This works nicely with pages from FSs.
2665  * But this means that in protocol A we might signal IO completion too early!
2666  *
2667  * In order not to corrupt data during a resync we must make sure
2668  * that we do not reuse our own buffer pages (EEs) too early; therefore
2669  * we have the net_ee list.
2670  *
2671  * XFS seems to have problems, still, it submits pages with page_count == 0!
2672  * As a workaround, we disable sendpage on pages
2673  * with page_count == 0 or PageSlab.
2674  */
2675 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2676                    int offset, size_t size, unsigned msg_flags)
2677 {
2678         int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
2679         kunmap(page);
2680         if (sent == size)
2681                 mdev->send_cnt += size>>9;
2682         return sent == size;
2683 }
2684
2685 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2686                     int offset, size_t size, unsigned msg_flags)
2687 {
2688         mm_segment_t oldfs = get_fs();
2689         int sent, ok;
2690         int len = size;
2691
2692         /* e.g. XFS meta- & log-data is in slab pages, which have a
2693          * page_count of 0 and/or have PageSlab() set.
2694          * we cannot use sendpage for those, as that does get_page();
2695          * put_page(); and would either trigger a VM_BUG directly, or
2696          * __page_cache_release() a page that is actually still referenced
2697          * by someone, leading to some obscure delayed Oops somewhere else. */
2698         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2699                 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
2700
2701         msg_flags |= MSG_NOSIGNAL;
2702         drbd_update_congested(mdev);
2703         set_fs(KERNEL_DS);
2704         do {
2705                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2706                                                         offset, len,
2707                                                         msg_flags);
2708                 if (sent == -EAGAIN) {
2709                         if (we_should_drop_the_connection(mdev,
2710                                                           mdev->data.socket))
2711                                 break;
2712                         else
2713                                 continue;
2714                 }
2715                 if (sent <= 0) {
2716                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2717                              __func__, (int)size, len, sent);
2718                         break;
2719                 }
2720                 len    -= sent;
2721                 offset += sent;
2722         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2723         set_fs(oldfs);
2724         clear_bit(NET_CONGESTED, &mdev->flags);
2725
2726         ok = (len == 0);
2727         if (likely(ok))
2728                 mdev->send_cnt += size>>9;
2729         return ok;
2730 }
2731
2732 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2733 {
2734         struct bio_vec *bvec;
2735         int i;
2736         /* hint all but last page with MSG_MORE */
2737         bio_for_each_segment(bvec, bio, i) {
2738                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2739                                      bvec->bv_offset, bvec->bv_len,
2740                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2741                         return 0;
2742         }
2743         return 1;
2744 }
2745
2746 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2747 {
2748         struct bio_vec *bvec;
2749         int i;
2750         /* hint all but last page with MSG_MORE */
2751         bio_for_each_segment(bvec, bio, i) {
2752                 if (!_drbd_send_page(mdev, bvec->bv_page,
2753                                      bvec->bv_offset, bvec->bv_len,
2754                                      i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
2755                         return 0;
2756         }
2757         return 1;
2758 }
2759
2760 static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2761 {
2762         struct page *page = e->pages;
2763         unsigned len = e->size;
2764         /* hint all but last page with MSG_MORE */
2765         page_chain_for_each(page) {
2766                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
2767                 if (!_drbd_send_page(mdev, page, 0, l,
2768                                 page_chain_next(page) ? MSG_MORE : 0))
2769                         return 0;
2770                 len -= l;
2771         }
2772         return 1;
2773 }
2774
2775 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2776 {
2777         if (mdev->agreed_pro_version >= 95)
2778                 return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
2779                         (bi_rw & REQ_FUA ? DP_FUA : 0) |
2780                         (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2781                         (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2782         else
2783                 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
2784 }
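     /* Example: for a peer speaking protocol 95 or newer, a bio submitted
      * with REQ_SYNC|REQ_FUA|REQ_FLUSH is encoded as
      * DP_RW_SYNC|DP_FUA|DP_FLUSH; older peers are only ever sent
      * DP_RW_SYNC (or no flags at all). */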
2785
2786 /* Used to send write requests
2787  * R_PRIMARY -> Peer    (P_DATA)
2788  */
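     /* On the wire a P_DATA packet consists of struct p_data (an h80 header
      * for requests up to DRBD_MAX_SIZE_H80_PACKET, the larger h95 header
      * otherwise, plus sector, block_id, seq_num and dp_flags), optionally
      * followed by a dgs-byte data-integrity digest, followed by the payload
      * itself. */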
2789 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2790 {
2791         int ok = 1;
2792         struct p_data p;
2793         unsigned int dp_flags = 0;
2794         void *dgb;
2795         int dgs;
2796
2797         if (!drbd_get_data_sock(mdev))
2798                 return 0;
2799
2800         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2801                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2802
2803         if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
2804                 p.head.h80.magic   = BE_DRBD_MAGIC;
2805                 p.head.h80.command = cpu_to_be16(P_DATA);
2806                 p.head.h80.length  =
2807                         cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2808         } else {
2809                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2810                 p.head.h95.command = cpu_to_be16(P_DATA);
2811                 p.head.h95.length  =
2812                         cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2813         }
2814
2815         p.sector   = cpu_to_be64(req->sector);
2816         p.block_id = (unsigned long)req;
2817         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2818
2819         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2820
2821         if (mdev->state.conn >= C_SYNC_SOURCE &&
2822             mdev->state.conn <= C_PAUSED_SYNC_T)
2823                 dp_flags |= DP_MAY_SET_IN_SYNC;
2824
2825         p.dp_flags = cpu_to_be32(dp_flags);
2826         set_bit(UNPLUG_REMOTE, &mdev->flags);
2827         ok = (sizeof(p) ==
2828                 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
2829         if (ok && dgs) {
2830                 dgb = mdev->int_dig_out;
2831                 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2832                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2833         }
2834         if (ok) {
2835                 /* For protocol A, we have to memcpy the payload into
2836                  * socket buffers, as the request may complete as soon as we
2837                  * have handed the data over to tcp, at which point the data
2838                  * pages may become invalid.
2839                  *
2840                  * With data integrity enabled, we copy it as well, so that even
2841                  * if the bio pages are still being modified, the data on the wire
2842                  * does not change; thus, if the digest checks out OK after sending
2843                  * on this side but does not match on the receiving side, the
2844                  * corruption must have happened elsewhere.
2845                  */
2846                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
2847                         ok = _drbd_send_bio(mdev, req->master_bio);
2848                 else
2849                         ok = _drbd_send_zc_bio(mdev, req->master_bio);
2850
2851                 /* double check digest, sometimes buffers have been modified in flight. */
2852                 if (dgs > 0 && dgs <= 64) {
2853                         /* 64 bytes (512 bits) is the largest digest size
2854                          * currently supported by the kernel crypto API. */
2855                         unsigned char digest[64];
2856                         drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2857                         if (memcmp(mdev->int_dig_out, digest, dgs)) {
2858                                 dev_warn(DEV,
2859                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2860                                         (unsigned long long)req->sector, req->size);
2861                         }
2862                 } /* else if (dgs > 64) {
2863                      ... Be noisy about digest too large ...
2864                 } */
2865         }
2866
2867         drbd_put_data_sock(mdev);
2868
2869         return ok;
2870 }
2871
2872 /* answer packet, used to send data back for read requests:
2873  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
2874  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
2875  */
2876 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2877                     struct drbd_epoch_entry *e)
2878 {
2879         int ok;
2880         struct p_data p;
2881         void *dgb;
2882         int dgs;
2883
2884         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2885                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2886
2887         if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
2888                 p.head.h80.magic   = BE_DRBD_MAGIC;
2889                 p.head.h80.command = cpu_to_be16(cmd);
2890                 p.head.h80.length  =
2891                         cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2892         } else {
2893                 p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
2894                 p.head.h95.command = cpu_to_be16(cmd);
2895                 p.head.h95.length  =
2896                         cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2897         }
2898
2899         p.sector   = cpu_to_be64(e->sector);
2900         p.block_id = e->block_id;
2901         /* p.seq_num  = 0;    No sequence numbers here.. */
2902
2903         /* Only called by our kernel thread.
2904          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2905          * in response to an admin command or module unload.
2906          */
2907         if (!drbd_get_data_sock(mdev))
2908                 return 0;
2909
2910         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
2911         if (ok && dgs) {
2912                 dgb = mdev->int_dig_out;
2913                 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
2914                 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
2915         }
2916         if (ok)
2917                 ok = _drbd_send_zc_ee(mdev, e);
2918
2919         drbd_put_data_sock(mdev);
2920
2921         return ok;
2922 }
2923
2924 int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2925 {
2926         struct p_block_desc p;
2927
2928         p.sector  = cpu_to_be64(req->sector);
2929         p.blksize = cpu_to_be32(req->size);
2930
2931         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2932 }
2933
2934 /*
2935   drbd_send distinguishes two cases:
2936
2937   Packets sent via the data socket "sock"
2938   and packets sent via the meta data socket "msock"
2939
2940                     sock                      msock
2941   -----------------+-------------------------+------------------------------
2942   timeout           conf.timeout / 2          conf.timeout / 2
2943   timeout action    send a ping via msock     Abort communication
2944                                               and close all sockets
2945 */
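     /* drbd_send() below is the common low-level send loop for both sockets:
      * -EAGAIN is retried until we_should_drop_the_connection() says to give
      * up, -EINTR just flushes signals, and a final error forces the
      * connection state to C_TIMEOUT (timeout) or C_BROKEN_PIPE (any other
      * error). */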
2946
2947 /*
2948  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2949  */
2950 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2951               void *buf, size_t size, unsigned msg_flags)
2952 {
2953         struct kvec iov;
2954         struct msghdr msg;
2955         int rv, sent = 0;
2956
2957         if (!sock)
2958                 return -1000;
2959
2960         /* THINK  if (signal_pending) return ... ? */
2961
2962         iov.iov_base = buf;
2963         iov.iov_len  = size;
2964
2965         msg.msg_name       = NULL;
2966         msg.msg_namelen    = 0;
2967         msg.msg_control    = NULL;
2968         msg.msg_controllen = 0;
2969         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
2970
2971         if (sock == mdev->data.socket) {
2972                 mdev->ko_count = mdev->net_conf->ko_count;
2973                 drbd_update_congested(mdev);
2974         }
2975         do {
2976                 /* STRANGE
2977                  * tcp_sendmsg does _not_ use its size parameter at all ?
2978                  *
2979                  * -EAGAIN on timeout, -EINTR on signal.
2980                  */
2981 /* THINK
2982  * do we need to block DRBD_SIG if sock == &meta.socket ??
2983  * otherwise wake_asender() might interrupt some send_*Ack !
2984  */
2985                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2986                 if (rv == -EAGAIN) {
2987                         if (we_should_drop_the_connection(mdev, sock))
2988                                 break;
2989                         else
2990                                 continue;
2991                 }
2992                 D_ASSERT(rv != 0);
2993                 if (rv == -EINTR) {
2994                         flush_signals(current);
2995                         rv = 0;
2996                 }
2997                 if (rv < 0)
2998                         break;
2999                 sent += rv;
3000                 iov.iov_base += rv;
3001                 iov.iov_len  -= rv;
3002         } while (sent < size);
3003
3004         if (sock == mdev->data.socket)
3005                 clear_bit(NET_CONGESTED, &mdev->flags);
3006
3007         if (rv <= 0) {
3008                 if (rv != -EAGAIN) {
3009                         dev_err(DEV, "%s_sendmsg returned %d\n",
3010                             sock == mdev->meta.socket ? "msock" : "sock",
3011                             rv);
3012                         drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
3013                 } else
3014                         drbd_force_state(mdev, NS(conn, C_TIMEOUT));
3015         }
3016
3017         return sent;
3018 }
3019
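     /* drbd_open() refuses writable opens while this node is not R_PRIMARY
      * (-EROFS) and, unless allow_oos is set, refuses read-only opens as
      * well (-EMEDIUMTYPE); open_cnt is only bumped under req_lock so the
      * role check and the count stay consistent. */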
3020 static int drbd_open(struct block_device *bdev, fmode_t mode)
3021 {
3022         struct drbd_conf *mdev = bdev->bd_disk->private_data;
3023         unsigned long flags;
3024         int rv = 0;
3025
3026         mutex_lock(&drbd_main_mutex);
3027         spin_lock_irqsave(&mdev->req_lock, flags);
3028         /* to have a stable mdev->state.role
3029          * and no race with updating open_cnt */
3030
3031         if (mdev->state.role != R_PRIMARY) {
3032                 if (mode & FMODE_WRITE)
3033                         rv = -EROFS;
3034                 else if (!allow_oos)
3035                         rv = -EMEDIUMTYPE;
3036         }
3037
3038         if (!rv)
3039                 mdev->open_cnt++;
3040         spin_unlock_irqrestore(&mdev->req_lock, flags);
3041         mutex_unlock(&drbd_main_mutex);
3042
3043         return rv;
3044 }
3045
3046 static int drbd_release(struct gendisk *gd, fmode_t mode)
3047 {
3048         struct drbd_conf *mdev = gd->private_data;
3049         mutex_lock(&drbd_main_mutex);
3050         mdev->open_cnt--;
3051         mutex_unlock(&drbd_main_mutex);
3052         return 0;
3053 }
3054
3055 static void drbd_set_defaults(struct drbd_conf *mdev)
3056 {
3057         /* This way we get a compile error when sync_conf grows
3058            and we forget to initialize it here */
3059         mdev->sync_conf = (struct syncer_conf) {
3060                 /* .rate = */           DRBD_RATE_DEF,
3061                 /* .after = */          DRBD_AFTER_DEF,
3062                 /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
3063                 /* .verify_alg = */     {}, 0,
3064                 /* .cpu_mask = */       {}, 0,
3065                 /* .csums_alg = */      {}, 0,
3066                 /* .use_rle = */        0,
3067                 /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
3068                 /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
3069                 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
3070                 /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
3071                 /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
3072                 /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
3073         };
3074
3075         /* Have to do it this way, because the bitfield layout differs
3076            between big endian and little endian */
3077         mdev->state = (union drbd_state) {
3078                 { .role = R_SECONDARY,
3079                   .peer = R_UNKNOWN,
3080                   .conn = C_STANDALONE,
3081                   .disk = D_DISKLESS,
3082                   .pdsk = D_UNKNOWN,
3083                   .susp = 0,
3084                   .susp_nod = 0,
3085                   .susp_fen = 0
3086                 } };
3087 }
3088
3089 void drbd_init_set_defaults(struct drbd_conf *mdev)
3090 {
3091         /* the memset(,0,) did most of this.
3092          * note: only assignments, no allocation in here */
3093
3094         drbd_set_defaults(mdev);
3095
3096         atomic_set(&mdev->ap_bio_cnt, 0);
3097         atomic_set(&mdev->ap_pending_cnt, 0);
3098         atomic_set(&mdev->rs_pending_cnt, 0);
3099         atomic_set(&mdev->unacked_cnt, 0);
3100         atomic_set(&mdev->local_cnt, 0);
3101         atomic_set(&mdev->net_cnt, 0);
3102         atomic_set(&mdev->packet_seq, 0);
3103         atomic_set(&mdev->pp_in_use, 0);
3104         atomic_set(&mdev->pp_in_use_by_net, 0);
3105         atomic_set(&mdev->rs_sect_in, 0);
3106         atomic_set(&mdev->rs_sect_ev, 0);
3107         atomic_set(&mdev->ap_in_flight, 0);
3108         atomic_set(&mdev->md_io_in_use, 0);
3109
3110         mutex_init(&mdev->data.mutex);
3111         mutex_init(&mdev->meta.mutex);
3112         sema_init(&mdev->data.work.s, 0);
3113         sema_init(&mdev->meta.work.s, 0);
3114         mutex_init(&mdev->state_mutex);
3115
3116         spin_lock_init(&mdev->data.work.q_lock);
3117         spin_lock_init(&mdev->meta.work.q_lock);
3118
3119         spin_lock_init(&mdev->al_lock);
3120         spin_lock_init(&mdev->req_lock);
3121         spin_lock_init(&mdev->peer_seq_lock);
3122         spin_lock_init(&mdev->epoch_lock);
3123
3124         INIT_LIST_HEAD(&mdev->active_ee);
3125         INIT_LIST_HEAD(&mdev->sync_ee);
3126         INIT_LIST_HEAD(&mdev->done_ee);
3127         INIT_LIST_HEAD(&mdev->read_ee);
3128         INIT_LIST_HEAD(&mdev->net_ee);
3129         INIT_LIST_HEAD(&mdev->resync_reads);
3130         INIT_LIST_HEAD(&mdev->data.work.q);
3131         INIT_LIST_HEAD(&mdev->meta.work.q);
3132         INIT_LIST_HEAD(&mdev->resync_work.list);
3133         INIT_LIST_HEAD(&mdev->unplug_work.list);
3134         INIT_LIST_HEAD(&mdev->go_diskless.list);
3135         INIT_LIST_HEAD(&mdev->md_sync_work.list);
3136         INIT_LIST_HEAD(&mdev->start_resync_work.list);
3137         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
3138
3139         mdev->resync_work.cb  = w_resync_timer;
3140         mdev->unplug_work.cb  = w_send_write_hint;
3141         mdev->go_diskless.cb  = w_go_diskless;
3142         mdev->md_sync_work.cb = w_md_sync;
3143         mdev->bm_io_work.w.cb = w_bitmap_io;
3144         mdev->start_resync_work.cb = w_start_resync;
3145         init_timer(&mdev->resync_timer);
3146         init_timer(&mdev->md_sync_timer);
3147         init_timer(&mdev->start_resync_timer);
3148         init_timer(&mdev->request_timer);
3149         mdev->resync_timer.function = resync_timer_fn;
3150         mdev->resync_timer.data = (unsigned long) mdev;
3151         mdev->md_sync_timer.function = md_sync_timer_fn;
3152         mdev->md_sync_timer.data = (unsigned long) mdev;
3153         mdev->start_resync_timer.function = start_resync_timer_fn;
3154         mdev->start_resync_timer.data = (unsigned long) mdev;
3155         mdev->request_timer.function = request_timer_fn;
3156         mdev->request_timer.data = (unsigned long) mdev;
3157
3158         init_waitqueue_head(&mdev->misc_wait);
3159         init_waitqueue_head(&mdev->state_wait);
3160         init_waitqueue_head(&mdev->net_cnt_wait);
3161         init_waitqueue_head(&mdev->ee_wait);
3162         init_waitqueue_head(&mdev->al_wait);
3163         init_waitqueue_head(&mdev->seq_wait);
3164
3165         drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3166         drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3167         drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3168
3169         mdev->agreed_pro_version = PRO_VERSION_MAX;
3170         mdev->write_ordering = WO_bdev_flush;
3171         mdev->resync_wenr = LC_FREE;
3172         mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3173         mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3174 }
3175
3176 void drbd_mdev_cleanup(struct drbd_conf *mdev)
3177 {
3178         int i;
3179         if (mdev->receiver.t_state != None)
3180                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3181                                 mdev->receiver.t_state);
3182
3183         /* no need to lock it, I'm the only thread alive */
3184         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
3185                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3186         mdev->al_writ_cnt  =
3187         mdev->bm_writ_cnt  =
3188         mdev->read_cnt     =
3189         mdev->recv_cnt     =
3190         mdev->send_cnt     =
3191         mdev->writ_cnt     =
3192         mdev->p_size       =
3193         mdev->rs_start     =
3194         mdev->rs_total     =
3195         mdev->rs_failed    = 0;
3196         mdev->rs_last_events = 0;
3197         mdev->rs_last_sect_ev = 0;
3198         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3199                 mdev->rs_mark_left[i] = 0;
3200                 mdev->rs_mark_time[i] = 0;
3201         }
3202         D_ASSERT(mdev->net_conf == NULL);
3203
3204         drbd_set_my_capacity(mdev, 0);
3205         if (mdev->bitmap) {
3206                 /* maybe never allocated. */
3207                 drbd_bm_resize(mdev, 0, 1);
3208                 drbd_bm_cleanup(mdev);
3209         }
3210
3211         drbd_free_resources(mdev);
3212         clear_bit(AL_SUSPENDED, &mdev->flags);
3213
3214         /*
3215          * currently we call drbd_init_ee only on module load, so
3216          * we may call drbd_release_ee only on module unload!
3217          */
3218         D_ASSERT(list_empty(&mdev->active_ee));
3219         D_ASSERT(list_empty(&mdev->sync_ee));
3220         D_ASSERT(list_empty(&mdev->done_ee));
3221         D_ASSERT(list_empty(&mdev->read_ee));
3222         D_ASSERT(list_empty(&mdev->net_ee));
3223         D_ASSERT(list_empty(&mdev->resync_reads));
3224         D_ASSERT(list_empty(&mdev->data.work.q));
3225         D_ASSERT(list_empty(&mdev->meta.work.q));
3226         D_ASSERT(list_empty(&mdev->resync_work.list));
3227         D_ASSERT(list_empty(&mdev->unplug_work.list));
3228         D_ASSERT(list_empty(&mdev->go_diskless.list));
3229
3230         drbd_set_defaults(mdev);
3231 }
3232
3233
3234 static void drbd_destroy_mempools(void)
3235 {
3236         struct page *page;
3237
3238         while (drbd_pp_pool) {
3239                 page = drbd_pp_pool;
3240                 drbd_pp_pool = (struct page *)page_private(page);
3241                 __free_page(page);
3242                 drbd_pp_vacant--;
3243         }
3244
3245         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3246
3247         if (drbd_ee_mempool)
3248                 mempool_destroy(drbd_ee_mempool);
3249         if (drbd_request_mempool)
3250                 mempool_destroy(drbd_request_mempool);
3251         if (drbd_ee_cache)
3252                 kmem_cache_destroy(drbd_ee_cache);
3253         if (drbd_request_cache)
3254                 kmem_cache_destroy(drbd_request_cache);
3255         if (drbd_bm_ext_cache)
3256                 kmem_cache_destroy(drbd_bm_ext_cache);
3257         if (drbd_al_ext_cache)
3258                 kmem_cache_destroy(drbd_al_ext_cache);
3259
3260         drbd_ee_mempool      = NULL;
3261         drbd_request_mempool = NULL;
3262         drbd_ee_cache        = NULL;
3263         drbd_request_cache   = NULL;
3264         drbd_bm_ext_cache    = NULL;
3265         drbd_al_ext_cache    = NULL;
3266
3267         return;
3268 }
3269
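     /* drbd_create_mempools() sets up the slab caches and mempools sized for
      * `number' in-flight elements (DRBD_MAX_BIO_SIZE / PAGE_SIZE pages per
      * configured minor), plus drbd's private page pool: a singly linked list
      * of pre-allocated pages chained through page_private, with
      * drbd_pp_vacant counting what is currently on the list. */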
3270 static int drbd_create_mempools(void)
3271 {
3272         struct page *page;
3273         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
3274         int i;
3275
3276         /* prepare our caches and mempools */
3277         drbd_request_mempool = NULL;
3278         drbd_ee_cache        = NULL;
3279         drbd_request_cache   = NULL;
3280         drbd_bm_ext_cache    = NULL;
3281         drbd_al_ext_cache    = NULL;
3282         drbd_pp_pool         = NULL;
3283
3284         /* caches */
3285         drbd_request_cache = kmem_cache_create(
3286                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3287         if (drbd_request_cache == NULL)
3288                 goto Enomem;
3289
3290         drbd_ee_cache = kmem_cache_create(
3291                 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3292         if (drbd_ee_cache == NULL)
3293                 goto Enomem;
3294
3295         drbd_bm_ext_cache = kmem_cache_create(
3296                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3297         if (drbd_bm_ext_cache == NULL)
3298                 goto Enomem;
3299
3300         drbd_al_ext_cache = kmem_cache_create(
3301                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3302         if (drbd_al_ext_cache == NULL)
3303                 goto Enomem;
3304
3305         /* mempools */
3306         drbd_request_mempool = mempool_create(number,
3307                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3308         if (drbd_request_mempool == NULL)
3309                 goto Enomem;
3310
3311         drbd_ee_mempool = mempool_create(number,
3312                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
3313         if (drbd_ee_mempool == NULL)
3314                 goto Enomem;
3315
3316         /* drbd's page pool */
3317         spin_lock_init(&drbd_pp_lock);
3318
3319         for (i = 0; i < number; i++) {
3320                 page = alloc_page(GFP_HIGHUSER);
3321                 if (!page)
3322                         goto Enomem;
3323                 set_page_private(page, (unsigned long)drbd_pp_pool);
3324                 drbd_pp_pool = page;
3325         }
3326         drbd_pp_vacant = number;
3327
3328         return 0;
3329
3330 Enomem:
3331         drbd_destroy_mempools(); /* in case we allocated some */
3332         return -ENOMEM;
3333 }
3334
3335 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3336         void *unused)
3337 {
3338         /* just so we have it.  you never know what interesting things we
3339          * might want to do here some day...
3340          */
3341
3342         return NOTIFY_DONE;
3343 }
3344
3345 static struct notifier_block drbd_notifier = {
3346         .notifier_call = drbd_notify_sys,
3347 };
3348
3349 static void drbd_release_ee_lists(struct drbd_conf *mdev)
3350 {
3351         int rr;
3352
3353         rr = drbd_release_ee(mdev, &mdev->active_ee);
3354         if (rr)
3355                 dev_err(DEV, "%d EEs in active list found!\n", rr);
3356
3357         rr = drbd_release_ee(mdev, &mdev->sync_ee);
3358         if (rr)
3359                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3360
3361         rr = drbd_release_ee(mdev, &mdev->read_ee);
3362         if (rr)
3363                 dev_err(DEV, "%d EEs in read list found!\n", rr);
3364
3365         rr = drbd_release_ee(mdev, &mdev->done_ee);
3366         if (rr)
3367                 dev_err(DEV, "%d EEs in done list found!\n", rr);
3368
3369         rr = drbd_release_ee(mdev, &mdev->net_ee);
3370         if (rr)
3371                 dev_err(DEV, "%d EEs in net list found!\n", rr);
3372 }
3373
3374 /* caution. no locking.
3375  * currently only used from module cleanup code. */
3376 static void drbd_delete_device(unsigned int minor)
3377 {
3378         struct drbd_conf *mdev = minor_to_mdev(minor);
3379
3380         if (!mdev)
3381                 return;
3382
3383         del_timer_sync(&mdev->request_timer);
3384
3385         /* paranoia asserts */
3386         if (mdev->open_cnt != 0)
3387                 dev_err(DEV, "open_cnt = %d in %s:%u\n", mdev->open_cnt,
3388                                 __FILE__ , __LINE__);
3389
3390         ERR_IF (!list_empty(&mdev->data.work.q)) {
3391                 struct list_head *lp;
3392                 list_for_each(lp, &mdev->data.work.q) {
3393                         dev_err(DEV, "lp = %p\n", lp);
3394                 }
3395         };
3396         /* end paranoia asserts */
3397
3398         del_gendisk(mdev->vdisk);
3399
3400         /* cleanup stuff that may have been allocated during
3401          * device (re-)configuration or state changes */
3402
3403         if (mdev->this_bdev)
3404                 bdput(mdev->this_bdev);
3405
3406         drbd_free_resources(mdev);
3407
3408         drbd_release_ee_lists(mdev);
3409
3410         /* should be freed on disconnect? */
3411         kfree(mdev->ee_hash);
3412         /*
3413         mdev->ee_hash_s = 0;
3414         mdev->ee_hash = NULL;
3415         */
3416
3417         lc_destroy(mdev->act_log);
3418         lc_destroy(mdev->resync);
3419
3420         kfree(mdev->p_uuid);
3421         /* mdev->p_uuid = NULL; */
3422
3423         kfree(mdev->int_dig_out);
3424         kfree(mdev->int_dig_in);
3425         kfree(mdev->int_dig_vv);
3426
3427         /* cleanup the rest that has been
3428          * allocated from drbd_new_device
3429          * and actually free the mdev itself */
3430         drbd_free_mdev(mdev);
3431 }
3432
3433 static void drbd_cleanup(void)
3434 {
3435         unsigned int i;
3436
3437         unregister_reboot_notifier(&drbd_notifier);
3438
3439         /* first remove proc,
3440          * drbdsetup uses its presence to detect
3441          * whether DRBD is loaded.
3442          * If we were to get stuck in proc removal
3443          * with netlink already deregistered,
3444          * some drbdsetup commands could wait forever
3445          * for an answer.
3446          */
3447         if (drbd_proc)
3448                 remove_proc_entry("drbd", NULL);
3449
3450         drbd_nl_cleanup();
3451
3452         if (minor_table) {
3453                 i = minor_count;
3454                 while (i--)
3455                         drbd_delete_device(i);
3456                 drbd_destroy_mempools();
3457         }
3458
3459         kfree(minor_table);
3460
3461         unregister_blkdev(DRBD_MAJOR, "drbd");
3462
3463         printk(KERN_INFO "drbd: module cleanup done.\n");
3464 }
3465
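     /* The single-character congestion_reason set below encodes why we
      * reported congestion: 'd' - drbd has frozen application IO, 'b' -
      * the backing device is congested, 'n' - the network (data socket) is
      * congested, 'a' - both backing device and network, '-' - not
      * congested. */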
3466 /**
3467  * drbd_congested() - Callback for pdflush
3468  * @congested_data:     User data
3469  * @bdi_bits:           Bits pdflush is currently interested in
3470  *
3471  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3472  */
3473 static int drbd_congested(void *congested_data, int bdi_bits)
3474 {
3475         struct drbd_conf *mdev = congested_data;
3476         struct request_queue *q;
3477         char reason = '-';
3478         int r = 0;
3479
3480         if (!may_inc_ap_bio(mdev)) {
3481                 /* DRBD has frozen IO */
3482                 r = bdi_bits;
3483                 reason = 'd';
3484                 goto out;
3485         }
3486
3487         if (get_ldev(mdev)) {
3488                 q = bdev_get_queue(mdev->ldev->backing_bdev);
3489                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3490                 put_ldev(mdev);
3491                 if (r)
3492                         reason = 'b';
3493         }
3494
3495         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3496                 r |= (1 << BDI_async_congested);
3497                 reason = reason == 'b' ? 'a' : 'n';
3498         }
3499
3500 out:
3501         mdev->congestion_reason = reason;
3502         return r;
3503 }
3504
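     /* drbd_new_device() allocates everything a minor needs before it is
      * configured: the drbd_conf itself, its cpumask, request queue, gendisk,
      * the metadata IO page, bitmap, transfer log, app_reads hash and the
      * initial epoch. The out_* labels unwind those allocations in reverse
      * order; drbd_free_mdev() below is the matching destructor once the
      * device has been fully set up. */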
3505 struct drbd_conf *drbd_new_device(unsigned int minor)
3506 {
3507         struct drbd_conf *mdev;
3508         struct gendisk *disk;
3509         struct request_queue *q;
3510
3511         /* GFP_KERNEL, we are outside of all write-out paths */
3512         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3513         if (!mdev)
3514                 return NULL;
3515         if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3516                 goto out_no_cpumask;
3517
3518         mdev->minor = minor;
3519
3520         drbd_init_set_defaults(mdev);
3521
3522         q = blk_alloc_queue(GFP_KERNEL);
3523         if (!q)
3524                 goto out_no_q;
3525         mdev->rq_queue = q;
3526         q->queuedata   = mdev;
3527
3528         disk = alloc_disk(1);
3529         if (!disk)
3530                 goto out_no_disk;
3531         mdev->vdisk = disk;
3532
3533         set_disk_ro(disk, true);
3534
3535         disk->queue = q;
3536         disk->major = DRBD_MAJOR;
3537         disk->first_minor = minor;
3538         disk->fops = &drbd_ops;
3539         sprintf(disk->disk_name, "drbd%d", minor);
3540         disk->private_data = mdev;
3541
3542         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3543         /* we have no partitions. we contain only ourselves. */
3544         mdev->this_bdev->bd_contains = mdev->this_bdev;
3545
3546         q->backing_dev_info.congested_fn = drbd_congested;
3547         q->backing_dev_info.congested_data = mdev;
3548
3549         blk_queue_make_request(q, drbd_make_request);
3550         /* Setting the max_hw_sectors to an odd value of 8 KiB here;
3551            this triggers a max_bio_size message upon first attach or connect. */
3552         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
3553         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3554         blk_queue_merge_bvec(q, drbd_merge_bvec);
3555         q->queue_lock = &mdev->req_lock;
3556
3557         mdev->md_io_page = alloc_page(GFP_KERNEL);
3558         if (!mdev->md_io_page)
3559                 goto out_no_io_page;
3560
3561         if (drbd_bm_init(mdev))
3562                 goto out_no_bitmap;
3563         /* no need to lock access, we are still initializing this minor device. */
3564         if (!tl_init(mdev))
3565                 goto out_no_tl;
3566
3567         mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3568         if (!mdev->app_reads_hash)
3569                 goto out_no_app_reads;
3570
3571         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3572         if (!mdev->current_epoch)
3573                 goto out_no_epoch;
3574
3575         INIT_LIST_HEAD(&mdev->current_epoch->list);
3576         mdev->epochs = 1;
3577
3578         return mdev;
3579
3580 /* out_whatever_else:
3581         kfree(mdev->current_epoch); */
3582 out_no_epoch:
3583         kfree(mdev->app_reads_hash);
3584 out_no_app_reads:
3585         tl_cleanup(mdev);
3586 out_no_tl:
3587         drbd_bm_cleanup(mdev);
3588 out_no_bitmap:
3589         __free_page(mdev->md_io_page);
3590 out_no_io_page:
3591         put_disk(disk);
3592 out_no_disk:
3593         blk_cleanup_queue(q);
3594 out_no_q:
3595         free_cpumask_var(mdev->cpu_mask);
3596 out_no_cpumask:
3597         kfree(mdev);
3598         return NULL;
3599 }
3600
3601 /* counterpart of drbd_new_device.
3602  * last part of drbd_delete_device. */
3603 void drbd_free_mdev(struct drbd_conf *mdev)
3604 {
3605         kfree(mdev->current_epoch);
3606         kfree(mdev->app_reads_hash);
3607         tl_cleanup(mdev);
3608         if (mdev->bitmap) /* should no longer be there. */
3609                 drbd_bm_cleanup(mdev);
3610         __free_page(mdev->md_io_page);
3611         put_disk(mdev->vdisk);
3612         blk_cleanup_queue(mdev->rq_queue);
3613         free_cpumask_var(mdev->cpu_mask);
3614         drbd_free_tl_hash(mdev);
3615         kfree(mdev);
3616 }
3617
3618
3619 int __init drbd_init(void)
3620 {
3621         int err;
3622
3623         if (sizeof(struct p_handshake) != 80) {
3624                 printk(KERN_ERR
3625                        "drbd: never change the size or layout "
3626                        "of the HandShake packet.\n");
3627                 return -EINVAL;
3628         }
3629
3630         if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
3631                 printk(KERN_ERR
3632                         "drbd: invalid minor_count (%d)\n", minor_count);
3633 #ifdef MODULE
3634                 return -EINVAL;
3635 #else
3636                 minor_count = 8;
3637 #endif
3638         }
3639
3640         err = drbd_nl_init();
3641         if (err)
3642                 return err;
3643
3644         err = register_blkdev(DRBD_MAJOR, "drbd");
3645         if (err) {
3646                 printk(KERN_ERR
3647                        "drbd: unable to register block device major %d\n",
3648                        DRBD_MAJOR);
3649                 return err;
3650         }
3651
3652         register_reboot_notifier(&drbd_notifier);
3653
3654         /*
3655          * allocate all necessary structs
3656          */
3657         err = -ENOMEM;
3658
3659         init_waitqueue_head(&drbd_pp_wait);
3660
3661         drbd_proc = NULL; /* play safe for drbd_cleanup */
3662         minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3663                                 GFP_KERNEL);
3664         if (!minor_table)
3665                 goto Enomem;
3666
3667         err = drbd_create_mempools();
3668         if (err)
3669                 goto Enomem;
3670
3671         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3672         if (!drbd_proc) {
3673                 printk(KERN_ERR "drbd: unable to register proc file\n");
3674                 goto Enomem;
3675         }
3676
3677         rwlock_init(&global_state_lock);
3678
3679         printk(KERN_INFO "drbd: initialized. "
3680                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3681                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3682         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3683         printk(KERN_INFO "drbd: registered as block device major %d\n",
3684                 DRBD_MAJOR);
3685         printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3686
3687         return 0; /* Success! */
3688
3689 Enomem:
3690         drbd_cleanup();
3691         if (err == -ENOMEM)
3692                 /* currently always the case */
3693                 printk(KERN_ERR "drbd: ran out of memory\n");
3694         else
3695                 printk(KERN_ERR "drbd: initialization failure\n");
3696         return err;
3697 }
3698
3699 void drbd_free_bc(struct drbd_backing_dev *ldev)
3700 {
3701         if (ldev == NULL)
3702                 return;
3703
3704         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3705         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3706
3707         kfree(ldev);
3708 }
3709
3710 void drbd_free_sock(struct drbd_conf *mdev)
3711 {
3712         if (mdev->data.socket) {
3713                 mutex_lock(&mdev->data.mutex);
3714                 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3715                 sock_release(mdev->data.socket);
3716                 mdev->data.socket = NULL;
3717                 mutex_unlock(&mdev->data.mutex);
3718         }
3719         if (mdev->meta.socket) {
3720                 mutex_lock(&mdev->meta.mutex);
3721                 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3722                 sock_release(mdev->meta.socket);
3723                 mdev->meta.socket = NULL;
3724                 mutex_unlock(&mdev->meta.mutex);
3725         }
3726 }
3727
3728
3729 void drbd_free_resources(struct drbd_conf *mdev)
3730 {
3731         crypto_free_hash(mdev->csums_tfm);
3732         mdev->csums_tfm = NULL;
3733         crypto_free_hash(mdev->verify_tfm);
3734         mdev->verify_tfm = NULL;
3735         crypto_free_hash(mdev->cram_hmac_tfm);
3736         mdev->cram_hmac_tfm = NULL;
3737         crypto_free_hash(mdev->integrity_w_tfm);
3738         mdev->integrity_w_tfm = NULL;
3739         crypto_free_hash(mdev->integrity_r_tfm);
3740         mdev->integrity_r_tfm = NULL;
3741
3742         drbd_free_sock(mdev);
3743
3744         __no_warn(local,
3745                   drbd_free_bc(mdev->ldev);
3746                   mdev->ldev = NULL;);
3747 }
3748
3749 /* meta data management */
3750
3751 struct meta_data_on_disk {
3752         u64 la_size;           /* last agreed size. */
3753         u64 uuid[UI_SIZE];   /* UUIDs. */
3754         u64 device_uuid;
3755         u64 reserved_u64_1;
3756         u32 flags;             /* MDF */
3757         u32 magic;
3758         u32 md_size_sect;
3759         u32 al_offset;         /* offset to this block */
3760         u32 al_nr_extents;     /* important for restoring the AL */
3761               /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3762         u32 bm_offset;         /* offset to the bitmap, from here */
3763         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3764         u32 la_peer_max_bio_size;   /* last peer max_bio_size */
3765         u32 reserved_u32[3];
3766
3767 } __packed;
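     /* All multi-byte members of meta_data_on_disk are stored big-endian
      * (see the cpu_to_be*() conversions in drbd_md_sync() below); the
      * structure occupies the beginning of the 512-byte buffer that
      * drbd_md_sync() zero-fills and writes out as the superblock. */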
3768
3769 /**
3770  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3771  * @mdev:       DRBD device.
3772  */
3773 void drbd_md_sync(struct drbd_conf *mdev)
3774 {
3775         struct meta_data_on_disk *buffer;
3776         sector_t sector;
3777         int i;
3778
3779         del_timer(&mdev->md_sync_timer);
3780         /* timer may be rearmed by drbd_md_mark_dirty() now. */
3781         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3782                 return;
3783
3784         /* We use D_FAILED here and not D_ATTACHING because we try to write
3785          * metadata even if we detach due to a disk failure! */
3786         if (!get_ldev_if_state(mdev, D_FAILED))
3787                 return;
3788
3789         buffer = drbd_md_get_buffer(mdev);
3790         if (!buffer)
3791                 goto out;
3792
3793         memset(buffer, 0, 512);
3794
3795         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3796         for (i = UI_CURRENT; i < UI_SIZE; i++)
3797                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3798         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3799         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3800
3801         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
3802         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
3803         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3804         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3805         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3806
3807         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3808         buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
3809
3810         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3811         sector = mdev->ldev->md.md_offset;
3812
3813         if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3814                 /* this was a try anyways ... */
3815                 dev_err(DEV, "meta data update failed!\n");
3816                 drbd_chk_io_error(mdev, 1, true);
3817         }
3818
3819         /* Update mdev->ldev->md.la_size_sect,
3820          * since we updated it on metadata. */
3821         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3822
3823         drbd_md_put_buffer(mdev);
3824 out:
3825         put_ldev(mdev);
3826 }
3827
3828 /**
3829  * drbd_md_read() - Reads in the meta data super block
3830  * @mdev:       DRBD device.
3831  * @bdev:       Device from which the meta data should be read in.
3832  *
3833  * Return NO_ERROR on success, and an enum drbd_ret_code in case
3834  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3835  */
3836 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3837 {
3838         struct meta_data_on_disk *buffer;
3839         int i, rv = NO_ERROR;
3840
3841         if (!get_ldev_if_state(mdev, D_ATTACHING))
3842                 return ERR_IO_MD_DISK;
3843
3844         buffer = drbd_md_get_buffer(mdev);
3845         if (!buffer)
3846                 goto out;
3847
3848         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3849                 /* NOTE: can't do normal error processing here as this is
3850                    called BEFORE disk is attached */
3851                 dev_err(DEV, "Error while reading metadata.\n");
3852                 rv = ERR_IO_MD_DISK;
3853                 goto err;
3854         }
3855
3856         if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3857                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3858                 rv = ERR_MD_INVALID;
3859                 goto err;
3860         }
3861         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3862                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3863                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3864                 rv = ERR_MD_INVALID;
3865                 goto err;
3866         }
3867         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3868                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3869                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3870                 rv = ERR_MD_INVALID;
3871                 goto err;
3872         }
3873         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3874                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3875                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3876                 rv = ERR_MD_INVALID;
3877                 goto err;
3878         }
3879
3880         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3881                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3882                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3883                 rv = ERR_MD_INVALID;
3884                 goto err;
3885         }
3886
3887         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3888         for (i = UI_CURRENT; i < UI_SIZE; i++)
3889                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3890         bdev->md.flags = be32_to_cpu(buffer->flags);
3891         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3892         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3893
3894         spin_lock_irq(&mdev->req_lock);
3895         if (mdev->state.conn < C_CONNECTED) {
3896                 int peer;
3897                 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3898                 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3899                 mdev->peer_max_bio_size = peer;
3900         }
3901         spin_unlock_irq(&mdev->req_lock);
3902
3903         if (mdev->sync_conf.al_extents < 7)
3904                 mdev->sync_conf.al_extents = 127;
3905
3906  err:
3907         drbd_md_put_buffer(mdev);
3908  out:
3909         put_ldev(mdev);
3910
3911         return rv;
3912 }
3913
3914 /**
3915  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3916  * @mdev:       DRBD device.
3917  *
3918  * Call this function if you change anything that should be written to
3919  * the meta-data super block. This function sets MD_DIRTY, and starts a
3920  * timer that ensures drbd_md_sync() gets called within five seconds.
3921  */
3922 #ifdef DEBUG
3923 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3924 {
3925         if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3926                 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3927                 mdev->last_md_mark_dirty.line = line;
3928                 mdev->last_md_mark_dirty.func = func;
3929         }
3930 }
3931 #else
3932 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3933 {
3934         if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3935                 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3936 }
3937 #endif
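     /* Note that the DEBUG variant above arms md_sync_timer after one second
      * (jiffies + HZ) instead of five, and records the function and line of
      * the last caller so w_md_sync() can report who left the metadata
      * dirty. */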
3938
3939 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3940 {
3941         int i;
3942
3943         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3944                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3945 }
3946
3947 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3948 {
3949         if (idx == UI_CURRENT) {
3950                 if (mdev->state.role == R_PRIMARY)
3951                         val |= 1;
3952                 else
3953                         val &= ~((u64)1);
3954
3955                 drbd_set_ed_uuid(mdev, val);
3956         }
3957
3958         mdev->ldev->md.uuid[idx] = val;
3959         drbd_md_mark_dirty(mdev);
3960 }
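     /* The least significant bit of the current UUID encodes the role at the
      * time it was set: _drbd_uuid_set() forces it to 1 while we are
      * R_PRIMARY and clears it otherwise, and drbd_uuid_set_bm() masks it
      * out again when a value is parked in the bitmap slot. */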
3961
3962
3963 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3964 {
3965         if (mdev->ldev->md.uuid[idx]) {
3966                 drbd_uuid_move_history(mdev);
3967                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3968         }
3969         _drbd_uuid_set(mdev, idx, val);
3970 }
3971
3972 /**
3973  * drbd_uuid_new_current() - Creates a new current UUID
3974  * @mdev:       DRBD device.
3975  *
3976  * Creates a new current UUID, and rotates the old current UUID into
3977  * the bitmap slot. Causes an incremental resync upon next connect.
3978  */
3979 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3980 {
3981         u64 val;
3982         unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3983
3984         if (bm_uuid)
3985                 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3986
3987         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3988
3989         get_random_bytes(&val, sizeof(u64));
3990         _drbd_uuid_set(mdev, UI_CURRENT, val);
3991         drbd_print_uuids(mdev, "new current UUID");
3992         /* get it to stable storage _now_ */
3993         drbd_md_sync(mdev);
3994 }
3995
3996 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3997 {
3998         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3999                 return;
4000
4001         if (val == 0) {
4002                 drbd_uuid_move_history(mdev);
4003                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
4004                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
4005         } else {
4006                 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
4007                 if (bm_uuid)
4008                         dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
4009
4010                 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
4011         }
4012         drbd_md_mark_dirty(mdev);
4013 }
4014
4015 /**
4016  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4017  * @mdev:       DRBD device.
4018  *
4019  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
4020  */
4021 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
4022 {
4023         int rv = -EIO;
4024
4025         if (get_ldev_if_state(mdev, D_ATTACHING)) {
4026                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
4027                 drbd_md_sync(mdev);
4028                 drbd_bm_set_all(mdev);
4029
4030                 rv = drbd_bm_write(mdev);
4031
4032                 if (!rv) {
4033                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
4034                         drbd_md_sync(mdev);
4035                 }
4036
4037                 put_ldev(mdev);
4038         }
4039
4040         return rv;
4041 }
4042
4043 /**
4044  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4045  * @mdev:       DRBD device.
4046  *
4047  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
4048  */
4049 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
4050 {
4051         int rv = -EIO;
4052
4053         drbd_resume_al(mdev);
4054         if (get_ldev_if_state(mdev, D_ATTACHING)) {
4055                 drbd_bm_clear_all(mdev);
4056                 rv = drbd_bm_write(mdev);
4057                 put_ldev(mdev);
4058         }
4059
4060         return rv;
4061 }
4062
4063 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4064 {
4065         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
4066         int rv = -EIO;
4067
4068         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
4069
4070         if (get_ldev(mdev)) {
4071                 drbd_bm_lock(mdev, work->why, work->flags);
4072                 rv = work->io_fn(mdev);
4073                 drbd_bm_unlock(mdev);
4074                 put_ldev(mdev);
4075         }
4076
4077         clear_bit(BITMAP_IO, &mdev->flags);
4078         smp_mb__after_clear_bit();
4079         wake_up(&mdev->misc_wait);
4080
4081         if (work->done)
4082                 work->done(mdev, rv);
4083
4084         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
4085         work->why = NULL;
4086         work->flags = 0;
4087
4088         return 1;
4089 }
4090
4091 void drbd_ldev_destroy(struct drbd_conf *mdev)
4092 {
4093         lc_destroy(mdev->resync);
4094         mdev->resync = NULL;
4095         lc_destroy(mdev->act_log);
4096         mdev->act_log = NULL;
4097         __no_warn(local,
4098                 drbd_free_bc(mdev->ldev);
4099                 mdev->ldev = NULL;);
4100
4101         if (mdev->md_io_tmpp) {
4102                 __free_page(mdev->md_io_tmpp);
4103                 mdev->md_io_tmpp = NULL;
4104         }
4105         clear_bit(GO_DISKLESS, &mdev->flags);
4106 }
4107
4108 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4109 {
4110         D_ASSERT(mdev->state.disk == D_FAILED);
4111         /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
4112          * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
4113          * the protected members anymore, though, so once put_ldev reaches zero
4114          * again, it will be safe to free them. */
4115         drbd_force_state(mdev, NS(disk, D_DISKLESS));
4116         return 1;
4117 }
4118
4119 void drbd_go_diskless(struct drbd_conf *mdev)
4120 {
4121         D_ASSERT(mdev->state.disk == D_FAILED);
4122         if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
4123                 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
4124 }
4125
4126 /**
4127  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
4128  * @mdev:       DRBD device.
4129  * @io_fn:      IO callback to be called when bitmap IO is possible
4130  * @done:       callback to be called after the bitmap IO was performed
4131  * @why:        Descriptive text of the reason for doing the IO
4132  *
4133  * While IO on the bitmap happens we freeze application IO, thus ensuring
4134  * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
4135  * called from worker context. It MUST NOT be used while a previous such
4136  * work is still pending!
4137  */
4138 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
4139                           int (*io_fn)(struct drbd_conf *),
4140                           void (*done)(struct drbd_conf *, int),
4141                           char *why, enum bm_flag flags)
4142 {
4143         D_ASSERT(current == mdev->worker.task);
4144
4145         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4146         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4147         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4148         if (mdev->bm_io_work.why)
4149                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4150                         why, mdev->bm_io_work.why);
4151
4152         mdev->bm_io_work.io_fn = io_fn;
4153         mdev->bm_io_work.done = done;
4154         mdev->bm_io_work.why = why;
4155         mdev->bm_io_work.flags = flags;
4156
4157         spin_lock_irq(&mdev->req_lock);
4158         set_bit(BITMAP_IO, &mdev->flags);
4159         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
4160                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
4161                         drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
4162         }
4163         spin_unlock_irq(&mdev->req_lock);
4164 }
4165
4166 /**
4167  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
4168  * @mdev:       DRBD device.
4169  * @io_fn:      IO callback to be called when bitmap IO is possible
4170  * @why:        Descriptive text of the reason for doing the IO
4171  *
4172  * Freezes application IO while the actual IO operation runs. This
4173  * function MAY NOT be called from worker context.
4174  */
4175 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4176                 char *why, enum bm_flag flags)
4177 {
4178         int rv;
4179
4180         D_ASSERT(current != mdev->worker.task);
4181
4182         if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4183                 drbd_suspend_io(mdev);
4184
4185         drbd_bm_lock(mdev, why, flags);
4186         rv = io_fn(mdev);
4187         drbd_bm_unlock(mdev);
4188
4189         if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4190                 drbd_resume_io(mdev);
4191
4192         return rv;
4193 }
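     /* Illustrative use only (not a verbatim call site from this file):
      * a synchronous "set all bits and write them out" bitmap update from
      * outside the worker could look like
      *
      *      rv = drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
      *                          "set_n_write example", BM_LOCKED_SET_ALLOWED);
      *
      * where the why string is purely diagnostic and passing
      * BM_LOCKED_SET_ALLOWED skips the drbd_suspend_io()/drbd_resume_io()
      * bracket above. */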
4194
4195 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4196 {
4197         if ((mdev->ldev->md.flags & flag) != flag) {
4198                 drbd_md_mark_dirty(mdev);
4199                 mdev->ldev->md.flags |= flag;
4200         }
4201 }
4202
4203 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4204 {
4205         if ((mdev->ldev->md.flags & flag) != 0) {
4206                 drbd_md_mark_dirty(mdev);
4207                 mdev->ldev->md.flags &= ~flag;
4208         }
4209 }
4210 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4211 {
4212         return (bdev->md.flags & flag) != 0;
4213 }
4214
4215 static void md_sync_timer_fn(unsigned long data)
4216 {
4217         struct drbd_conf *mdev = (struct drbd_conf *) data;
4218
4219         drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4220 }
4221
4222 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4223 {
4224         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4225 #ifdef DEBUG
4226         dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4227                 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4228 #endif
4229         drbd_md_sync(mdev);
4230         return 1;
4231 }
4232
4233 #ifdef CONFIG_DRBD_FAULT_INJECTION
4234 /* Fault insertion support including random number generator shamelessly
4235  * stolen from kernel/rcutorture.c */
4236 struct fault_random_state {
4237         unsigned long state;
4238         unsigned long count;
4239 };
4240
4241 #define FAULT_RANDOM_MULT 39916801  /* prime */
4242 #define FAULT_RANDOM_ADD        479001701 /* prime */
4243 #define FAULT_RANDOM_REFRESH 10000
4244
4245 /*
4246  * Crude but fast random-number generator.  Uses a linear congruential
4247  * generator, with occasional help from get_random_bytes().
4248  */
4249 static unsigned long
4250 _drbd_fault_random(struct fault_random_state *rsp)
4251 {
4252         long refresh;
4253
4254         if (!rsp->count--) {
4255                 get_random_bytes(&refresh, sizeof(refresh));
4256                 rsp->state += refresh;
4257                 rsp->count = FAULT_RANDOM_REFRESH;
4258         }
4259         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4260         return swahw32(rsp->state);
4261 }
4262
4263 static char *
4264 _drbd_fault_str(unsigned int type) {
4265         static char *_faults[] = {
4266                 [DRBD_FAULT_MD_WR] = "Meta-data write",
4267                 [DRBD_FAULT_MD_RD] = "Meta-data read",
4268                 [DRBD_FAULT_RS_WR] = "Resync write",
4269                 [DRBD_FAULT_RS_RD] = "Resync read",
4270                 [DRBD_FAULT_DT_WR] = "Data write",
4271                 [DRBD_FAULT_DT_RD] = "Data read",
4272                 [DRBD_FAULT_DT_RA] = "Data read ahead",
4273                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4274                 [DRBD_FAULT_AL_EE] = "EE allocation",
4275                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4276         };
4277
4278         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4279 }
4280
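     /* fault_rate is interpreted as a percentage: the LCG above yields a
      * value in 1..100 which is compared against it. fault_devs selects the
      * victims as a bitmask of minor numbers, with 0 meaning "all devices". */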
4281 unsigned int
4282 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4283 {
4284         static struct fault_random_state rrs = {0, 0};
4285
4286         unsigned int ret = (
4287                 (fault_devs == 0 ||
4288                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4289                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4290
4291         if (ret) {
4292                 fault_count++;
4293
4294                 if (__ratelimit(&drbd_ratelimit_state))
4295                         dev_warn(DEV, "***Simulating %s failure\n",
4296                                 _drbd_fault_str(type));
4297         }
4298
4299         return ret;
4300 }
4301 #endif
4302
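     /* drbd_buildtag() starts out as "\0uilt-in": the leading NUL marks the
      * string as not yet initialized. On the first call it is either filled
      * with the module srcversion or, for a built-in driver, the first byte
      * is patched to 'b' so the string reads "built-in". */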
4303 const char *drbd_buildtag(void)
4304 {
4305         /* DRBD built from external sources carries a reference here to the
4306            git hash of the source code. */
4307
4308         static char buildtag[38] = "\0uilt-in";
4309
4310         if (buildtag[0] == 0) {
4311 #ifdef CONFIG_MODULES
4312                 if (THIS_MODULE != NULL)
4313                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4314                 else
4315 #endif
4316                         buildtag[0] = 'b';
4317         }
4318
4319         return buildtag;
4320 }
4321
4322 module_init(drbd_init)
4323 module_exit(drbd_cleanup)
4324
4325 EXPORT_SYMBOL(drbd_conn_str);
4326 EXPORT_SYMBOL(drbd_role_str);
4327 EXPORT_SYMBOL(drbd_disk_str);
4328 EXPORT_SYMBOL(drbd_set_st_err_str);