1 /*
2    drbd.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27  */
28
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57 #include "drbd_vli.h"
58
59 static DEFINE_MUTEX(drbd_main_mutex);
60 int drbdd_init(struct drbd_thread *);
61 int drbd_worker(struct drbd_thread *);
62 int drbd_asender(struct drbd_thread *);
63
64 int drbd_init(void);
65 static int drbd_open(struct block_device *bdev, fmode_t mode);
66 static int drbd_release(struct gendisk *gd, fmode_t mode);
67 static int w_md_sync(struct drbd_work *w, int unused);
68 static void md_sync_timer_fn(unsigned long data);
69 static int w_bitmap_io(struct drbd_work *w, int unused);
70 static int w_go_diskless(struct drbd_work *w, int unused);
71
72 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
73               "Lars Ellenberg <lars@linbit.com>");
74 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
75 MODULE_VERSION(REL_VERSION);
76 MODULE_LICENSE("GPL");
77 MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
78                  __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
79 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
80
81 #include <linux/moduleparam.h>
82 /* allow_open_on_secondary */
83 MODULE_PARM_DESC(allow_oos, "DONT USE!");
84 /* thanks to these macros, if compiled into the kernel (not as a module),
85  * this becomes the boot parameter drbd.minor_count */
86 module_param(minor_count, uint, 0444);
87 module_param(disable_sendpage, bool, 0644);
88 module_param(allow_oos, bool, 0);
89 module_param(proc_details, int, 0644);
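/* Illustrative usage only (values are examples, not defaults): when drbd is
 * compiled into the kernel, booting with "drbd.minor_count=16" sets
 * minor_count; built as a module, the equivalent is
 * "modprobe drbd minor_count=16". */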
90
91 #ifdef CONFIG_DRBD_FAULT_INJECTION
92 int enable_faults;
93 int fault_rate;
94 static int fault_count;
95 int fault_devs;
96 /* bitmap of enabled faults */
97 module_param(enable_faults, int, 0664);
98 /* fault rate % value - applies to all enabled faults */
99 module_param(fault_rate, int, 0664);
100 /* count of faults inserted */
101 module_param(fault_count, int, 0664);
102 /* bitmap of devices to insert faults on */
103 module_param(fault_devs, int, 0644);
104 #endif
105
106 /* module parameter, defined */
107 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
108 int disable_sendpage;
109 int allow_oos;
110 int proc_details;       /* Detail level in /proc/drbd */
111
112 /* Module parameter for setting the user mode helper program
113  * to run. Default is /sbin/drbdadm */
114 char usermode_helper[80] = "/sbin/drbdadm";
115
116 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
117
118 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
119  * as member "struct gendisk *vdisk;"
120  */
121 struct idr minors;
122 struct list_head drbd_tconns;  /* list of struct drbd_tconn */
123 DEFINE_MUTEX(drbd_cfg_mutex);
124
125 struct kmem_cache *drbd_request_cache;
126 struct kmem_cache *drbd_ee_cache;       /* peer requests */
127 struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
128 struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
129 mempool_t *drbd_request_mempool;
130 mempool_t *drbd_ee_mempool;
131 mempool_t *drbd_md_io_page_pool;
132 struct bio_set *drbd_md_io_bio_set;
133
134 /* I do not use a standard mempool, because:
135    1) I want to hand out the pre-allocated objects first.
136    2) I want to be able to interrupt sleeping allocation with a signal.
137    Note: This is a singly linked list; the next pointer is stored in the
138          private member of struct page (see the sketch after the declarations below).
139  */
140 struct page *drbd_pp_pool;
141 spinlock_t   drbd_pp_lock;
142 int          drbd_pp_vacant;
143 wait_queue_head_t drbd_pp_wait;
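/*
 * A minimal sketch of the list handling implied by the comment above
 * (illustrative only; the real pool helpers live elsewhere in drbd and
 * run under drbd_pp_lock):
 *
 *   push:  set_page_private(page, (unsigned long)drbd_pp_pool);
 *          drbd_pp_pool = page;
 *
 *   pop:   page = drbd_pp_pool;
 *          drbd_pp_pool = (struct page *)page_private(page);
 */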
144
145 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
146
147 static const struct block_device_operations drbd_ops = {
148         .owner =   THIS_MODULE,
149         .open =    drbd_open,
150         .release = drbd_release,
151 };
152
153 static void bio_destructor_drbd(struct bio *bio)
154 {
155         bio_free(bio, drbd_md_io_bio_set);
156 }
157
158 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
159 {
160         struct bio *bio;
161
162         if (!drbd_md_io_bio_set)
163                 return bio_alloc(gfp_mask, 1);
164
165         bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
166         if (!bio)
167                 return NULL;
168         bio->bi_destructor = bio_destructor_drbd;
169         return bio;
170 }
171
172 #ifdef __CHECKER__
173 /* When checking with sparse, and this is an inline function, sparse will
174    give tons of false positives. When this is a real function, sparse works.
175  */
176 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
177 {
178         int io_allowed;
179
180         atomic_inc(&mdev->local_cnt);
181         io_allowed = (mdev->state.disk >= mins);
182         if (!io_allowed) {
183                 if (atomic_dec_and_test(&mdev->local_cnt))
184                         wake_up(&mdev->misc_wait);
185         }
186         return io_allowed;
187 }
188
189 #endif
190
191 /**
192  * DOC: The transfer log
193  *
194  * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
195  * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
196  * of the list. There is always at least one &struct drbd_tl_epoch object.
197  *
198  * Each &struct drbd_tl_epoch has a circular double linked list of requests
199  * attached.
200  */
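/*
 * Illustrative sketch of the layout described above (the ->next pointers run
 * from the oldest epoch towards the newest one):
 *
 *   tconn->oldest_tle -> epoch -> epoch -> ... -> epoch <- tconn->newest_tle
 *                          |        |               |
 *                      requests  requests        requests
 *                      (each a circular, doubly linked list of struct drbd_request)
 */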
201 static int tl_init(struct drbd_tconn *tconn)
202 {
203         struct drbd_tl_epoch *b;
204
205         /* during device minor initialization, we may well use GFP_KERNEL */
206         b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
207         if (!b)
208                 return 0;
209         INIT_LIST_HEAD(&b->requests);
210         INIT_LIST_HEAD(&b->w.list);
211         b->next = NULL;
212         b->br_number = 4711;
213         b->n_writes = 0;
214         b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
215
216         tconn->oldest_tle = b;
217         tconn->newest_tle = b;
218         INIT_LIST_HEAD(&tconn->out_of_sequence_requests);
219
220         return 1;
221 }
222
223 static void tl_cleanup(struct drbd_tconn *tconn)
224 {
225         if (tconn->oldest_tle != tconn->newest_tle)
226                 conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
227         if (!list_empty(&tconn->out_of_sequence_requests))
228                 conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
229         kfree(tconn->oldest_tle);
230         tconn->oldest_tle = NULL;
231         kfree(tconn->unused_spare_tle);
232         tconn->unused_spare_tle = NULL;
233 }
234
235 /**
236  * _tl_add_barrier() - Adds a barrier to the transfer log
237  * @tconn:      DRBD connection.
238  * @new:        Barrier to be added before the current head of the TL.
239  *
240  * The caller must hold the req_lock.
241  */
242 void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
243 {
244         struct drbd_tl_epoch *newest_before;
245
246         INIT_LIST_HEAD(&new->requests);
247         INIT_LIST_HEAD(&new->w.list);
248         new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
249         new->next = NULL;
250         new->n_writes = 0;
251
252         newest_before = tconn->newest_tle;
253         /* never send a barrier number == 0, because that is special-cased
254          * when using TCQ for our write ordering code */
255         new->br_number = (newest_before->br_number+1) ?: 1;
256         if (tconn->newest_tle != new) {
257                 tconn->newest_tle->next = new;
258                 tconn->newest_tle = new;
259         }
260 }
261
262 /**
263  * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
264  * @tconn:      DRBD connection.
265  * @barrier_nr: Expected identifier of the DRBD write barrier packet.
266  * @set_size:   Expected number of requests before that barrier.
267  *
268  * In case the passed barrier_nr or set_size does not match the oldest
269  * &struct drbd_tl_epoch objects this function will cause a termination
270  * of the connection.
271  */
272 void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
273                 unsigned int set_size)
274 {
275         struct drbd_conf *mdev;
276         struct drbd_tl_epoch *b, *nob; /* next old barrier */
277         struct list_head *le, *tle;
278         struct drbd_request *r;
279
280         spin_lock_irq(&tconn->req_lock);
281
282         b = tconn->oldest_tle;
283
284         /* first some paranoia code */
285         if (b == NULL) {
286                 conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
287                          barrier_nr);
288                 goto bail;
289         }
290         if (b->br_number != barrier_nr) {
291                 conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
292                          barrier_nr, b->br_number);
293                 goto bail;
294         }
295         if (b->n_writes != set_size) {
296                 conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
297                          barrier_nr, set_size, b->n_writes);
298                 goto bail;
299         }
300
301         /* Clean up list of requests processed during current epoch */
302         list_for_each_safe(le, tle, &b->requests) {
303                 r = list_entry(le, struct drbd_request, tl_requests);
304                 _req_mod(r, BARRIER_ACKED);
305         }
306         /* There could be requests on the list waiting for completion
307            of the write to the local disk. To avoid corruption of the
308            slab's data structures we have to remove the list's head.
309
310            Also there could have been a barrier ack out of sequence, overtaking
311            the write acks - which would be a bug and would violate write ordering.
312            To not deadlock in case we lose connection while such requests are
313            still pending, we need some way to find them for the
314            _req_mod(CONNECTION_LOST_WHILE_PENDING).
315
316            These have been list_move'd to the out_of_sequence_requests list in
317            _req_mod(, BARRIER_ACKED) above.
318            */
319         list_del_init(&b->requests);
320         mdev = b->w.mdev;
321
322         nob = b->next;
323         if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
324                 _tl_add_barrier(tconn, b);
325                 if (nob)
326                         tconn->oldest_tle = nob;
327                 /* if nob == NULL, b was the only barrier and becomes the new
328                    barrier. Therefore tconn->oldest_tle already points to b */
329         } else {
330                 D_ASSERT(nob != NULL);
331                 tconn->oldest_tle = nob;
332                 kfree(b);
333         }
334
335         spin_unlock_irq(&tconn->req_lock);
336         dec_ap_pending(mdev);
337
338         return;
339
340 bail:
341         spin_unlock_irq(&tconn->req_lock);
342         conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
343 }
344
345
346 /**
347  * _tl_restart() - Walks the transfer log, and applies an action to all requests
348  * @tconn:      DRBD connection.
349  * @what:       The action/event to perform with all request objects
350  *
351  * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
352  * RESTART_FROZEN_DISK_IO.
353  */
354 void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
355 {
356         struct drbd_tl_epoch *b, *tmp, **pn;
357         struct list_head *le, *tle, carry_reads;
358         struct drbd_request *req;
359         int rv, n_writes, n_reads;
360
361         b = tconn->oldest_tle;
362         pn = &tconn->oldest_tle;
363         while (b) {
364                 n_writes = 0;
365                 n_reads = 0;
366                 INIT_LIST_HEAD(&carry_reads);
367                 list_for_each_safe(le, tle, &b->requests) {
368                         req = list_entry(le, struct drbd_request, tl_requests);
369                         rv = _req_mod(req, what);
370
371                         n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
372                         n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
373                 }
374                 tmp = b->next;
375
376                 if (n_writes) {
377                         if (what == RESEND) {
378                                 b->n_writes = n_writes;
379                                 if (b->w.cb == NULL) {
380                                         b->w.cb = w_send_barrier;
381                                         inc_ap_pending(b->w.mdev);
382                                         set_bit(CREATE_BARRIER, &b->w.mdev->flags);
383                                 }
384
385                                 drbd_queue_work(&tconn->data.work, &b->w);
386                         }
387                         pn = &b->next;
388                 } else {
389                         if (n_reads)
390                                 list_add(&carry_reads, &b->requests);
391                         /* there could still be requests on that ring list,
392                          * in case local io is still pending */
393                         list_del(&b->requests);
394
395                         /* dec_ap_pending corresponding to queue_barrier.
396                          * the newest barrier may not have been queued yet,
397                          * in which case w.cb is still NULL. */
398                         if (b->w.cb != NULL)
399                                 dec_ap_pending(b->w.mdev);
400
401                         if (b == tconn->newest_tle) {
402                                 /* recycle, but reinit! */
403                                 if (tmp != NULL)
404                                         conn_err(tconn, "ASSERT FAILED tmp == NULL");
405                                 INIT_LIST_HEAD(&b->requests);
406                                 list_splice(&carry_reads, &b->requests);
407                                 INIT_LIST_HEAD(&b->w.list);
408                                 b->w.cb = NULL;
409                                 b->br_number = net_random();
410                                 b->n_writes = 0;
411
412                                 *pn = b;
413                                 break;
414                         }
415                         *pn = tmp;
416                         kfree(b);
417                 }
418                 b = tmp;
419                 list_splice(&carry_reads, &b->requests);
420         }
421 }
422
423
424 /**
425  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
426  * @tconn:      DRBD connection.
427  *
428  * This is called after the connection to the peer was lost. The storage covered
429  * by the requests on the transfer log gets marked as out of sync. Called from the
430  * receiver thread and the worker thread.
431  */
432 void tl_clear(struct drbd_tconn *tconn)
433 {
434         struct drbd_conf *mdev;
435         struct list_head *le, *tle;
436         struct drbd_request *r;
437         int vnr;
438
439         spin_lock_irq(&tconn->req_lock);
440
441         _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
442
443         /* we expect this list to be empty. */
444         if (!list_empty(&tconn->out_of_sequence_requests))
445                 conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");
446
447         /* but just in case, clean it up anyways! */
448         list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
449                 r = list_entry(le, struct drbd_request, tl_requests);
450                 /* It would be nice to complete outside of spinlock.
451                  * But this is easier for now. */
452                 _req_mod(r, CONNECTION_LOST_WHILE_PENDING);
453         }
454
455         /* ensure bit indicating barrier is required is clear */
456         idr_for_each_entry(&tconn->volumes, mdev, vnr)
457                 clear_bit(CREATE_BARRIER, &mdev->flags);
458
459         spin_unlock_irq(&tconn->req_lock);
460 }
461
462 void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
463 {
464         spin_lock_irq(&tconn->req_lock);
465         _tl_restart(tconn, what);
466         spin_unlock_irq(&tconn->req_lock);
467 }
468
469 static int drbd_thread_setup(void *arg)
470 {
471         struct drbd_thread *thi = (struct drbd_thread *) arg;
472         struct drbd_tconn *tconn = thi->tconn;
473         unsigned long flags;
474         int retval;
475
476         snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
477                  thi->name[0], thi->tconn->name);
478
479 restart:
480         retval = thi->function(thi);
481
482         spin_lock_irqsave(&thi->t_lock, flags);
483
484         /* if the receiver has been "EXITING", the last thing it did
485          * was set the conn state to "StandAlone",
486          * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
487          * and receiver thread will be "started".
488          * drbd_thread_start needs to set "RESTARTING" in that case.
489          * t_state check and assignment needs to be within the same spinlock,
490          * so either thread_start sees EXITING, and can remap to RESTARTING,
491  * or thread_start sees NONE, and can proceed as normal.
492          */
493
494         if (thi->t_state == RESTARTING) {
495                 conn_info(tconn, "Restarting %s thread\n", thi->name);
496                 thi->t_state = RUNNING;
497                 spin_unlock_irqrestore(&thi->t_lock, flags);
498                 goto restart;
499         }
500
501         thi->task = NULL;
502         thi->t_state = NONE;
503         smp_mb();
504         complete(&thi->stop);
505         spin_unlock_irqrestore(&thi->t_lock, flags);
506
507         conn_info(tconn, "Terminating %s\n", current->comm);
508
509         /* Release mod reference taken when thread was started */
510         module_put(THIS_MODULE);
511         return retval;
512 }
513
514 static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
515                              int (*func) (struct drbd_thread *), char *name)
516 {
517         spin_lock_init(&thi->t_lock);
518         thi->task    = NULL;
519         thi->t_state = NONE;
520         thi->function = func;
521         thi->tconn = tconn;
522         strncpy(thi->name, name, ARRAY_SIZE(thi->name));
523 }
524
525 int drbd_thread_start(struct drbd_thread *thi)
526 {
527         struct drbd_tconn *tconn = thi->tconn;
528         struct task_struct *nt;
529         unsigned long flags;
530
531         /* is used from state engine doing drbd_thread_stop_nowait,
532          * while holding the req lock irqsave */
533         spin_lock_irqsave(&thi->t_lock, flags);
534
535         switch (thi->t_state) {
536         case NONE:
537                 conn_info(tconn, "Starting %s thread (from %s [%d])\n",
538                          thi->name, current->comm, current->pid);
539
540                 /* Get ref on module for thread - this is released when thread exits */
541                 if (!try_module_get(THIS_MODULE)) {
542                         conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
543                         spin_unlock_irqrestore(&thi->t_lock, flags);
544                         return false;
545                 }
546
547                 init_completion(&thi->stop);
548                 thi->reset_cpu_mask = 1;
549                 thi->t_state = RUNNING;
550                 spin_unlock_irqrestore(&thi->t_lock, flags);
551                 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
552
553                 nt = kthread_create(drbd_thread_setup, (void *) thi,
554                                     "drbd_%c_%s", thi->name[0], thi->tconn->name);
555
556                 if (IS_ERR(nt)) {
557                         conn_err(tconn, "Couldn't start thread\n");
558
559                         module_put(THIS_MODULE);
560                         return false;
561                 }
562                 spin_lock_irqsave(&thi->t_lock, flags);
563                 thi->task = nt;
564                 thi->t_state = RUNNING;
565                 spin_unlock_irqrestore(&thi->t_lock, flags);
566                 wake_up_process(nt);
567                 break;
568         case EXITING:
569                 thi->t_state = RESTARTING;
570                 conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
571                                 thi->name, current->comm, current->pid);
572                 /* fall through */
573         case RUNNING:
574         case RESTARTING:
575         default:
576                 spin_unlock_irqrestore(&thi->t_lock, flags);
577                 break;
578         }
579
580         return true;
581 }
582
583
584 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
585 {
586         unsigned long flags;
587
588         enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
589
590         /* may be called from state engine, holding the req lock irqsave */
591         spin_lock_irqsave(&thi->t_lock, flags);
592
593         if (thi->t_state == NONE) {
594                 spin_unlock_irqrestore(&thi->t_lock, flags);
595                 if (restart)
596                         drbd_thread_start(thi);
597                 return;
598         }
599
600         if (thi->t_state != ns) {
601                 if (thi->task == NULL) {
602                         spin_unlock_irqrestore(&thi->t_lock, flags);
603                         return;
604                 }
605
606                 thi->t_state = ns;
607                 smp_mb();
608                 init_completion(&thi->stop);
609                 if (thi->task != current)
610                         force_sig(DRBD_SIGKILL, thi->task);
611         }
612
613         spin_unlock_irqrestore(&thi->t_lock, flags);
614
615         if (wait)
616                 wait_for_completion(&thi->stop);
617 }
618
619 static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
620 {
621         struct drbd_thread *thi =
622                 task == tconn->receiver.task ? &tconn->receiver :
623                 task == tconn->asender.task  ? &tconn->asender :
624                 task == tconn->worker.task   ? &tconn->worker : NULL;
625
626         return thi;
627 }
628
629 char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
630 {
631         struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
632         return thi ? thi->name : task->comm;
633 }
634
635 int conn_lowest_minor(struct drbd_tconn *tconn)
636 {
637         int vnr = 0;
638         struct drbd_conf *mdev;
639
640         mdev = idr_get_next(&tconn->volumes, &vnr);
641         if (!mdev)
642                 return -1;
643         return mdev_to_minor(mdev);
644 }
645
646 #ifdef CONFIG_SMP
647 /**
648  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
649  * @tconn:      DRBD connection.
650  *
651  * Forces all threads of a connection onto the same CPU. This is beneficial for
652  * DRBD's performance. May be overridden by the user's configuration.
653  */
654 void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
655 {
656         int ord, cpu;
657
658         /* user override. */
659         if (cpumask_weight(tconn->cpu_mask))
660                 return;
661
662         ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
663         for_each_online_cpu(cpu) {
664                 if (ord-- == 0) {
665                         cpumask_set_cpu(cpu, tconn->cpu_mask);
666                         return;
667                 }
668         }
669         /* should not be reached */
670         cpumask_setall(tconn->cpu_mask);
671 }
672
673 /**
674  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
675  * @mdev:       DRBD device.
676  * @thi:        drbd_thread object
677  *
678  * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
679  * prematurely.
680  */
681 void drbd_thread_current_set_cpu(struct drbd_thread *thi)
682 {
683         struct task_struct *p = current;
684
685         if (!thi->reset_cpu_mask)
686                 return;
687         thi->reset_cpu_mask = 0;
688         set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
689 }
690 #endif
691
692 static void prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
693 {
694         h->magic   = cpu_to_be32(DRBD_MAGIC);
695         h->command = cpu_to_be16(cmd);
696         h->length  = cpu_to_be16(size);
697 }
698
699 static void prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
700 {
701         h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
702         h->command = cpu_to_be16(cmd);
703         h->length  = cpu_to_be32(size);
704 }
705
706 static void _prepare_header(struct drbd_tconn *tconn, int vnr, struct p_header *h,
707                             enum drbd_packet cmd, int size)
708 {
709         if (tconn->agreed_pro_version >= 95)
710                 prepare_header95(&h->h95, cmd, size);
711         else
712                 prepare_header80(&h->h80, cmd, size);
713 }
714
715 static void prepare_header(struct drbd_conf *mdev, struct p_header *h,
716                            enum drbd_packet cmd, int size)
717 {
718         _prepare_header(mdev->tconn, mdev->vnr, h, cmd, size);
719 }
720
721 /* the appropriate socket mutex must be held already */
722 int _conn_send_cmd(struct drbd_tconn *tconn, int vnr, struct socket *sock,
723                    enum drbd_packet cmd, struct p_header *h, size_t size,
724                    unsigned msg_flags)
725 {
726         int err;
727
728         _prepare_header(tconn, vnr, h, cmd, size - sizeof(struct p_header));
729         err = drbd_send_all(tconn, sock, h, size, msg_flags);
730         if (err && !signal_pending(current))
731                 conn_warn(tconn, "short send %s size=%d\n",
732                           cmdname(cmd), (int)size);
733         return err;
734 }
735
736 /* don't pass the socket. we may only look at it
737  * when we hold the appropriate socket mutex.
738  */
739 int conn_send_cmd(struct drbd_tconn *tconn, int vnr, struct drbd_socket *sock,
740                   enum drbd_packet cmd, struct p_header *h, size_t size)
741 {
742         int err = -EIO;
743
744         mutex_lock(&sock->mutex);
745         if (sock->socket)
746                 err = _conn_send_cmd(tconn, vnr, sock->socket, cmd, h, size, 0);
747         mutex_unlock(&sock->mutex);
748         return err;
749 }
750
751 int conn_send_cmd2(struct drbd_tconn *tconn, enum drbd_packet cmd, char *data,
752                    size_t size)
753 {
754         struct p_header80 h;
755         int err;
756
757         prepare_header80(&h, cmd, size);
758         err = drbd_get_data_sock(tconn);
759         if (!err) {
760                 err = drbd_send_all(tconn, tconn->data.socket, &h, sizeof(h), 0);
761                 if (!err)
762                         err = drbd_send_all(tconn, tconn->data.socket, data, size, 0);
763                 drbd_put_data_sock(tconn);
764         }
765         return err;
766 }
767
768 int drbd_send_ping(struct drbd_tconn *tconn)
769 {
770         struct p_header h;
771         return !conn_send_cmd(tconn, 0, &tconn->meta, P_PING, &h, sizeof(h));
772 }
773
774 int drbd_send_ping_ack(struct drbd_tconn *tconn)
775 {
776         struct p_header h;
777         return !conn_send_cmd(tconn, 0, &tconn->meta, P_PING_ACK, &h, sizeof(h));
778 }
779
780 int drbd_send_sync_param(struct drbd_conf *mdev)
781 {
782         struct p_rs_param_95 *p;
783         struct socket *sock;
784         int size, err;
785         const int apv = mdev->tconn->agreed_pro_version;
786
787         size = apv <= 87 ? sizeof(struct p_rs_param)
788                 : apv == 88 ? sizeof(struct p_rs_param)
789                         + strlen(mdev->tconn->net_conf->verify_alg) + 1
790                 : apv <= 94 ? sizeof(struct p_rs_param_89)
791                 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
792
793         /* used from admin command context and receiver/worker context.
794          * to avoid kmalloc, grab the socket right here,
795          * then use the pre-allocated sbuf there */
796         mutex_lock(&mdev->tconn->data.mutex);
797         sock = mdev->tconn->data.socket;
798
799         if (likely(sock != NULL)) {
800                 enum drbd_packet cmd =
801                         apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
802
803                 p = &mdev->tconn->data.sbuf.rs_param_95;
804
805                 /* initialize verify_alg and csums_alg */
806                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
807
808                 if (get_ldev(mdev)) {
809                         p->rate = cpu_to_be32(mdev->ldev->dc.resync_rate);
810                         p->c_plan_ahead = cpu_to_be32(mdev->ldev->dc.c_plan_ahead);
811                         p->c_delay_target = cpu_to_be32(mdev->ldev->dc.c_delay_target);
812                         p->c_fill_target = cpu_to_be32(mdev->ldev->dc.c_fill_target);
813                         p->c_max_rate = cpu_to_be32(mdev->ldev->dc.c_max_rate);
814                         put_ldev(mdev);
815                 } else {
816                         p->rate = cpu_to_be32(DRBD_RATE_DEF);
817                         p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
818                         p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
819                         p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
820                         p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
821                 }
822
823                 if (apv >= 88)
824                         strcpy(p->verify_alg, mdev->tconn->net_conf->verify_alg);
825                 if (apv >= 89)
826                         strcpy(p->csums_alg, mdev->tconn->net_conf->csums_alg);
827
828                 err = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
829         } else
830                 err = -EIO;
831
832         mutex_unlock(&mdev->tconn->data.mutex);
833
834         return err;
835 }
836
837 int drbd_send_protocol(struct drbd_tconn *tconn)
838 {
839         struct p_protocol *p;
840         int size, cf, err;
841
842         size = sizeof(struct p_protocol);
843
844         if (tconn->agreed_pro_version >= 87)
845                 size += strlen(tconn->net_conf->integrity_alg) + 1;
846
847         /* we must not recurse into our own queue,
848          * as that is blocked during handshake */
849         p = kmalloc(size, GFP_NOIO);
850         if (p == NULL)
851                 return -ENOMEM;
852
853         p->protocol      = cpu_to_be32(tconn->net_conf->wire_protocol);
854         p->after_sb_0p   = cpu_to_be32(tconn->net_conf->after_sb_0p);
855         p->after_sb_1p   = cpu_to_be32(tconn->net_conf->after_sb_1p);
856         p->after_sb_2p   = cpu_to_be32(tconn->net_conf->after_sb_2p);
857         p->two_primaries = cpu_to_be32(tconn->net_conf->two_primaries);
858
859         cf = 0;
860         if (tconn->net_conf->want_lose)
861                 cf |= CF_WANT_LOSE;
862         if (tconn->net_conf->dry_run) {
863                 if (tconn->agreed_pro_version >= 92)
864                         cf |= CF_DRY_RUN;
865                 else {
866                         conn_err(tconn, "--dry-run is not supported by peer");
867                         kfree(p);
868                         return -EOPNOTSUPP;
869                 }
870         }
871         p->conn_flags    = cpu_to_be32(cf);
872
873         if (tconn->agreed_pro_version >= 87)
874                 strcpy(p->integrity_alg, tconn->net_conf->integrity_alg);
875
876         err = conn_send_cmd2(tconn, P_PROTOCOL, p->head.payload, size - sizeof(struct p_header));
877         kfree(p);
878         return err;
879 }
880
881 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
882 {
883         struct p_uuids p;
884         int i;
885
886         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
887                 return 0;
888
889         for (i = UI_CURRENT; i < UI_SIZE; i++)
890                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
891
892         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
893         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
894         uuid_flags |= mdev->tconn->net_conf->want_lose ? 1 : 0;
895         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
896         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
897         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
898
899         put_ldev(mdev);
900
901         return drbd_send_cmd(mdev, &mdev->tconn->data, P_UUIDS, &p.head, sizeof(p));
902 }
903
904 int drbd_send_uuids(struct drbd_conf *mdev)
905 {
906         return _drbd_send_uuids(mdev, 0);
907 }
908
909 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
910 {
911         return _drbd_send_uuids(mdev, 8);
912 }
913
914 void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
915 {
916         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
917                 u64 *uuid = mdev->ldev->md.uuid;
918                 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
919                      text,
920                      (unsigned long long)uuid[UI_CURRENT],
921                      (unsigned long long)uuid[UI_BITMAP],
922                      (unsigned long long)uuid[UI_HISTORY_START],
923                      (unsigned long long)uuid[UI_HISTORY_END]);
924                 put_ldev(mdev);
925         } else {
926                 dev_info(DEV, "%s effective data uuid: %016llX\n",
927                                 text,
928                                 (unsigned long long)mdev->ed_uuid);
929         }
930 }
931
932 void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
933 {
934         struct p_rs_uuid p;
935         u64 uuid;
936
937         D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
938
939         uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
940         drbd_uuid_set(mdev, UI_BITMAP, uuid);
941         drbd_print_uuids(mdev, "updated sync UUID");
942         drbd_md_sync(mdev);
943         p.uuid = cpu_to_be64(uuid);
944
945         drbd_send_cmd(mdev, &mdev->tconn->data, P_SYNC_UUID, &p.head, sizeof(p));
946 }
947
948 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
949 {
950         struct p_sizes p;
951         sector_t d_size, u_size;
952         int q_order_type, max_bio_size;
953
954         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
955                 D_ASSERT(mdev->ldev->backing_bdev);
956                 d_size = drbd_get_max_capacity(mdev->ldev);
957                 u_size = mdev->ldev->dc.disk_size;
958                 q_order_type = drbd_queue_order_type(mdev);
959                 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
960                 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
961                 put_ldev(mdev);
962         } else {
963                 d_size = 0;
964                 u_size = 0;
965                 q_order_type = QUEUE_ORDERED_NONE;
966                 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
967         }
968
969         p.d_size = cpu_to_be64(d_size);
970         p.u_size = cpu_to_be64(u_size);
971         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
972         p.max_bio_size = cpu_to_be32(max_bio_size);
973         p.queue_order_type = cpu_to_be16(q_order_type);
974         p.dds_flags = cpu_to_be16(flags);
975
976         return drbd_send_cmd(mdev, &mdev->tconn->data, P_SIZES, &p.head, sizeof(p));
977 }
978
979 /**
980  * drbd_send_state() - Sends the drbd state to the peer
981  * @mdev:       DRBD device.
982  */
983 int drbd_send_state(struct drbd_conf *mdev)
984 {
985         struct socket *sock;
986         struct p_state p;
987         int err = -EIO;
988
989         mutex_lock(&mdev->tconn->data.mutex);
990
991         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
992         sock = mdev->tconn->data.socket;
993
994         if (likely(sock != NULL))
995                 err = _drbd_send_cmd(mdev, sock, P_STATE, &p.head, sizeof(p), 0);
996
997         mutex_unlock(&mdev->tconn->data.mutex);
998
999         return err;
1000 }
1001
1002 int _conn_send_state_req(struct drbd_tconn *tconn, int vnr, enum drbd_packet cmd,
1003                          union drbd_state mask, union drbd_state val)
1004 {
1005         struct p_req_state p;
1006
1007         p.mask    = cpu_to_be32(mask.i);
1008         p.val     = cpu_to_be32(val.i);
1009
1010         return conn_send_cmd(tconn, vnr, &tconn->data, cmd, &p.head, sizeof(p));
1011 }
1012
1013 void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
1014 {
1015         struct p_req_state_reply p;
1016
1017         p.retcode    = cpu_to_be32(retcode);
1018
1019         drbd_send_cmd(mdev, &mdev->tconn->meta, P_STATE_CHG_REPLY, &p.head, sizeof(p));
1020 }
1021
1022 int conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
1023 {
1024         struct p_req_state_reply p;
1025         enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1026
1027         p.retcode    = cpu_to_be32(retcode);
1028
1029         return !conn_send_cmd(tconn, 0, &tconn->meta, cmd, &p.head, sizeof(p));
1030 }
1031
1032 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
1033         struct p_compressed_bm *p,
1034         struct bm_xfer_ctx *c)
1035 {
1036         struct bitstream bs;
1037         unsigned long plain_bits;
1038         unsigned long tmp;
1039         unsigned long rl;
1040         unsigned len;
1041         unsigned toggle;
1042         int bits;
1043
1044         /* may we use this feature? */
1045         if ((mdev->tconn->net_conf->use_rle == 0) ||
1046                 (mdev->tconn->agreed_pro_version < 90))
1047                         return 0;
1048
1049         if (c->bit_offset >= c->bm_bits)
1050                 return 0; /* nothing to do. */
1051
1052         /* use at most this many bytes */
1053         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
1054         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
1055         /* plain bits covered in this code string */
1056         plain_bits = 0;
1057
1058         /* p->encoding & 0x80 stores whether the first run length is set.
1059          * bit offset is implicit.
1060          * start with toggle == 2 to be able to tell the first iteration */
1061         toggle = 2;
1062
1063         /* see how many plain bits we can stuff into one packet
1064          * using RLE and VLI. */
1065         do {
1066                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
1067                                     : _drbd_bm_find_next(mdev, c->bit_offset);
1068                 if (tmp == -1UL)
1069                         tmp = c->bm_bits;
1070                 rl = tmp - c->bit_offset;
1071
1072                 if (toggle == 2) { /* first iteration */
1073                         if (rl == 0) {
1074                                 /* the first checked bit was set,
1075                                  * store start value, */
1076                                 DCBP_set_start(p, 1);
1077                                 /* but skip encoding of zero run length */
1078                                 toggle = !toggle;
1079                                 continue;
1080                         }
1081                         DCBP_set_start(p, 0);
1082                 }
1083
1084                 /* paranoia: catch zero runlength.
1085                  * can only happen if bitmap is modified while we scan it. */
1086                 if (rl == 0) {
1087                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1088                             "t:%u bo:%lu\n", toggle, c->bit_offset);
1089                         return -1;
1090                 }
1091
1092                 bits = vli_encode_bits(&bs, rl);
1093                 if (bits == -ENOBUFS) /* buffer full */
1094                         break;
1095                 if (bits <= 0) {
1096                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1097                         return 0;
1098                 }
1099
1100                 toggle = !toggle;
1101                 plain_bits += rl;
1102                 c->bit_offset = tmp;
1103         } while (c->bit_offset < c->bm_bits);
1104
1105         len = bs.cur.b - p->code + !!bs.cur.bit;
1106
1107         if (plain_bits < (len << 3)) {
1108                 /* incompressible with this method.
1109                  * we need to rewind both word and bit position. */
1110                 c->bit_offset -= plain_bits;
1111                 bm_xfer_ctx_bit_to_word_offset(c);
1112                 c->bit_offset = c->word_offset * BITS_PER_LONG;
1113                 return 0;
1114         }
1115
1116         /* RLE + VLI was able to compress it just fine.
1117          * update c->word_offset. */
1118         bm_xfer_ctx_bit_to_word_offset(c);
1119
1120         /* store pad_bits */
1121         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1122
1123         return len;
1124 }
1125
1126 /**
1127  * send_bitmap_rle_or_plain
1128  *
1129  * Return 0 when done, 1 when another iteration is needed, and a negative error
1130  * code upon failure.
1131  */
1132 static int
1133 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
1134                          struct p_header *h, struct bm_xfer_ctx *c)
1135 {
1136         struct p_compressed_bm *p = (void*)h;
1137         unsigned long num_words;
1138         int len, err;
1139
1140         len = fill_bitmap_rle_bits(mdev, p, c);
1141
1142         if (len < 0)
1143                 return -EIO;
1144
1145         if (len) {
1146                 DCBP_set_code(p, RLE_VLI_Bits);
1147                 err = _drbd_send_cmd(mdev, mdev->tconn->data.socket,
1148                                      P_COMPRESSED_BITMAP, h,
1149                                      sizeof(*p) + len, 0);
1150
1151                 c->packets[0]++;
1152                 c->bytes[0] += sizeof(*p) + len;
1153
1154                 if (c->bit_offset >= c->bm_bits)
1155                         len = 0; /* DONE */
1156         } else {
1157                 /* was not compressible.
1158                  * send a buffer full of plain text bits instead. */
1159                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
1160                 len = num_words * sizeof(long);
1161                 if (len)
1162                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
1163                 err = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BITMAP,
1164                                      h, sizeof(struct p_header80) + len, 0);
1165                 c->word_offset += num_words;
1166                 c->bit_offset = c->word_offset * BITS_PER_LONG;
1167
1168                 c->packets[1]++;
1169                 c->bytes[1] += sizeof(struct p_header80) + len;
1170
1171                 if (c->bit_offset > c->bm_bits)
1172                         c->bit_offset = c->bm_bits;
1173         }
1174         if (!err) {
1175                 if (len == 0) {
1176                         INFO_bm_xfer_stats(mdev, "send", c);
1177                         return 0;
1178                 } else
1179                         return 1;
1180         }
1181         return -EIO;
1182 }
1183
1184 /* See the comment at receive_bitmap() */
1185 static int _drbd_send_bitmap(struct drbd_conf *mdev)
1186 {
1187         struct bm_xfer_ctx c;
1188         struct p_header *p;
1189         int err;
1190
1191         if (!expect(mdev->bitmap))
1192                 return false;
1193
1194         /* maybe we should use some per thread scratch page,
1195          * and allocate that during initial device creation? */
1196         p = (struct p_header *) __get_free_page(GFP_NOIO);
1197         if (!p) {
1198                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
1199                 return false;
1200         }
1201
1202         if (get_ldev(mdev)) {
1203                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1204                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
1205                         drbd_bm_set_all(mdev);
1206                         if (drbd_bm_write(mdev)) {
1207                                 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1208                                  * but otherwise process as per normal - need to tell other
1209                                  * side that a full resync is required! */
1210                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
1211                         } else {
1212                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
1213                                 drbd_md_sync(mdev);
1214                         }
1215                 }
1216                 put_ldev(mdev);
1217         }
1218
1219         c = (struct bm_xfer_ctx) {
1220                 .bm_bits = drbd_bm_bits(mdev),
1221                 .bm_words = drbd_bm_words(mdev),
1222         };
1223
1224         do {
1225                 err = send_bitmap_rle_or_plain(mdev, p, &c);
1226         } while (err > 0);
1227
1228         free_page((unsigned long) p);
1229         return err == 0;
1230 }
1231
1232 int drbd_send_bitmap(struct drbd_conf *mdev)
1233 {
1234         int err;
1235
1236         if (drbd_get_data_sock(mdev->tconn))
1237                 return -1;
1238         err = !_drbd_send_bitmap(mdev);
1239         drbd_put_data_sock(mdev->tconn);
1240         return err;
1241 }
1242 void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
1243 {
1244         struct p_barrier_ack p;
1245
1246         p.barrier  = barrier_nr;
1247         p.set_size = cpu_to_be32(set_size);
1248
1249         if (mdev->state.conn >= C_CONNECTED)
1250                 drbd_send_cmd(mdev, &mdev->tconn->meta, P_BARRIER_ACK, &p.head, sizeof(p));
1251 }
1252
1253 /**
1254  * _drbd_send_ack() - Sends an ack packet
1255  * @mdev:       DRBD device.
1256  * @cmd:        Packet command code.
1257  * @sector:     sector, needs to be in big endian byte order
1258  * @blksize:    size in byte, needs to be in big endian byte order
1259  * @block_id:   Id, big endian byte order
1260  */
1261 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1262                           u64 sector, u32 blksize, u64 block_id)
1263 {
1264         struct p_block_ack p;
1265
1266         p.sector   = sector;
1267         p.block_id = block_id;
1268         p.blksize  = blksize;
1269         p.seq_num  = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1270
1271         if (!mdev->tconn->meta.socket || mdev->state.conn < C_CONNECTED)
1272                 return -EIO;
1273         return drbd_send_cmd(mdev, &mdev->tconn->meta, cmd, &p.head, sizeof(p));
1274 }
1275
1276 /* dp->sector and dp->block_id already/still in network byte order,
1277  * data_size is payload size according to dp->head,
1278  * and may need to be corrected for digest size. */
1279 void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1280                       struct p_data *dp, int data_size)
1281 {
1282         data_size -= (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
1283                 crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
1284         _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1285                        dp->block_id);
1286 }
1287
1288 void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1289                       struct p_block_req *rp)
1290 {
1291         _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
1292 }
1293
1294 /**
1295  * drbd_send_ack() - Sends an ack packet
1296  * @mdev:       DRBD device
1297  * @cmd:        packet command code
1298  * @peer_req:   peer request
1299  */
1300 int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1301                   struct drbd_peer_request *peer_req)
1302 {
1303         return _drbd_send_ack(mdev, cmd,
1304                               cpu_to_be64(peer_req->i.sector),
1305                               cpu_to_be32(peer_req->i.size),
1306                               peer_req->block_id);
1307 }
1308
1309 /* This function misuses the block_id field to signal if the blocks
1310  * are in sync or not. */
1311 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
1312                      sector_t sector, int blksize, u64 block_id)
1313 {
1314         return _drbd_send_ack(mdev, cmd,
1315                               cpu_to_be64(sector),
1316                               cpu_to_be32(blksize),
1317                               cpu_to_be64(block_id));
1318 }
1319
1320 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1321                        sector_t sector, int size, u64 block_id)
1322 {
1323         struct p_block_req p;
1324
1325         p.sector   = cpu_to_be64(sector);
1326         p.block_id = block_id;
1327         p.blksize  = cpu_to_be32(size);
1328
1329         return drbd_send_cmd(mdev, &mdev->tconn->data, cmd, &p.head, sizeof(p));
1330 }
1331
1332 int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1333                             void *digest, int digest_size, enum drbd_packet cmd)
1334 {
1335         int err;
1336         struct p_block_req p;
1337
1338         prepare_header(mdev, &p.head, cmd, sizeof(p) - sizeof(struct p_header) + digest_size);
1339         p.sector   = cpu_to_be64(sector);
1340         p.block_id = ID_SYNCER /* unused */;
1341         p.blksize  = cpu_to_be32(size);
1342
1343         mutex_lock(&mdev->tconn->data.mutex);
1344         err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, &p, sizeof(p), 0);
1345         if (!err)
1346                 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, digest, digest_size, 0);
1347         mutex_unlock(&mdev->tconn->data.mutex);
1348         return err;
1349 }
1350
1351 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1352 {
1353         struct p_block_req p;
1354
1355         p.sector   = cpu_to_be64(sector);
1356         p.block_id = ID_SYNCER /* unused */;
1357         p.blksize  = cpu_to_be32(size);
1358
1359         return drbd_send_cmd(mdev, &mdev->tconn->data, P_OV_REQUEST, &p.head, sizeof(p));
1360 }
1361
1362 /* called on sndtimeo
1363  * returns false if we should retry,
1364  * true if we think connection is dead
1365  */
1366 static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
1367 {
1368         int drop_it;
1369         /* long elapsed = (long)(jiffies - mdev->last_received); */
1370
1371         drop_it =   tconn->meta.socket == sock
1372                 || !tconn->asender.task
1373                 || get_t_state(&tconn->asender) != RUNNING
1374                 || tconn->cstate < C_WF_REPORT_PARAMS;
1375
1376         if (drop_it)
1377                 return true;
1378
1379         drop_it = !--tconn->ko_count;
1380         if (!drop_it) {
1381                 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1382                          current->comm, current->pid, tconn->ko_count);
1383                 request_ping(tconn);
1384         }
1385
1386         return drop_it; /* && (mdev->state == R_PRIMARY) */;
1387 }
1388
1389 static void drbd_update_congested(struct drbd_tconn *tconn)
1390 {
1391         struct sock *sk = tconn->data.socket->sk;
1392         if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1393                 set_bit(NET_CONGESTED, &tconn->flags);
1394 }
1395
1396 /* The idea of sendpage seems to be to put some kind of reference
1397  * to the page into the skb, and to hand it over to the NIC. In
1398  * this process get_page() gets called.
1399  *
1400  * As soon as the page was really sent over the network put_page()
1401  * gets called by some part of the network layer. [ NIC driver? ]
1402  *
1403  * [ get_page() / put_page() increment/decrement the count. If count
1404  *   reaches 0 the page will be freed. ]
1405  *
1406  * This works nicely with pages from FSs.
1407  * But this means that in protocol A we might signal IO completion too early!
1408  *
1409  * In order not to corrupt data during a resync we must make sure
1410  * that we do not reuse our own buffer pages (EEs) too early, therefore
1411  * we have the net_ee list.
1412  *
1413  * XFS seems to have problems, still, it submits pages with page_count == 0!
1414  * As a workaround, we disable sendpage on pages
1415  * with page_count == 0 or PageSlab.
1416  */
1417 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
1418                               int offset, size_t size, unsigned msg_flags)
1419 {
1420         struct socket *socket;
1421         void *addr;
1422         int err;
1423
1424         socket = mdev->tconn->data.socket;
1425         addr = kmap(page) + offset;
1426         err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
1427         kunmap(page);
1428         if (!err)
1429                 mdev->send_cnt += size >> 9;
1430         return err;
1431 }
1432
1433 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
1434                     int offset, size_t size, unsigned msg_flags)
1435 {
1436         struct socket *socket = mdev->tconn->data.socket;
1437         mm_segment_t oldfs = get_fs();
1438         int len = size;
1439         int err = -EIO;
1440
1441         /* e.g. XFS meta- & log-data is in slab pages, which have a
1442          * page_count of 0 and/or have PageSlab() set.
1443          * we cannot use send_page for those, as that does get_page();
1444          * put_page(); and would cause either a VM_BUG directly, or
1445          * __page_cache_release a page that would actually still be referenced
1446          * by someone, leading to some obscure delayed Oops somewhere else. */
1447         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1448                 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
1449
1450         msg_flags |= MSG_NOSIGNAL;
1451         drbd_update_congested(mdev->tconn);
1452         set_fs(KERNEL_DS);
1453         do {
1454                 int sent;
1455
1456                 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1457                 if (sent <= 0) {
1458                         if (sent == -EAGAIN) {
1459                                 if (we_should_drop_the_connection(mdev->tconn, socket))
1460                                         break;
1461                                 continue;
1462                         }
1463                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1464                              __func__, (int)size, len, sent);
1465                         if (sent < 0)
1466                                 err = sent;
1467                         break;
1468                 }
1469                 len    -= sent;
1470                 offset += sent;
1471         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
1472         set_fs(oldfs);
1473         clear_bit(NET_CONGESTED, &mdev->tconn->flags);
1474
1475         if (len == 0) {
1476                 err = 0;
1477                 mdev->send_cnt += size >> 9;
1478         }
1479         return err;
1480 }
1481
1482 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1483 {
1484         struct bio_vec *bvec;
1485         int i;
1486         /* hint all but last page with MSG_MORE */
1487         __bio_for_each_segment(bvec, bio, i, 0) {
1488                 int err;
1489
1490                 err = _drbd_no_send_page(mdev, bvec->bv_page,
1491                                          bvec->bv_offset, bvec->bv_len,
1492                                          i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1493                 if (err)
1494                         return err;
1495         }
1496         return 0;
1497 }
1498
1499 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1500 {
1501         struct bio_vec *bvec;
1502         int i;
1503         /* hint all but last page with MSG_MORE */
1504         __bio_for_each_segment(bvec, bio, i, 0) {
1505                 int err;
1506
1507                 err = _drbd_send_page(mdev, bvec->bv_page,
1508                                       bvec->bv_offset, bvec->bv_len,
1509                                       i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1510                 if (err)
1511                         return err;
1512         }
1513         return 0;
1514 }
1515
1516 static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1517                             struct drbd_peer_request *peer_req)
1518 {
1519         struct page *page = peer_req->pages;
1520         unsigned len = peer_req->i.size;
1521         int err;
1522
1523         /* hint all but last page with MSG_MORE */
1524         page_chain_for_each(page) {
1525                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
1526
1527                 err = _drbd_send_page(mdev, page, 0, l,
1528                                       page_chain_next(page) ? MSG_MORE : 0);
1529                 if (err)
1530                         return err;
1531                 len -= l;
1532         }
1533         return 0;
1534 }
1535
1536 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1537 {
1538         if (mdev->tconn->agreed_pro_version >= 95)
1539                 return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
1540                         (bi_rw & REQ_FUA ? DP_FUA : 0) |
1541                         (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1542                         (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1543         else
1544                 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
1545 }
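/*
 * Worked example (editorial note, derived from the mapping above; not from
 * the original source): with an agreed protocol version >= 95, a bio
 * carrying REQ_SYNC | REQ_FUA is announced on the wire as
 * DP_RW_SYNC | DP_FUA, and REQ_FLUSH / REQ_DISCARD map to
 * DP_FLUSH / DP_DISCARD likewise.  Against an older peer only the REQ_SYNC
 * hint survives as DP_RW_SYNC; the FUA, flush and discard hints are
 * silently dropped from the packet.
 */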
1546
1547 /* Used to send write requests
1548  * R_PRIMARY -> Peer    (P_DATA)
1549  */
1550 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1551 {
1552         int err;
1553         struct p_data p;
1554         unsigned int dp_flags = 0;
1555         void *dgb;
1556         int dgs;
1557
1558         err = drbd_get_data_sock(mdev->tconn);
1559         if (err)
1560                 return err;
1561
1562         dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1563                 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
1564
1565         prepare_header(mdev, &p.head, P_DATA, sizeof(p) - sizeof(struct p_header) + dgs + req->i.size);
1566         p.sector   = cpu_to_be64(req->i.sector);
1567         p.block_id = (unsigned long)req;
1568         p.seq_num  = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
1569
1570         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
1571
1572         if (mdev->state.conn >= C_SYNC_SOURCE &&
1573             mdev->state.conn <= C_PAUSED_SYNC_T)
1574                 dp_flags |= DP_MAY_SET_IN_SYNC;
1575
1576         p.dp_flags = cpu_to_be32(dp_flags);
1577         set_bit(UNPLUG_REMOTE, &mdev->flags);
1578         err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, &p,
1579                             sizeof(p), dgs ? MSG_MORE : 0);
1580         if (!err && dgs) {
1581                 dgb = mdev->tconn->int_dig_out;
1582                 drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, dgb);
1583                 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, dgb, dgs, 0);
1584         }
1585         if (!err) {
1586                 /* For protocol A, we have to memcpy the payload into
1587                  * socket buffers, as we may complete right away
1588                  * as soon as we handed it over to tcp, at which point the data
1589                  * pages may become invalid.
1590                  *
1591                  * For data-integrity enabled, we copy it as well, so we can be
1592                  * sure that even if the bio pages may still be modified, it
1593                  * won't change the data on the wire, thus if the digest checks
1594                  * out ok after sending on this side, but does not fit on the
1595                  * receiving side, we sure have detected corruption elsewhere.
1596                  */
1597                 if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || dgs)
1598                         err = _drbd_send_bio(mdev, req->master_bio);
1599                 else
1600                         err = _drbd_send_zc_bio(mdev, req->master_bio);
1601
1602                 /* double check digest, sometimes buffers have been modified in flight. */
1603                 if (dgs > 0 && dgs <= 64) {
1604                         /* 64 byte, 512 bit, is the largest digest size
1605                          * currently supported in kernel crypto. */
1606                         unsigned char digest[64];
1607                         drbd_csum_bio(mdev, mdev->tconn->integrity_w_tfm, req->master_bio, digest);
1608                         if (memcmp(mdev->tconn->int_dig_out, digest, dgs)) {
1609                                 dev_warn(DEV,
1610                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1611                                         (unsigned long long)req->i.sector, req->i.size);
1612                         }
1613                 } /* else if (dgs > 64) {
1614                      ... Be noisy about digest too large ...
1615                 } */
1616         }
1617
1618         drbd_put_data_sock(mdev->tconn);
1619
1620         return err;
1621 }
1622
1623 /* answer packet, used to send data back for read requests:
1624  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
1625  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
1626  */
1627 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
1628                     struct drbd_peer_request *peer_req)
1629 {
1630         int err;
1631         struct p_data p;
1632         void *dgb;
1633         int dgs;
1634
1635         dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_w_tfm) ?
1636                 crypto_hash_digestsize(mdev->tconn->integrity_w_tfm) : 0;
1637
1638         prepare_header(mdev, &p.head, cmd, sizeof(p) -
1639                                            sizeof(struct p_header80) +
1640                                            dgs + peer_req->i.size);
1641         p.sector   = cpu_to_be64(peer_req->i.sector);
1642         p.block_id = peer_req->block_id;
1643         p.seq_num = 0;  /* unused */
1644
1645         /* Only called by our kernel thread.
1646          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
1647          * in response to admin command or module unload.
1648          */
1649         err = drbd_get_data_sock(mdev->tconn);
1650         if (err)
1651                 return err;
1652         err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, &p,
1653                             sizeof(p), dgs ? MSG_MORE : 0);
1654         if (!err && dgs) {
1655                 dgb = mdev->tconn->int_dig_out;
1656                 drbd_csum_ee(mdev, mdev->tconn->integrity_w_tfm, peer_req, dgb);
1657                 err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket, dgb,
1658                                     dgs, 0);
1659         }
1660         if (!err)
1661                 err = _drbd_send_zc_ee(mdev, peer_req);
1662         drbd_put_data_sock(mdev->tconn);
1663
1664         return err;
1665 }
1666
1667 int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
1668 {
1669         struct p_block_desc p;
1670
1671         p.sector  = cpu_to_be64(req->i.sector);
1672         p.blksize = cpu_to_be32(req->i.size);
1673
1674         return drbd_send_cmd(mdev, &mdev->tconn->data, P_OUT_OF_SYNC, &p.head, sizeof(p));
1675 }
1676
1677 /*
1678   drbd_send distinguishes two cases:
1679
1680   Packets sent via the data socket "sock"
1681   and packets sent via the meta data socket "msock"
1682
1683                     sock                      msock
1684   -----------------+-------------------------+------------------------------
1685   timeout           conf.timeout / 2          conf.timeout / 2
1686   timeout action    send a ping via msock     Abort communication
1687                                               and close all sockets
1688 */
1689
1690 /*
1691  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1692  */
1693 int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1694               void *buf, size_t size, unsigned msg_flags)
1695 {
1696         struct kvec iov;
1697         struct msghdr msg;
1698         int rv, sent = 0;
1699
1700         if (!sock)
1701                 return -EBADR;
1702
1703         /* THINK  if (signal_pending) return ... ? */
1704
1705         iov.iov_base = buf;
1706         iov.iov_len  = size;
1707
1708         msg.msg_name       = NULL;
1709         msg.msg_namelen    = 0;
1710         msg.msg_control    = NULL;
1711         msg.msg_controllen = 0;
1712         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
1713
1714         if (sock == tconn->data.socket) {
1715                 tconn->ko_count = tconn->net_conf->ko_count;
1716                 drbd_update_congested(tconn);
1717         }
1718         do {
1719                 /* STRANGE
1720                  * tcp_sendmsg does _not_ use its size parameter at all ?
1721                  *
1722                  * -EAGAIN on timeout, -EINTR on signal.
1723                  */
1724 /* THINK
1725  * do we need to block DRBD_SIG if sock == &meta.socket ??
1726  * otherwise wake_asender() might interrupt some send_*Ack !
1727  */
1728                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1729                 if (rv == -EAGAIN) {
1730                         if (we_should_drop_the_connection(tconn, sock))
1731                                 break;
1732                         else
1733                                 continue;
1734                 }
1735                 if (rv == -EINTR) {
1736                         flush_signals(current);
1737                         rv = 0;
1738                 }
1739                 if (rv < 0)
1740                         break;
1741                 sent += rv;
1742                 iov.iov_base += rv;
1743                 iov.iov_len  -= rv;
1744         } while (sent < size);
1745
1746         if (sock == tconn->data.socket)
1747                 clear_bit(NET_CONGESTED, &tconn->flags);
1748
1749         if (rv <= 0) {
1750                 if (rv != -EAGAIN) {
1751                         conn_err(tconn, "%s_sendmsg returned %d\n",
1752                                  sock == tconn->meta.socket ? "msock" : "sock",
1753                                  rv);
1754                         conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
1755                 } else
1756                         conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
1757         }
1758
1759         return sent;
1760 }
1761
1762 /**
1763  * drbd_send_all  -  Send an entire buffer
1764  *
1765  * Returns 0 upon success and a negative error value otherwise.
1766  */
1767 int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1768                   size_t size, unsigned msg_flags)
1769 {
1770         int err;
1771
1772         err = drbd_send(tconn, sock, buffer, size, msg_flags);
1773         if (err < 0)
1774                 return err;
1775         if (err != size)
1776                 return -EIO;
1777         return 0;
1778 }
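/*
 * Minimal usage sketch (editorial illustration mirroring the pattern in
 * drbd_send_dblock() above; not additional driver code).  Here "p" stands
 * for any fixed-size packet that has already been prepared:
 *
 *	err = drbd_get_data_sock(mdev->tconn);
 *	if (err)
 *		return err;
 *	err = drbd_send_all(mdev->tconn, mdev->tconn->data.socket,
 *			    &p, sizeof(p), 0);
 *	drbd_put_data_sock(mdev->tconn);
 *	return err;
 *
 * A return of 0 means the whole buffer went out; a short send is folded
 * into -EIO by drbd_send_all(), so callers never need to loop themselves.
 */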
1779
1780 static int drbd_open(struct block_device *bdev, fmode_t mode)
1781 {
1782         struct drbd_conf *mdev = bdev->bd_disk->private_data;
1783         unsigned long flags;
1784         int rv = 0;
1785
1786         mutex_lock(&drbd_main_mutex);
1787         spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1788         /* to have a stable mdev->state.role
1789          * and no race with updating open_cnt */
1790
1791         if (mdev->state.role != R_PRIMARY) {
1792                 if (mode & FMODE_WRITE)
1793                         rv = -EROFS;
1794                 else if (!allow_oos)
1795                         rv = -EMEDIUMTYPE;
1796         }
1797
1798         if (!rv)
1799                 mdev->open_cnt++;
1800         spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1801         mutex_unlock(&drbd_main_mutex);
1802
1803         return rv;
1804 }
1805
1806 static int drbd_release(struct gendisk *gd, fmode_t mode)
1807 {
1808         struct drbd_conf *mdev = gd->private_data;
1809         mutex_lock(&drbd_main_mutex);
1810         mdev->open_cnt--;
1811         mutex_unlock(&drbd_main_mutex);
1812         return 0;
1813 }
1814
1815 static void drbd_set_defaults(struct drbd_conf *mdev)
1816 {
1817         /* Beware! The actual layout differs
1818          * between big endian and little endian */
1819         mdev->state = (union drbd_state) {
1820                 { .role = R_SECONDARY,
1821                   .peer = R_UNKNOWN,
1822                   .conn = C_STANDALONE,
1823                   .disk = D_DISKLESS,
1824                   .pdsk = D_UNKNOWN,
1825                   .susp = 0,
1826                   .susp_nod = 0,
1827                   .susp_fen = 0
1828                 } };
1829 }
1830
1831 void drbd_init_set_defaults(struct drbd_conf *mdev)
1832 {
1833         /* the memset(,0,) did most of this.
1834          * note: only assignments, no allocation in here */
1835
1836         drbd_set_defaults(mdev);
1837
1838         atomic_set(&mdev->ap_bio_cnt, 0);
1839         atomic_set(&mdev->ap_pending_cnt, 0);
1840         atomic_set(&mdev->rs_pending_cnt, 0);
1841         atomic_set(&mdev->unacked_cnt, 0);
1842         atomic_set(&mdev->local_cnt, 0);
1843         atomic_set(&mdev->pp_in_use_by_net, 0);
1844         atomic_set(&mdev->rs_sect_in, 0);
1845         atomic_set(&mdev->rs_sect_ev, 0);
1846         atomic_set(&mdev->ap_in_flight, 0);
1847
1848         mutex_init(&mdev->md_io_mutex);
1849         mutex_init(&mdev->own_state_mutex);
1850         mdev->state_mutex = &mdev->own_state_mutex;
1851
1852         spin_lock_init(&mdev->al_lock);
1853         spin_lock_init(&mdev->peer_seq_lock);
1854         spin_lock_init(&mdev->epoch_lock);
1855
1856         INIT_LIST_HEAD(&mdev->active_ee);
1857         INIT_LIST_HEAD(&mdev->sync_ee);
1858         INIT_LIST_HEAD(&mdev->done_ee);
1859         INIT_LIST_HEAD(&mdev->read_ee);
1860         INIT_LIST_HEAD(&mdev->net_ee);
1861         INIT_LIST_HEAD(&mdev->resync_reads);
1862         INIT_LIST_HEAD(&mdev->resync_work.list);
1863         INIT_LIST_HEAD(&mdev->unplug_work.list);
1864         INIT_LIST_HEAD(&mdev->go_diskless.list);
1865         INIT_LIST_HEAD(&mdev->md_sync_work.list);
1866         INIT_LIST_HEAD(&mdev->start_resync_work.list);
1867         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
1868
1869         mdev->resync_work.cb  = w_resync_timer;
1870         mdev->unplug_work.cb  = w_send_write_hint;
1871         mdev->go_diskless.cb  = w_go_diskless;
1872         mdev->md_sync_work.cb = w_md_sync;
1873         mdev->bm_io_work.w.cb = w_bitmap_io;
1874         mdev->start_resync_work.cb = w_start_resync;
1875
1876         mdev->resync_work.mdev  = mdev;
1877         mdev->unplug_work.mdev  = mdev;
1878         mdev->go_diskless.mdev  = mdev;
1879         mdev->md_sync_work.mdev = mdev;
1880         mdev->bm_io_work.w.mdev = mdev;
1881         mdev->start_resync_work.mdev = mdev;
1882
1883         init_timer(&mdev->resync_timer);
1884         init_timer(&mdev->md_sync_timer);
1885         init_timer(&mdev->start_resync_timer);
1886         init_timer(&mdev->request_timer);
1887         mdev->resync_timer.function = resync_timer_fn;
1888         mdev->resync_timer.data = (unsigned long) mdev;
1889         mdev->md_sync_timer.function = md_sync_timer_fn;
1890         mdev->md_sync_timer.data = (unsigned long) mdev;
1891         mdev->start_resync_timer.function = start_resync_timer_fn;
1892         mdev->start_resync_timer.data = (unsigned long) mdev;
1893         mdev->request_timer.function = request_timer_fn;
1894         mdev->request_timer.data = (unsigned long) mdev;
1895
1896         init_waitqueue_head(&mdev->misc_wait);
1897         init_waitqueue_head(&mdev->state_wait);
1898         init_waitqueue_head(&mdev->ee_wait);
1899         init_waitqueue_head(&mdev->al_wait);
1900         init_waitqueue_head(&mdev->seq_wait);
1901
1902         /* mdev->tconn->agreed_pro_version gets initialized in drbd_connect() */
1903         mdev->write_ordering = WO_bdev_flush;
1904         mdev->resync_wenr = LC_FREE;
1905         mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1906         mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1907 }
1908
1909 void drbd_mdev_cleanup(struct drbd_conf *mdev)
1910 {
1911         int i;
1912         if (mdev->tconn->receiver.t_state != NONE)
1913                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
1914                                 mdev->tconn->receiver.t_state);
1915
1916         /* no need to lock it, I'm the only thread alive */
1917         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
1918                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
1919         mdev->al_writ_cnt  =
1920         mdev->bm_writ_cnt  =
1921         mdev->read_cnt     =
1922         mdev->recv_cnt     =
1923         mdev->send_cnt     =
1924         mdev->writ_cnt     =
1925         mdev->p_size       =
1926         mdev->rs_start     =
1927         mdev->rs_total     =
1928         mdev->rs_failed    = 0;
1929         mdev->rs_last_events = 0;
1930         mdev->rs_last_sect_ev = 0;
1931         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1932                 mdev->rs_mark_left[i] = 0;
1933                 mdev->rs_mark_time[i] = 0;
1934         }
1935         D_ASSERT(mdev->tconn->net_conf == NULL);
1936
1937         drbd_set_my_capacity(mdev, 0);
1938         if (mdev->bitmap) {
1939                 /* maybe never allocated. */
1940                 drbd_bm_resize(mdev, 0, 1);
1941                 drbd_bm_cleanup(mdev);
1942         }
1943
1944         drbd_free_resources(mdev);
1945         clear_bit(AL_SUSPENDED, &mdev->flags);
1946
1947         /*
1948          * currently we drbd_init_ee only on module load, so
1949          * we may do drbd_release_ee only on module unload!
1950          */
1951         D_ASSERT(list_empty(&mdev->active_ee));
1952         D_ASSERT(list_empty(&mdev->sync_ee));
1953         D_ASSERT(list_empty(&mdev->done_ee));
1954         D_ASSERT(list_empty(&mdev->read_ee));
1955         D_ASSERT(list_empty(&mdev->net_ee));
1956         D_ASSERT(list_empty(&mdev->resync_reads));
1957         D_ASSERT(list_empty(&mdev->tconn->data.work.q));
1958         D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
1959         D_ASSERT(list_empty(&mdev->resync_work.list));
1960         D_ASSERT(list_empty(&mdev->unplug_work.list));
1961         D_ASSERT(list_empty(&mdev->go_diskless.list));
1962
1963         drbd_set_defaults(mdev);
1964 }
1965
1966
1967 static void drbd_destroy_mempools(void)
1968 {
1969         struct page *page;
1970
1971         while (drbd_pp_pool) {
1972                 page = drbd_pp_pool;
1973                 drbd_pp_pool = (struct page *)page_private(page);
1974                 __free_page(page);
1975                 drbd_pp_vacant--;
1976         }
1977
1978         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
1979
1980         if (drbd_md_io_bio_set)
1981                 bioset_free(drbd_md_io_bio_set);
1982         if (drbd_md_io_page_pool)
1983                 mempool_destroy(drbd_md_io_page_pool);
1984         if (drbd_ee_mempool)
1985                 mempool_destroy(drbd_ee_mempool);
1986         if (drbd_request_mempool)
1987                 mempool_destroy(drbd_request_mempool);
1988         if (drbd_ee_cache)
1989                 kmem_cache_destroy(drbd_ee_cache);
1990         if (drbd_request_cache)
1991                 kmem_cache_destroy(drbd_request_cache);
1992         if (drbd_bm_ext_cache)
1993                 kmem_cache_destroy(drbd_bm_ext_cache);
1994         if (drbd_al_ext_cache)
1995                 kmem_cache_destroy(drbd_al_ext_cache);
1996
1997         drbd_md_io_bio_set   = NULL;
1998         drbd_md_io_page_pool = NULL;
1999         drbd_ee_mempool      = NULL;
2000         drbd_request_mempool = NULL;
2001         drbd_ee_cache        = NULL;
2002         drbd_request_cache   = NULL;
2003         drbd_bm_ext_cache    = NULL;
2004         drbd_al_ext_cache    = NULL;
2005
2006         return;
2007 }
2008
2009 static int drbd_create_mempools(void)
2010 {
2011         struct page *page;
2012         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
2013         int i;
2014
2015         /* prepare our caches and mempools */
2016         drbd_request_mempool = NULL;
2017         drbd_ee_cache        = NULL;
2018         drbd_request_cache   = NULL;
2019         drbd_bm_ext_cache    = NULL;
2020         drbd_al_ext_cache    = NULL;
2021         drbd_pp_pool         = NULL;
2022         drbd_md_io_page_pool = NULL;
2023         drbd_md_io_bio_set   = NULL;
2024
2025         /* caches */
2026         drbd_request_cache = kmem_cache_create(
2027                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2028         if (drbd_request_cache == NULL)
2029                 goto Enomem;
2030
2031         drbd_ee_cache = kmem_cache_create(
2032                 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2033         if (drbd_ee_cache == NULL)
2034                 goto Enomem;
2035
2036         drbd_bm_ext_cache = kmem_cache_create(
2037                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2038         if (drbd_bm_ext_cache == NULL)
2039                 goto Enomem;
2040
2041         drbd_al_ext_cache = kmem_cache_create(
2042                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2043         if (drbd_al_ext_cache == NULL)
2044                 goto Enomem;
2045
2046         /* mempools */
2047         drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2048         if (drbd_md_io_bio_set == NULL)
2049                 goto Enomem;
2050
2051         drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2052         if (drbd_md_io_page_pool == NULL)
2053                 goto Enomem;
2054
2055         drbd_request_mempool = mempool_create(number,
2056                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2057         if (drbd_request_mempool == NULL)
2058                 goto Enomem;
2059
2060         drbd_ee_mempool = mempool_create(number,
2061                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2062         if (drbd_ee_mempool == NULL)
2063                 goto Enomem;
2064
2065         /* drbd's page pool */
2066         spin_lock_init(&drbd_pp_lock);
2067
2068         for (i = 0; i < number; i++) {
2069                 page = alloc_page(GFP_HIGHUSER);
2070                 if (!page)
2071                         goto Enomem;
2072                 set_page_private(page, (unsigned long)drbd_pp_pool);
2073                 drbd_pp_pool = page;
2074         }
2075         drbd_pp_vacant = number;
2076
2077         return 0;
2078
2079 Enomem:
2080         drbd_destroy_mempools(); /* in case we allocated some */
2081         return -ENOMEM;
2082 }
2083
2084 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2085         void *unused)
2086 {
2087         /* just so we have it.  you never know what interesting things we
2088          * might want to do here some day...
2089          */
2090
2091         return NOTIFY_DONE;
2092 }
2093
2094 static struct notifier_block drbd_notifier = {
2095         .notifier_call = drbd_notify_sys,
2096 };
2097
2098 static void drbd_release_ee_lists(struct drbd_conf *mdev)
2099 {
2100         int rr;
2101
2102         rr = drbd_release_ee(mdev, &mdev->active_ee);
2103         if (rr)
2104                 dev_err(DEV, "%d EEs in active list found!\n", rr);
2105
2106         rr = drbd_release_ee(mdev, &mdev->sync_ee);
2107         if (rr)
2108                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2109
2110         rr = drbd_release_ee(mdev, &mdev->read_ee);
2111         if (rr)
2112                 dev_err(DEV, "%d EEs in read list found!\n", rr);
2113
2114         rr = drbd_release_ee(mdev, &mdev->done_ee);
2115         if (rr)
2116                 dev_err(DEV, "%d EEs in done list found!\n", rr);
2117
2118         rr = drbd_release_ee(mdev, &mdev->net_ee);
2119         if (rr)
2120                 dev_err(DEV, "%d EEs in net list found!\n", rr);
2121 }
2122
2123 /* caution. no locking. */
2124 void drbd_delete_device(unsigned int minor)
2125 {
2126         struct drbd_conf *mdev = minor_to_mdev(minor);
2127
2128         if (!mdev)
2129                 return;
2130
2131         idr_remove(&mdev->tconn->volumes, mdev->vnr);
2132         idr_remove(&minors, minor);
2133         synchronize_rcu();
2134
2135         /* paranoia asserts */
2136         D_ASSERT(mdev->open_cnt == 0);
2137         D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2138         /* end paranoia asserts */
2139
2140         del_gendisk(mdev->vdisk);
2141
2142         /* cleanup stuff that may have been allocated during
2143          * device (re-)configuration or state changes */
2144
2145         if (mdev->this_bdev)
2146                 bdput(mdev->this_bdev);
2147
2148         drbd_free_resources(mdev);
2149
2150         drbd_release_ee_lists(mdev);
2151
2152         lc_destroy(mdev->act_log);
2153         lc_destroy(mdev->resync);
2154
2155         kfree(mdev->p_uuid);
2156         /* mdev->p_uuid = NULL; */
2157
2158         /* cleanup the rest that has been
2159          * allocated from drbd_new_device
2160          * and actually free the mdev itself */
2161         drbd_free_mdev(mdev);
2162 }
2163
2164 static void drbd_cleanup(void)
2165 {
2166         unsigned int i;
2167         struct drbd_conf *mdev;
2168
2169         unregister_reboot_notifier(&drbd_notifier);
2170
2171         /* first remove proc,
2172          * drbdsetup uses its presence to detect
2173          * whether DRBD is loaded.
2174          * If we would get stuck in proc removal,
2175          * but have netlink already deregistered,
2176          * some drbdsetup commands may wait forever
2177          * for an answer.
2178          */
2179         if (drbd_proc)
2180                 remove_proc_entry("drbd", NULL);
2181
2182         drbd_genl_unregister();
2183
2184         idr_for_each_entry(&minors, mdev, i)
2185                 drbd_delete_device(i);
2186         drbd_destroy_mempools();
2187         unregister_blkdev(DRBD_MAJOR, "drbd");
2188
2189         idr_destroy(&minors);
2190
2191         printk(KERN_INFO "drbd: module cleanup done.\n");
2192 }
2193
2194 /**
2195  * drbd_congested() - Callback for pdflush
2196  * @congested_data:     User data
2197  * @bdi_bits:           Bits pdflush is currently interested in
2198  *
2199  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2200  */
2201 static int drbd_congested(void *congested_data, int bdi_bits)
2202 {
2203         struct drbd_conf *mdev = congested_data;
2204         struct request_queue *q;
2205         char reason = '-';
2206         int r = 0;
2207
2208         if (!may_inc_ap_bio(mdev)) {
2209                 /* DRBD has frozen IO */
2210                 r = bdi_bits;
2211                 reason = 'd';
2212                 goto out;
2213         }
2214
2215         if (get_ldev(mdev)) {
2216                 q = bdev_get_queue(mdev->ldev->backing_bdev);
2217                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2218                 put_ldev(mdev);
2219                 if (r)
2220                         reason = 'b';
2221         }
2222
2223         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
2224                 r |= (1 << BDI_async_congested);
2225                 reason = reason == 'b' ? 'a' : 'n';
2226         }
2227
2228 out:
2229         mdev->congestion_reason = reason;
2230         return r;
2231 }
2232
2233 static void drbd_init_workqueue(struct drbd_work_queue* wq)
2234 {
2235         sema_init(&wq->s, 0);
2236         spin_lock_init(&wq->q_lock);
2237         INIT_LIST_HEAD(&wq->q);
2238 }
2239
2240 struct drbd_tconn *conn_by_name(const char *name)
2241 {
2242         struct drbd_tconn *tconn;
2243
2244         if (!name || !name[0])
2245                 return NULL;
2246
2247         mutex_lock(&drbd_cfg_mutex);
2248         list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2249                 if (!strcmp(tconn->name, name))
2250                         goto found;
2251         }
2252         tconn = NULL;
2253 found:
2254         mutex_unlock(&drbd_cfg_mutex);
2255         return tconn;
2256 }
2257
2258 struct drbd_tconn *drbd_new_tconn(const char *name)
2259 {
2260         struct drbd_tconn *tconn;
2261
2262         tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
2263         if (!tconn)
2264                 return NULL;
2265
2266         tconn->name = kstrdup(name, GFP_KERNEL);
2267         if (!tconn->name)
2268                 goto fail;
2269
2270         if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
2271                 goto fail;
2272
2273         if (!tl_init(tconn))
2274                 goto fail;
2275
2276         tconn->cstate = C_STANDALONE;
2277         mutex_init(&tconn->cstate_mutex);
2278         spin_lock_init(&tconn->req_lock);
2279         atomic_set(&tconn->net_cnt, 0);
2280         init_waitqueue_head(&tconn->net_cnt_wait);
2281         init_waitqueue_head(&tconn->ping_wait);
2282         idr_init(&tconn->volumes);
2283
2284         drbd_init_workqueue(&tconn->data.work);
2285         mutex_init(&tconn->data.mutex);
2286
2287         drbd_init_workqueue(&tconn->meta.work);
2288         mutex_init(&tconn->meta.mutex);
2289
2290         drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
2291         drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
2292         drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
2293
2294         tconn->res_opts = (struct res_opts) {
2295                 {}, 0, /* cpu_mask */
2296                 DRBD_ON_NO_DATA_DEF, /* on_no_data */
2297         };
2298
2299         mutex_lock(&drbd_cfg_mutex);
2300         list_add_tail(&tconn->all_tconn, &drbd_tconns);
2301         mutex_unlock(&drbd_cfg_mutex);
2302
2303         return tconn;
2304
2305 fail:
2306         tl_cleanup(tconn);
2307         free_cpumask_var(tconn->cpu_mask);
2308         kfree(tconn->name);
2309         kfree(tconn);
2310
2311         return NULL;
2312 }
2313
2314 void drbd_free_tconn(struct drbd_tconn *tconn)
2315 {
2316         list_del(&tconn->all_tconn);
2317         idr_destroy(&tconn->volumes);
2318
2319         free_cpumask_var(tconn->cpu_mask);
2320         kfree(tconn->name);
2321         kfree(tconn->int_dig_out);
2322         kfree(tconn->int_dig_in);
2323         kfree(tconn->int_dig_vv);
2324         kfree(tconn);
2325 }
2326
2327 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
2328 {
2329         struct drbd_conf *mdev;
2330         struct gendisk *disk;
2331         struct request_queue *q;
2332         int vnr_got = vnr;
2333         int minor_got = minor;
2334         enum drbd_ret_code err = ERR_NOMEM;
2335
2336         mdev = minor_to_mdev(minor);
2337         if (mdev)
2338                 return ERR_MINOR_EXISTS;
2339
2340         /* GFP_KERNEL, we are outside of all write-out paths */
2341         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2342         if (!mdev)
2343                 return ERR_NOMEM;
2344
2345         mdev->tconn = tconn;
2346         mdev->minor = minor;
2347         mdev->vnr = vnr;
2348
2349         drbd_init_set_defaults(mdev);
2350
2351         q = blk_alloc_queue(GFP_KERNEL);
2352         if (!q)
2353                 goto out_no_q;
2354         mdev->rq_queue = q;
2355         q->queuedata   = mdev;
2356
2357         disk = alloc_disk(1);
2358         if (!disk)
2359                 goto out_no_disk;
2360         mdev->vdisk = disk;
2361
2362         set_disk_ro(disk, true);
2363
2364         disk->queue = q;
2365         disk->major = DRBD_MAJOR;
2366         disk->first_minor = minor;
2367         disk->fops = &drbd_ops;
2368         sprintf(disk->disk_name, "drbd%d", minor);
2369         disk->private_data = mdev;
2370
2371         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2372         /* we have no partitions. we contain only ourselves. */
2373         mdev->this_bdev->bd_contains = mdev->this_bdev;
2374
2375         q->backing_dev_info.congested_fn = drbd_congested;
2376         q->backing_dev_info.congested_data = mdev;
2377
2378         blk_queue_make_request(q, drbd_make_request);
2379         /* Setting the max_hw_sectors to an odd value of 8 KiB here.
2380            This triggers a max_bio_size message upon first attach or connect. */
2381         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2382         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2383         blk_queue_merge_bvec(q, drbd_merge_bvec);
2384         q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
2385
2386         mdev->md_io_page = alloc_page(GFP_KERNEL);
2387         if (!mdev->md_io_page)
2388                 goto out_no_io_page;
2389
2390         if (drbd_bm_init(mdev))
2391                 goto out_no_bitmap;
2392         mdev->read_requests = RB_ROOT;
2393         mdev->write_requests = RB_ROOT;
2394
2395         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2396         if (!mdev->current_epoch)
2397                 goto out_no_epoch;
2398
2399         INIT_LIST_HEAD(&mdev->current_epoch->list);
2400         mdev->epochs = 1;
2401
2402         if (!idr_pre_get(&minors, GFP_KERNEL))
2403                 goto out_no_minor_idr;
2404         if (idr_get_new_above(&minors, mdev, minor, &minor_got))
2405                 goto out_no_minor_idr;
2406         if (minor_got != minor) {
2407                 err = ERR_MINOR_EXISTS;
2408                 drbd_msg_put_info("requested minor exists already");
2409                 goto out_idr_remove_minor;
2410         }
2411
2412         if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
2413                 goto out_idr_remove_minor;
2414         if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
2415                 goto out_idr_remove_minor;
2416         if (vnr_got != vnr) {
2417                 err = ERR_INVALID_REQUEST;
2418                 drbd_msg_put_info("requested volume exists already");
2419                 goto out_idr_remove_vol;
2420         }
2421         add_disk(disk);
2422
2423         /* inherit the connection state */
2424         mdev->state.conn = tconn->cstate;
2425         if (mdev->state.conn == C_WF_REPORT_PARAMS)
2426                 drbd_connected(vnr, mdev, tconn);
2427
2428         return NO_ERROR;
2429
2430 out_idr_remove_vol:
2431         idr_remove(&tconn->volumes, vnr_got);
2432 out_idr_remove_minor:
2433         idr_remove(&minors, minor_got);
2434         synchronize_rcu();
2435 out_no_minor_idr:
2436         kfree(mdev->current_epoch);
2437 out_no_epoch:
2438         drbd_bm_cleanup(mdev);
2439 out_no_bitmap:
2440         __free_page(mdev->md_io_page);
2441 out_no_io_page:
2442         put_disk(disk);
2443 out_no_disk:
2444         blk_cleanup_queue(q);
2445 out_no_q:
2446         kfree(mdev);
2447         return err;
2448 }
2449
2450 /* counterpart of drbd_new_device.
2451  * last part of drbd_delete_device. */
2452 void drbd_free_mdev(struct drbd_conf *mdev)
2453 {
2454         kfree(mdev->current_epoch);
2455         if (mdev->bitmap) /* should no longer be there. */
2456                 drbd_bm_cleanup(mdev);
2457         __free_page(mdev->md_io_page);
2458         put_disk(mdev->vdisk);
2459         blk_cleanup_queue(mdev->rq_queue);
2460         kfree(mdev);
2461 }
2462
2463
2464 int __init drbd_init(void)
2465 {
2466         int err;
2467
2468         BUILD_BUG_ON(sizeof(struct p_header80) != sizeof(struct p_header95));
2469         BUILD_BUG_ON(sizeof(struct p_handshake) != 80);
2470
2471         if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
2472                 printk(KERN_ERR
2473                        "drbd: invalid minor_count (%d)\n", minor_count);
2474 #ifdef MODULE
2475                 return -EINVAL;
2476 #else
2477                 minor_count = 8;
2478 #endif
2479         }
2480
2481         err = register_blkdev(DRBD_MAJOR, "drbd");
2482         if (err) {
2483                 printk(KERN_ERR
2484                        "drbd: unable to register block device major %d\n",
2485                        DRBD_MAJOR);
2486                 return err;
2487         }
2488
2489         err = drbd_genl_register();
2490         if (err) {
2491                 printk(KERN_ERR "drbd: unable to register generic netlink family\n");
2492                 goto fail;
2493         }
2494
2495
2496         register_reboot_notifier(&drbd_notifier);
2497
2498         /*
2499          * allocate all necessary structs
2500          */
2501         err = -ENOMEM;
2502
2503         init_waitqueue_head(&drbd_pp_wait);
2504
2505         drbd_proc = NULL; /* play safe for drbd_cleanup */
2506         idr_init(&minors);
2507
2508         err = drbd_create_mempools();
2509         if (err)
2510                 goto fail;
2511
2512         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
2513         if (!drbd_proc) {
2514                 printk(KERN_ERR "drbd: unable to register proc file\n");
2515                 goto fail;
2516         }
2517
2518         rwlock_init(&global_state_lock);
2519         INIT_LIST_HEAD(&drbd_tconns);
2520
2521         printk(KERN_INFO "drbd: initialized. "
2522                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2523                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2524         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
2525         printk(KERN_INFO "drbd: registered as block device major %d\n",
2526                 DRBD_MAJOR);
2527
2528         return 0; /* Success! */
2529
2530 fail:
2531         drbd_cleanup();
2532         if (err == -ENOMEM)
2533                 /* currently always the case */
2534                 printk(KERN_ERR "drbd: ran out of memory\n");
2535         else
2536                 printk(KERN_ERR "drbd: initialization failure\n");
2537         return err;
2538 }
2539
2540 void drbd_free_bc(struct drbd_backing_dev *ldev)
2541 {
2542         if (ldev == NULL)
2543                 return;
2544
2545         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2546         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2547
2548         kfree(ldev);
2549 }
2550
2551 void drbd_free_sock(struct drbd_tconn *tconn)
2552 {
2553         if (tconn->data.socket) {
2554                 mutex_lock(&tconn->data.mutex);
2555                 kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
2556                 sock_release(tconn->data.socket);
2557                 tconn->data.socket = NULL;
2558                 mutex_unlock(&tconn->data.mutex);
2559         }
2560         if (tconn->meta.socket) {
2561                 mutex_lock(&tconn->meta.mutex);
2562                 kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
2563                 sock_release(tconn->meta.socket);
2564                 tconn->meta.socket = NULL;
2565                 mutex_unlock(&tconn->meta.mutex);
2566         }
2567 }
2568
2569
2570 void drbd_free_resources(struct drbd_conf *mdev)
2571 {
2572         crypto_free_hash(mdev->tconn->csums_tfm);
2573         mdev->tconn->csums_tfm = NULL;
2574         crypto_free_hash(mdev->tconn->verify_tfm);
2575         mdev->tconn->verify_tfm = NULL;
2576         crypto_free_hash(mdev->tconn->cram_hmac_tfm);
2577         mdev->tconn->cram_hmac_tfm = NULL;
2578         crypto_free_hash(mdev->tconn->integrity_w_tfm);
2579         mdev->tconn->integrity_w_tfm = NULL;
2580         crypto_free_hash(mdev->tconn->integrity_r_tfm);
2581         mdev->tconn->integrity_r_tfm = NULL;
2582
2583         drbd_free_sock(mdev->tconn);
2584
2585         __no_warn(local,
2586                   drbd_free_bc(mdev->ldev);
2587                   mdev->ldev = NULL;);
2588 }
2589
2590 /* meta data management */
2591
2592 struct meta_data_on_disk {
2593         u64 la_size;           /* last agreed size. */
2594         u64 uuid[UI_SIZE];   /* UUIDs. */
2595         u64 device_uuid;
2596         u64 reserved_u64_1;
2597         u32 flags;             /* MDF */
2598         u32 magic;
2599         u32 md_size_sect;
2600         u32 al_offset;         /* offset to this block */
2601         u32 al_nr_extents;     /* important for restoring the AL */
2602               /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
2603         u32 bm_offset;         /* offset to the bitmap, from here */
2604         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
2605         u32 la_peer_max_bio_size;   /* last peer max_bio_size */
2606         u32 reserved_u32[3];
2607
2608 } __packed;
2609
2610 /**
2611  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
2612  * @mdev:       DRBD device.
2613  */
2614 void drbd_md_sync(struct drbd_conf *mdev)
2615 {
2616         struct meta_data_on_disk *buffer;
2617         sector_t sector;
2618         int i;
2619
2620         del_timer(&mdev->md_sync_timer);
2621         /* timer may be rearmed by drbd_md_mark_dirty() now. */
2622         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
2623                 return;
2624
2625         /* Here we use D_FAILED and not D_ATTACHING because we try to write
2626          * metadata even if we detach due to a disk failure! */
2627         if (!get_ldev_if_state(mdev, D_FAILED))
2628                 return;
2629
2630         mutex_lock(&mdev->md_io_mutex);
2631         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
2632         memset(buffer, 0, 512);
2633
2634         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
2635         for (i = UI_CURRENT; i < UI_SIZE; i++)
2636                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
2637         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
2638         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
2639
2640         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
2641         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
2642         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
2643         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
2644         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
2645
2646         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
2647         buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
2648
2649         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
2650         sector = mdev->ldev->md.md_offset;
2651
2652         if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
2653                 /* this was a try anyways ... */
2654                 dev_err(DEV, "meta data update failed!\n");
2655                 drbd_chk_io_error(mdev, 1, true);
2656         }
2657
2658         /* Update mdev->ldev->md.la_size_sect,
2659          * since we updated it on metadata. */
2660         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
2661
2662         mutex_unlock(&mdev->md_io_mutex);
2663         put_ldev(mdev);
2664 }
2665
2666 /**
2667  * drbd_md_read() - Reads in the meta data super block
2668  * @mdev:       DRBD device.
2669  * @bdev:       Device from which the meta data should be read in.
2670  *
2671  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
2672  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
2673  */
2674 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
2675 {
2676         struct meta_data_on_disk *buffer;
2677         int i, rv = NO_ERROR;
2678
2679         if (!get_ldev_if_state(mdev, D_ATTACHING))
2680                 return ERR_IO_MD_DISK;
2681
2682         mutex_lock(&mdev->md_io_mutex);
2683         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
2684
2685         if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
2686                 /* NOTE: can't do normal error processing here as this is
2687                    called BEFORE disk is attached */
2688                 dev_err(DEV, "Error while reading metadata.\n");
2689                 rv = ERR_IO_MD_DISK;
2690                 goto err;
2691         }
2692
2693         if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
2694                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
2695                 rv = ERR_MD_INVALID;
2696                 goto err;
2697         }
2698         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
2699                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
2700                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
2701                 rv = ERR_MD_INVALID;
2702                 goto err;
2703         }
2704         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
2705                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
2706                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
2707                 rv = ERR_MD_INVALID;
2708                 goto err;
2709         }
2710         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
2711                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
2712                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
2713                 rv = ERR_MD_INVALID;
2714                 goto err;
2715         }
2716
2717         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
2718                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
2719                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
2720                 rv = ERR_MD_INVALID;
2721                 goto err;
2722         }
2723
2724         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
2725         for (i = UI_CURRENT; i < UI_SIZE; i++)
2726                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
2727         bdev->md.flags = be32_to_cpu(buffer->flags);
2728         bdev->dc.al_extents = be32_to_cpu(buffer->al_nr_extents);
2729         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
2730
2731         spin_lock_irq(&mdev->tconn->req_lock);
2732         if (mdev->state.conn < C_CONNECTED) {
2733                 int peer;
2734                 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
2735                 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
2736                 mdev->peer_max_bio_size = peer;
2737         }
2738         spin_unlock_irq(&mdev->tconn->req_lock);
2739
2740         if (bdev->dc.al_extents < 7)
2741                 bdev->dc.al_extents = 127;
2742
2743  err:
2744         mutex_unlock(&mdev->md_io_mutex);
2745         put_ldev(mdev);
2746
2747         return rv;
2748 }
2749
2750 /**
2751  * drbd_md_mark_dirty() - Mark meta data super block as dirty
2752  * @mdev:       DRBD device.
2753  *
2754  * Call this function if you change anything that should be written to
2755  * the meta-data super block. This function sets MD_DIRTY and starts a
2756  * timer that ensures that drbd_md_sync() gets called within five seconds.
2757  */
2758 #ifdef DEBUG
2759 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
2760 {
2761         if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
2762                 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
2763                 mdev->last_md_mark_dirty.line = line;
2764                 mdev->last_md_mark_dirty.func = func;
2765         }
2766 }
2767 #else
2768 void drbd_md_mark_dirty(struct drbd_conf *mdev)
2769 {
2770         if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
2771                 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
2772 }
2773 #endif
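/*
 * Usage sketch (editorial illustration of the pattern described in the
 * kernel-doc above, modelled on drbd_md_set_flag()/_drbd_uuid_set() in
 * this file; assumes a local-disk reference is held, cf. __must_hold(local)):
 *
 *	mdev->ldev->md.flags |= MDF_FULL_SYNC;
 *	drbd_md_mark_dirty(mdev);
 *	...
 *	drbd_md_sync(mdev);	(or simply let md_sync_timer expire)
 *
 * Every in-memory change to ldev->md is followed by a mark-dirty, and
 * drbd_md_sync() only touches the disk while MD_DIRTY is still set.
 */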
2774
2775 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
2776 {
2777         int i;
2778
2779         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
2780                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
2781 }
2782
2783 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
2784 {
2785         if (idx == UI_CURRENT) {
2786                 if (mdev->state.role == R_PRIMARY)
2787                         val |= 1;
2788                 else
2789                         val &= ~((u64)1);
2790
2791                 drbd_set_ed_uuid(mdev, val);
2792         }
2793
2794         mdev->ldev->md.uuid[idx] = val;
2795         drbd_md_mark_dirty(mdev);
2796 }
2797
2798
2799 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
2800 {
2801         if (mdev->ldev->md.uuid[idx]) {
2802                 drbd_uuid_move_history(mdev);
2803                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
2804         }
2805         _drbd_uuid_set(mdev, idx, val);
2806 }
2807
2808 /**
2809  * drbd_uuid_new_current() - Creates a new current UUID
2810  * @mdev:       DRBD device.
2811  *
2812  * Creates a new current UUID, and rotates the old current UUID into
2813  * the bitmap slot. Causes an incremental resync upon next connect.
2814  */
2815 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
2816 {
2817         u64 val;
2818         unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
2819
2820         if (bm_uuid)
2821                 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
2822
2823         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
2824
2825         get_random_bytes(&val, sizeof(u64));
2826         _drbd_uuid_set(mdev, UI_CURRENT, val);
2827         drbd_print_uuids(mdev, "new current UUID");
2828         /* get it to stable storage _now_ */
2829         drbd_md_sync(mdev);
2830 }
2831
2832 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
2833 {
2834         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
2835                 return;
2836
2837         if (val == 0) {
2838                 drbd_uuid_move_history(mdev);
2839                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2840                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
2841         } else {
2842                 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
2843                 if (bm_uuid)
2844                         dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
2845
2846                 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
2847         }
2848         drbd_md_mark_dirty(mdev);
2849 }
2850
2851 /**
2852  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
2853  * @mdev:       DRBD device.
2854  *
2855  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
2856  */
2857 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
2858 {
2859         int rv = -EIO;
2860
2861         if (get_ldev_if_state(mdev, D_ATTACHING)) {
2862                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
2863                 drbd_md_sync(mdev);
2864                 drbd_bm_set_all(mdev);
2865
2866                 rv = drbd_bm_write(mdev);
2867
2868                 if (!rv) {
2869                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2870                         drbd_md_sync(mdev);
2871                 }
2872
2873                 put_ldev(mdev);
2874         }
2875
2876         return rv;
2877 }
2878
2879 /**
2880  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
2881  * @mdev:       DRBD device.
2882  *
2883  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
2884  */
2885 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
2886 {
2887         int rv = -EIO;
2888
2889         drbd_resume_al(mdev);
2890         if (get_ldev_if_state(mdev, D_ATTACHING)) {
2891                 drbd_bm_clear_all(mdev);
2892                 rv = drbd_bm_write(mdev);
2893                 put_ldev(mdev);
2894         }
2895
2896         return rv;
2897 }
2898
2899 static int w_bitmap_io(struct drbd_work *w, int unused)
2900 {
2901         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
2902         struct drbd_conf *mdev = w->mdev;
2903         int rv = -EIO;
2904
2905         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
2906
2907         if (get_ldev(mdev)) {
2908                 drbd_bm_lock(mdev, work->why, work->flags);
2909                 rv = work->io_fn(mdev);
2910                 drbd_bm_unlock(mdev);
2911                 put_ldev(mdev);
2912         }
2913
2914         clear_bit_unlock(BITMAP_IO, &mdev->flags);
2915         wake_up(&mdev->misc_wait);
2916
2917         if (work->done)
2918                 work->done(mdev, rv);
2919
2920         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
2921         work->why = NULL;
2922         work->flags = 0;
2923
2924         return 0;
2925 }
2926
2927 void drbd_ldev_destroy(struct drbd_conf *mdev)
2928 {
2929         lc_destroy(mdev->resync);
2930         mdev->resync = NULL;
2931         lc_destroy(mdev->act_log);
2932         mdev->act_log = NULL;
2933         __no_warn(local,
2934                 drbd_free_bc(mdev->ldev);
2935                 mdev->ldev = NULL;);
2936
2937         clear_bit(GO_DISKLESS, &mdev->flags);
2938 }
2939
2940 static int w_go_diskless(struct drbd_work *w, int unused)
2941 {
2942         struct drbd_conf *mdev = w->mdev;
2943
2944         D_ASSERT(mdev->state.disk == D_FAILED);
2945         /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
2946          * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
2947          * the protected members anymore, though, so once put_ldev reaches zero
2948          * again, it will be safe to free them. */
2949         drbd_force_state(mdev, NS(disk, D_DISKLESS));
2950         return 0;
2951 }
2952
2953 void drbd_go_diskless(struct drbd_conf *mdev)
2954 {
2955         D_ASSERT(mdev->state.disk == D_FAILED);
2956         if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
2957                 drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
2958 }
2959
2960 /**
2961  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
2962  * @mdev:       DRBD device.
2963  * @io_fn:      IO callback to be called when bitmap IO is possible
2964  * @done:       callback to be called after the bitmap IO was performed
2965  * @why:        Descriptive text of the reason for doing the IO
 * @flags:      Bitmap locking flags (enum bm_flag)
2966  *
2967  * While IO on the bitmap happens we freeze application IO, thus ensuring
2968  * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
2969  * called from worker context. It MUST NOT be used while a previous such
2970  * work is still pending!
2971  */
2972 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
2973                           int (*io_fn)(struct drbd_conf *),
2974                           void (*done)(struct drbd_conf *, int),
2975                           char *why, enum bm_flag flags)
2976 {
2977         D_ASSERT(current == mdev->tconn->worker.task);
2978
2979         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
2980         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
2981         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
2982         if (mdev->bm_io_work.why)
2983                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
2984                         why, mdev->bm_io_work.why);
2985
2986         mdev->bm_io_work.io_fn = io_fn;
2987         mdev->bm_io_work.done = done;
2988         mdev->bm_io_work.why = why;
2989         mdev->bm_io_work.flags = flags;
2990
2991         spin_lock_irq(&mdev->tconn->req_lock);
2992         set_bit(BITMAP_IO, &mdev->flags);
2993         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
2994                 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
2995                         drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
2996         }
2997         spin_unlock_irq(&mdev->tconn->req_lock);
2998 }
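
/*
 * Usage sketch (illustrative only): a typical caller runs in worker context
 * and pairs an io_fn with a done callback; some_done_fn below is a
 * hypothetical function of type void (*)(struct drbd_conf *, int), and the
 * flag choice and "why" text are just examples:
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &some_done_fn,
 *			     "set_n_write before resync", BM_LOCKED_MASK);
 *
 * The done callback is later invoked from w_bitmap_io() with the io_fn's
 * return value, once application IO has drained and the bitmap IO finished.
 */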
2999
3000 /**
3001  * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3002  * @mdev:       DRBD device.
3003  * @io_fn:      IO callback to be called when bitmap IO is possible
3004  * @why:        Descriptive text of the reason for doing the IO
 * @flags:      Bitmap locking flags (enum bm_flag)
3005  *
3006  * Freezes application IO while the actual IO operation runs. This
3007  * function MAY NOT be called from worker context.
3008  */
3009 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
3010                 char *why, enum bm_flag flags)
3011 {
3012         int rv;
3013
3014         D_ASSERT(current != mdev->tconn->worker.task);
3015
3016         if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3017                 drbd_suspend_io(mdev);
3018
3019         drbd_bm_lock(mdev, why, flags);
3020         rv = io_fn(mdev);
3021         drbd_bm_unlock(mdev);
3022
3023         if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3024                 drbd_resume_io(mdev);
3025
3026         return rv;
3027 }
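
/*
 * Usage sketch (illustrative only): unlike drbd_queue_bitmap_io(), this
 * runs io_fn synchronously and must not be called from the worker.  The
 * flag choice and "why" text are just examples:
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
 *			    "clear_n_write example", BM_LOCKED_MASK);
 *	if (rv)
 *		dev_err(DEV, "bitmap IO failed: %d\n", rv);
 *
 * Whether application IO is suspended around io_fn depends on the
 * BM_LOCKED_SET_ALLOWED bits in flags, as the code above shows.
 */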
3028
3029 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3030 {
3031         if ((mdev->ldev->md.flags & flag) != flag) {
3032                 drbd_md_mark_dirty(mdev);
3033                 mdev->ldev->md.flags |= flag;
3034         }
3035 }
3036
3037 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3038 {
3039         if ((mdev->ldev->md.flags & flag) != 0) {
3040                 drbd_md_mark_dirty(mdev);
3041                 mdev->ldev->md.flags &= ~flag;
3042         }
3043 }
3044 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3045 {
3046         return (bdev->md.flags & flag) != 0;
3047 }
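
/*
 * Usage sketch (illustrative only): these helpers toggle persistent
 * meta-data flags while a local-disk reference is held (note the
 * __must_hold(local) annotations).  drbd_bmio_set_n_write() above is a
 * real user of the pattern:
 *
 *	drbd_md_set_flag(mdev, MDF_FULL_SYNC);
 *	drbd_md_sync(mdev);
 *	...
 *	drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
 *	drbd_md_sync(mdev);
 *
 * drbd_md_mark_dirty() only records that the on-disk meta-data needs an
 * update; drbd_md_sync() performs the actual write-out.
 */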
3048
3049 static void md_sync_timer_fn(unsigned long data)
3050 {
3051         struct drbd_conf *mdev = (struct drbd_conf *) data;
3052
3053         drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
3054 }
3055
3056 static int w_md_sync(struct drbd_work *w, int unused)
3057 {
3058         struct drbd_conf *mdev = w->mdev;
3059
3060         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3061 #ifdef DEBUG
3062         dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3063                 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3064 #endif
3065         drbd_md_sync(mdev);
3066         return 0;
3067 }
3068
3069 const char *cmdname(enum drbd_packet cmd)
3070 {
3071         /* THINK may need to become several global tables
3072          * when we want to support more than
3073          * one PRO_VERSION */
3074         static const char *cmdnames[] = {
3075                 [P_DATA]                = "Data",
3076                 [P_DATA_REPLY]          = "DataReply",
3077                 [P_RS_DATA_REPLY]       = "RSDataReply",
3078                 [P_BARRIER]             = "Barrier",
3079                 [P_BITMAP]              = "ReportBitMap",
3080                 [P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
3081                 [P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
3082                 [P_UNPLUG_REMOTE]       = "UnplugRemote",
3083                 [P_DATA_REQUEST]        = "DataRequest",
3084                 [P_RS_DATA_REQUEST]     = "RSDataRequest",
3085                 [P_SYNC_PARAM]          = "SyncParam",
3086                 [P_SYNC_PARAM89]        = "SyncParam89",
3087                 [P_PROTOCOL]            = "ReportProtocol",
3088                 [P_UUIDS]               = "ReportUUIDs",
3089                 [P_SIZES]               = "ReportSizes",
3090                 [P_STATE]               = "ReportState",
3091                 [P_SYNC_UUID]           = "ReportSyncUUID",
3092                 [P_AUTH_CHALLENGE]      = "AuthChallenge",
3093                 [P_AUTH_RESPONSE]       = "AuthResponse",
3094                 [P_PING]                = "Ping",
3095                 [P_PING_ACK]            = "PingAck",
3096                 [P_RECV_ACK]            = "RecvAck",
3097                 [P_WRITE_ACK]           = "WriteAck",
3098                 [P_RS_WRITE_ACK]        = "RSWriteAck",
3099                 [P_DISCARD_WRITE]       = "DiscardWrite",
3100                 [P_NEG_ACK]             = "NegAck",
3101                 [P_NEG_DREPLY]          = "NegDReply",
3102                 [P_NEG_RS_DREPLY]       = "NegRSDReply",
3103                 [P_BARRIER_ACK]         = "BarrierAck",
3104                 [P_STATE_CHG_REQ]       = "StateChgRequest",
3105                 [P_STATE_CHG_REPLY]     = "StateChgReply",
3106                 [P_OV_REQUEST]          = "OVRequest",
3107                 [P_OV_REPLY]            = "OVReply",
3108                 [P_OV_RESULT]           = "OVResult",
3109                 [P_CSUM_RS_REQUEST]     = "CsumRSRequest",
3110                 [P_RS_IS_IN_SYNC]       = "CsumRSIsInSync",
3111                 [P_COMPRESSED_BITMAP]   = "CBitmap",
3112                 [P_DELAY_PROBE]         = "DelayProbe",
3113                 [P_OUT_OF_SYNC]         = "OutOfSync",
3114                 [P_RETRY_WRITE]         = "RetryWrite",
3115         };
3116
3117         if (cmd == P_HAND_SHAKE_M)
3118                 return "HandShakeM";
3119         if (cmd == P_HAND_SHAKE_S)
3120                 return "HandShakeS";
3121         if (cmd == P_HAND_SHAKE)
3122                 return "HandShake";
3123         if (cmd >= ARRAY_SIZE(cmdnames))
3124                 return "Unknown";
3125         return cmdnames[cmd];
3126 }
3127
3128 /**
3129  * drbd_wait_misc() - wait for a request to make progress
3130  * @mdev:       device associated with the request
3131  * @i:          the struct drbd_interval embedded in struct drbd_request or
3132  *              struct drbd_peer_request
 *
 * Must be called with the req_lock held; the lock is dropped while sleeping
 * and re-acquired before returning.  Returns 0 on progress, -ETIMEDOUT on
 * timeout or connection loss, and -ERESTARTSYS if a signal is pending.
3133  */
3134 int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
3135 {
3136         struct net_conf *net_conf = mdev->tconn->net_conf;
3137         DEFINE_WAIT(wait);
3138         long timeout;
3139
3140         if (!net_conf)
3141                 return -ETIMEDOUT;
3142         timeout = MAX_SCHEDULE_TIMEOUT;
3143         if (net_conf->ko_count)
3144                 timeout = net_conf->timeout * HZ / 10 * net_conf->ko_count;
3145
3146         /* Indicate that mdev->misc_wait should be woken up on progress. */
3147         i->waiting = true;
3148         prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
3149         spin_unlock_irq(&mdev->tconn->req_lock);
3150         timeout = schedule_timeout(timeout);
3151         finish_wait(&mdev->misc_wait, &wait);
3152         spin_lock_irq(&mdev->tconn->req_lock);
3153         if (!timeout || mdev->state.conn < C_CONNECTED)
3154                 return -ETIMEDOUT;
3155         if (signal_pending(current))
3156                 return -ERESTARTSYS;
3157         return 0;
3158 }
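
/*
 * Caller sketch (illustrative only): as the unlock/lock pair above implies,
 * this is meant to be called with tconn->req_lock held:
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	err = drbd_wait_misc(mdev, i);
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 *	if (err)
 *		return err;	(-ETIMEDOUT or -ERESTARTSYS)
 *
 * Callers typically loop, re-checking their wait condition, until
 * drbd_wait_misc() returns 0.
 */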
3159
3160 #ifdef CONFIG_DRBD_FAULT_INJECTION
3161 /* Fault insertion support including random number generator shamelessly
3162  * stolen from kernel/rcutorture.c */
3163 struct fault_random_state {
3164         unsigned long state;
3165         unsigned long count;
3166 };
3167
3168 #define FAULT_RANDOM_MULT    39916801   /* prime */
3169 #define FAULT_RANDOM_ADD     479001701  /* prime */
3170 #define FAULT_RANDOM_REFRESH 10000
3171
3172 /*
3173  * Crude but fast random-number generator.  Uses a linear congruential
3174  * generator, with occasional help from get_random_bytes().
3175  */
3176 static unsigned long
3177 _drbd_fault_random(struct fault_random_state *rsp)
3178 {
3179         long refresh;
3180
3181         if (!rsp->count--) {
3182                 get_random_bytes(&refresh, sizeof(refresh));
3183                 rsp->state += refresh;
3184                 rsp->count = FAULT_RANDOM_REFRESH;
3185         }
3186         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3187         return swahw32(rsp->state);
3188 }
3189
3190 static char *_drbd_fault_str(unsigned int type)
3191 {
3192         static char *_faults[] = {
3193                 [DRBD_FAULT_MD_WR] = "Meta-data write",
3194                 [DRBD_FAULT_MD_RD] = "Meta-data read",
3195                 [DRBD_FAULT_RS_WR] = "Resync write",
3196                 [DRBD_FAULT_RS_RD] = "Resync read",
3197                 [DRBD_FAULT_DT_WR] = "Data write",
3198                 [DRBD_FAULT_DT_RD] = "Data read",
3199                 [DRBD_FAULT_DT_RA] = "Data read ahead",
3200                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3201                 [DRBD_FAULT_AL_EE] = "EE allocation",
3202                 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3203         };
3204
3205         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3206 }
3207
3208 unsigned int
3209 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3210 {
3211         static struct fault_random_state rrs = {0, 0};
3212
3213         unsigned int ret = (
3214                 (fault_devs == 0 ||
3215                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3216                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3217
3218         if (ret) {
3219                 fault_count++;
3220
3221                 if (__ratelimit(&drbd_ratelimit_state))
3222                         dev_warn(DEV, "***Simulating %s failure\n",
3223                                 _drbd_fault_str(type));
3224         }
3225
3226         return ret;
3227 }
3228 #endif
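
/*
 * Usage sketch (illustrative only): IO submission paths consult this through
 * a drbd_insert_fault() wrapper defined elsewhere in the driver (a no-op
 * when CONFIG_DRBD_FAULT_INJECTION is not set), roughly like:
 *
 *	if (drbd_insert_fault(mdev, DRBD_FAULT_MD_WR))
 *		bio_endio(bio, -EIO);
 *	else
 *		submit_bio(rw, bio);
 *
 * fault_rate is a percentage and fault_devs a bitmask of minor numbers, as
 * the checks in _drbd_insert_fault() show.
 */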
3229
3230 const char *drbd_buildtag(void)
3231 {
3232         /* When DRBD is built from external sources, this holds a reference
3233            to the git hash of the source code. */
3234
3235         static char buildtag[38] = "\0uilt-in";
3236
3237         if (buildtag[0] == 0) {
3238 #ifdef CONFIG_MODULES
3239                 if (THIS_MODULE != NULL)
3240                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3241                 else
3242 #endif
3243                         buildtag[0] = 'b';
3244         }
3245
3246         return buildtag;
3247 }
3248
3249 module_init(drbd_init)
3250 module_exit(drbd_cleanup)
3251
3252 EXPORT_SYMBOL(drbd_conn_str);
3253 EXPORT_SYMBOL(drbd_role_str);
3254 EXPORT_SYMBOL(drbd_disk_str);
3255 EXPORT_SYMBOL(drbd_set_st_err_str);