/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct packet_info {
        enum drbd_packet cmd;
        unsigned int size;
        unsigned int vnr;
        void *data;
};

enum finish_epoch {
        FE_STILL_LIVE,
        FE_DESTROYED,
        FE_RECYCLED,
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(int vnr, void *p, void *data);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
        struct page *page;
        struct page *tmp;

        BUG_ON(!n);
        BUG_ON(!head);

        page = *head;

        if (!page)
                return NULL;

        while (page) {
                tmp = page_chain_next(page);
                if (--n == 0)
                        break; /* found sufficient pages */
                if (tmp == NULL)
                        /* insufficient pages, don't use any of them. */
                        return NULL;
                page = tmp;
        }

        /* add end of list marker for the returned list */
        set_page_private(page, 0);
        /* actual return value, and adjustment of head */
        page = *head;
        *head = tmp;
        return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
        struct page *tmp;
        int i = 1;
        while ((tmp = page_chain_next(page)))
                ++i, page = tmp;
        if (len)
                *len = i;
        return page;
}

static int page_chain_free(struct page *page)
{
        struct page *tmp;
        int i = 0;
        page_chain_for_each_safe(page, tmp) {
                put_page(page);
                ++i;
        }
        return i;
}

static void page_chain_add(struct page **head,
                struct page *chain_first, struct page *chain_last)
{
#if 1
        struct page *tmp;
        tmp = page_chain_tail(chain_first, NULL);
        BUG_ON(tmp != chain_last);
#endif

        /* add chain to head */
        set_page_private(chain_last, (unsigned long)*head);
        *head = chain_first;
}
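
/*
 * Illustrative sketch (not compiled into the driver): the intended use of
 * the chain helpers above.  "n", "chain", "tail" and "len" are hypothetical
 * locals; the locking mirrors the real callers below.
 *
 *	spin_lock(&drbd_pp_lock);
 *	chain = page_chain_del(&drbd_pp_pool, n);
 *	spin_unlock(&drbd_pp_lock);
 *	if (chain) {
 *		... use the pages; page->private links them ...
 *		tail = page_chain_tail(chain, &len);	// no lock needed
 *		spin_lock(&drbd_pp_lock);
 *		page_chain_add(&drbd_pp_pool, chain, tail);
 *		spin_unlock(&drbd_pp_lock);
 *	}
 */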

static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
                                       unsigned int number)
{
        struct page *page = NULL;
        struct page *tmp = NULL;
        unsigned int i = 0;

        /* Yes, testing drbd_pp_vacant outside the lock is racy.
         * So what. It saves a spin_lock. */
        if (drbd_pp_vacant >= number) {
                spin_lock(&drbd_pp_lock);
                page = page_chain_del(&drbd_pp_pool, number);
                if (page)
                        drbd_pp_vacant -= number;
                spin_unlock(&drbd_pp_lock);
                if (page)
                        return page;
        }

        /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
         * "criss-cross" setup, that might cause write-out on some other DRBD,
         * which in turn might block on the other node at this very place.  */
        for (i = 0; i < number; i++) {
                tmp = alloc_page(GFP_TRY);
                if (!tmp)
                        break;
                set_page_private(tmp, (unsigned long)page);
                page = tmp;
        }

        if (i == number)
                return page;

        /* Not enough pages immediately available this time.
         * No need to jump around here, drbd_alloc_pages will retry this
         * function "soon". */
        if (page) {
                tmp = page_chain_tail(page, NULL);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
                                           struct list_head *to_be_freed)
{
        struct drbd_peer_request *peer_req;
        struct list_head *le, *tle;

        /* The EEs are always appended to the end of the list. Since
           they are sent in order over the wire, they have to finish
           in order. As soon as we see the first one that has not
           finished, we can stop examining the list... */

        list_for_each_safe(le, tle, &mdev->net_ee) {
                peer_req = list_entry(le, struct drbd_peer_request, w.list);
                if (drbd_peer_req_has_active_page(peer_req))
                        break;
                list_move(le, to_be_freed);
        }
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:       DRBD device.
 * @number:     number of pages requested
 * @retry:      whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
                              bool retry)
{
        struct page *page = NULL;
        struct net_conf *nc;
        DEFINE_WAIT(wait);
        int mxb;

        /* Yes, we may run up to @number over max_buffers. If we
         * follow it strictly, the admin will get it wrong anyways. */
        rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        mxb = nc ? nc->max_buffers : 1000000;
        rcu_read_unlock();

        if (atomic_read(&mdev->pp_in_use) < mxb)
                page = __drbd_alloc_pages(mdev, number);

        while (page == NULL) {
                prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

                drbd_kick_lo_and_reclaim_net(mdev);

                if (atomic_read(&mdev->pp_in_use) < mxb) {
                        page = __drbd_alloc_pages(mdev, number);
                        if (page)
                                break;
                }

                if (!retry)
                        break;

                if (signal_pending(current)) {
                        dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
                        break;
                }

                schedule();
        }
        finish_wait(&drbd_pp_wait, &wait);

        if (page)
                atomic_add(number, &mdev->pp_in_use);
        return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * It is also called from inside another spin_lock_irq(&mdev->tconn->req_lock) region.
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
        atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
        int i;

        if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
                i = page_chain_free(page);
        else {
                struct page *tmp;
                tmp = page_chain_tail(page, &i);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        i = atomic_sub_return(i, a);
        if (i < 0)
                dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
                        is_net ? "pp_in_use_by_net" : "pp_in_use", i);
        wake_up(&drbd_pp_wait);
}
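
/*
 * Illustrative sketch (not compiled into the driver): the usual pairing of
 * drbd_alloc_pages() and drbd_free_pages().  "nr" is a hypothetical count;
 * is_net = 0 accounts the pages in pp_in_use rather than pp_in_use_by_net.
 *
 *	struct page *page = drbd_alloc_pages(mdev, nr, true);
 *	if (page) {
 *		... fill the chain from the socket ...
 *		drbd_free_pages(mdev, page, 0);
 *	}
 */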

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/
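
/*
 * Illustrative sketch (not compiled into the driver) of the rule above:
 * the underscore variant expects the caller to hold req_lock,
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 *
 * which is exactly what drbd_wait_ee_list_empty() below wraps up.
 */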

struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
                    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
        struct drbd_peer_request *peer_req;
        struct page *page;
        unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

        if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
                return NULL;

        peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
        if (!peer_req) {
                if (!(gfp_mask & __GFP_NOWARN))
                        dev_err(DEV, "%s: allocation failed\n", __func__);
                return NULL;
        }

        page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
        if (!page)
                goto fail;

        drbd_clear_interval(&peer_req->i);
        peer_req->i.size = data_size;
        peer_req->i.sector = sector;
        peer_req->i.local = false;
        peer_req->i.waiting = false;

        peer_req->epoch = NULL;
        peer_req->w.mdev = mdev;
        peer_req->pages = page;
        atomic_set(&peer_req->pending_bios, 0);
        peer_req->flags = 0;
        /*
         * The block_id is opaque to the receiver.  It is not endianness
         * converted, and sent back to the sender unchanged.
         */
        peer_req->block_id = id;

        return peer_req;

 fail:
        mempool_free(peer_req, drbd_ee_mempool);
        return NULL;
}

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
                       int is_net)
{
        if (peer_req->flags & EE_HAS_DIGEST)
                kfree(peer_req->digest);
        drbd_free_pages(mdev, peer_req->pages, is_net);
        D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
        D_ASSERT(drbd_interval_empty(&peer_req->i));
        mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
        LIST_HEAD(work_list);
        struct drbd_peer_request *peer_req, *t;
        int count = 0;
        int is_net = list == &mdev->net_ee;

        spin_lock_irq(&mdev->tconn->req_lock);
        list_splice_init(list, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                __drbd_free_peer_req(mdev, peer_req, is_net);
                count++;
        }
        return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
        LIST_HEAD(work_list);
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;
        int err = 0;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        list_splice_init(&mdev->done_ee, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);

        /* possible callbacks here:
         * e_end_block, and e_end_resync_block, e_send_discard_write.
         * all ignore the last argument.
         */
        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                int err2;

                /* list_del not necessary, next/prev members not touched */
                err2 = peer_req->w.cb(&peer_req->w, !!err);
                if (!err)
                        err = err2;
                drbd_free_peer_req(mdev, peer_req);
        }
        wake_up(&mdev->ee_wait);

        return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                     struct list_head *head)
{
        DEFINE_WAIT(wait);

        /* avoids spin_lock/unlock
         * and calling prepare_to_wait in the fast path */
        while (!list_empty(head)) {
                prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mdev->tconn->req_lock);
                io_schedule();
                finish_wait(&mdev->ee_wait, &wait);
                spin_lock_irq(&mdev->tconn->req_lock);
        }
}

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                    struct list_head *head)
{
        spin_lock_irq(&mdev->tconn->req_lock);
        _drbd_wait_ee_list_empty(mdev, head);
        spin_unlock_irq(&mdev->tconn->req_lock);
}

/* See also kernel_accept(), which is only present since 2.6.18;
 * we also want to log exactly which part of it failed. */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
{
        struct sock *sk = sock->sk;
        int err = 0;

        *what = "listen";
        err = sock->ops->listen(sock, 5);
        if (err < 0)
                goto out;

        *what = "sock_create_lite";
        err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
                               newsock);
        if (err < 0)
                goto out;

        *what = "accept";
        err = sock->ops->accept(sock, *newsock, 0);
        if (err < 0) {
                sock_release(*newsock);
                *newsock = NULL;
                goto out;
        }
        (*newsock)->ops  = sock->ops;

out:
        return err;
}

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
        mm_segment_t oldfs;
        struct kvec iov = {
                .iov_base = buf,
                .iov_len = size,
        };
        struct msghdr msg = {
                .msg_iovlen = 1,
                .msg_iov = (struct iovec *)&iov,
                .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
        };
        int rv;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
        set_fs(oldfs);

        return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
        mm_segment_t oldfs;
        struct kvec iov = {
                .iov_base = buf,
                .iov_len = size,
        };
        struct msghdr msg = {
                .msg_iovlen = 1,
                .msg_iov = (struct iovec *)&iov,
                .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
        };
        int rv;

        oldfs = get_fs();
        set_fs(KERNEL_DS);

        for (;;) {
                rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);
                if (rv == size)
                        break;

                /* Note:
                 * ECONNRESET   other side closed the connection
                 * ERESTARTSYS  (on  sock) we got a signal
                 */

                if (rv < 0) {
                        if (rv == -ECONNRESET)
                                conn_info(tconn, "sock was reset by peer\n");
                        else if (rv != -ERESTARTSYS)
                                conn_err(tconn, "sock_recvmsg returned %d\n", rv);
                        break;
                } else if (rv == 0) {
                        conn_info(tconn, "sock was shut down by peer\n");
                        break;
                } else {
                        /* signal came in, or peer/link went down,
                         * after we read a partial message
                         */
                        /* D_ASSERT(signal_pending(current)); */
                        break;
                }
        }

        set_fs(oldfs);

        if (rv != size)
                conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

        return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int err;

        err = drbd_recv(tconn, buf, size);
        if (err != size) {
                if (err >= 0)
                        err = -EIO;
        } else
                err = 0;
        return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int err;

        err = drbd_recv_all(tconn, buf, size);
        if (err && !signal_pending(current))
                conn_warn(tconn, "short read (expected size %d)\n", (int)size);
        return err;
}
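
/*
 * Illustrative sketch (not compiled into the driver): how the receive
 * helpers above layer on each other.  drbd_recv() returns a byte count or
 * a negative error and marks the connection C_BROKEN_PIPE on short reads;
 * drbd_recv_all() maps short reads to -EIO; drbd_recv_all_warn() also logs
 * unless a signal is pending.  "h" is a hypothetical on-stack buffer.
 *
 *	struct p_header80 h;
 *
 *	if (drbd_recv_all_warn(tconn, &h, sizeof(h)))
 *		return -EIO;	// connection already flagged as broken
 */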

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
                unsigned int rcv)
{
        /* open coded SO_SNDBUF, SO_RCVBUF */
        if (snd) {
                sock->sk->sk_sndbuf = snd;
                sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rcv) {
                sock->sk->sk_rcvbuf = rcv;
                sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
}

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
        const char *what;
        struct socket *sock;
        struct sockaddr_in6 src_in6;
        struct sockaddr_in6 peer_in6;
        struct net_conf *nc;
        int err, peer_addr_len, my_addr_len;
        int sndbuf_size, rcvbuf_size, try_connect_int;
        int disconnect_on_error = 1;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return NULL;
        }

        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
        try_connect_int = nc->try_connect_int;

        my_addr_len = min_t(int, nc->my_addr_len, sizeof(src_in6));
        memcpy(&src_in6, nc->my_addr, my_addr_len);

        if (((struct sockaddr *)nc->my_addr)->sa_family == AF_INET6)
                src_in6.sin6_port = 0;
        else
                ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

        peer_addr_len = min_t(int, nc->peer_addr_len, sizeof(src_in6));
        memcpy(&peer_in6, nc->peer_addr, peer_addr_len);

        rcu_read_unlock();

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (err < 0) {
                sock = NULL;
                goto out;
        }

        sock->sk->sk_rcvtimeo =
        sock->sk->sk_sndtimeo = try_connect_int * HZ;
        drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

        /* explicitly bind to the configured IP as source IP
         * for the outgoing connections.
         * This is needed for multihomed hosts and to be
         * able to use lo: interfaces for drbd.
         * Make sure to use 0 as port number, so linux selects
         * a free one dynamically.
         */
        what = "bind before connect";
        err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
        if (err < 0)
                goto out;

        /* connect may fail, peer not yet available.
         * stay C_WF_CONNECTION, don't go Disconnecting! */
        disconnect_on_error = 0;
        what = "connect";
        err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
        if (err < 0) {
                if (sock) {
                        sock_release(sock);
                        sock = NULL;
                }
                switch (-err) {
                        /* timeout, busy, signal pending */
                case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
                case EINTR: case ERESTARTSYS:
                        /* peer not (yet) available, network problem */
                case ECONNREFUSED: case ENETUNREACH:
                case EHOSTDOWN:    case EHOSTUNREACH:
                        disconnect_on_error = 0;
                        break;
                default:
                        conn_err(tconn, "%s failed, err = %d\n", what, err);
                }
                if (disconnect_on_error)
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        }

        return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
{
        int timeo, err, my_addr_len;
        int sndbuf_size, rcvbuf_size, try_connect_int;
        struct socket *s_estab = NULL, *s_listen;
        struct sockaddr_in6 my_addr;
        struct net_conf *nc;
        const char *what;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return NULL;
        }

        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
        try_connect_int = nc->try_connect_int;

        my_addr_len = min_t(int, nc->my_addr_len, sizeof(struct sockaddr_in6));
        memcpy(&my_addr, nc->my_addr, my_addr_len);
        rcu_read_unlock();

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
                SOCK_STREAM, IPPROTO_TCP, &s_listen);
        if (err) {
                s_listen = NULL;
                goto out;
        }

        timeo = try_connect_int * HZ;
        timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

        s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
        s_listen->sk->sk_rcvtimeo = timeo;
        s_listen->sk->sk_sndtimeo = timeo;
        drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

        what = "bind before listen";
        err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
        if (err < 0)
                goto out;

        err = drbd_accept(&what, s_listen, &s_estab);

out:
        if (s_listen)
                sock_release(s_listen);
        if (err < 0) {
                if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                        conn_err(tconn, "%s failed, err = %d\n", what, err);
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
                }
        }

        return s_estab;
}

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
                             enum drbd_packet cmd)
{
        if (!conn_prepare_command(tconn, sock))
                return -EIO;
        return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
        unsigned int header_size = drbd_header_size(tconn);
        struct packet_info pi;
        int err;

        err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
        if (err != header_size) {
                if (err >= 0)
                        err = -EIO;
                return err;
        }
        err = decode_header(tconn, tconn->data.rbuf, &pi);
        if (err)
                return err;
        return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:       pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
        int rr;
        char tb[4];

        if (!*sock)
                return false;

        rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

        if (rr > 0 || rr == -EAGAIN) {
                return true;
        } else {
                sock_release(*sock);
                *sock = NULL;
                return false;
        }
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(int vnr, void *p, void *data)
{
        struct drbd_conf *mdev = (struct drbd_conf *)p;
        int err;

        atomic_set(&mdev->packet_seq, 0);
        mdev->peer_seq = 0;

        mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
                &mdev->tconn->cstate_mutex :
                &mdev->own_state_mutex;

        err = drbd_send_sync_param(mdev);
        if (!err)
                err = drbd_send_sizes(mdev, 0, 0);
        if (!err)
                err = drbd_send_uuids(mdev);
        if (!err)
                err = drbd_send_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);
        mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
        return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
{
        struct socket *sock, *msock;
        struct net_conf *nc;
        int timeout, try, h, ok;

        if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
                return -2;

        clear_bit(DISCARD_CONCURRENT, &tconn->flags);

        /* Assume that the peer only understands protocol 80 until we know better.  */
        tconn->agreed_pro_version = 80;

        do {
                struct socket *s;

                for (try = 0;;) {
                        /* 3 tries, this should take less than a second! */
                        s = drbd_try_connect(tconn);
                        if (s || ++try >= 3)
                                break;
                        /* give the other side time to call bind() & listen() */
                        schedule_timeout_interruptible(HZ / 10);
                }

                if (s) {
                        if (!tconn->data.socket) {
                                tconn->data.socket = s;
                                send_first_packet(tconn, &tconn->data, P_INITIAL_DATA);
                        } else if (!tconn->meta.socket) {
                                tconn->meta.socket = s;
                                send_first_packet(tconn, &tconn->meta, P_INITIAL_META);
                        } else {
                                conn_err(tconn, "Logic error in conn_connect()\n");
                                goto out_release_sockets;
                        }
                }

                if (tconn->data.socket && tconn->meta.socket) {
                        schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
                        ok = drbd_socket_okay(&tconn->data.socket);
                        ok = drbd_socket_okay(&tconn->meta.socket) && ok;
                        if (ok)
                                break;
                }

retry:
                s = drbd_wait_for_connect(tconn);
                if (s) {
                        try = receive_first_packet(tconn, s);
                        drbd_socket_okay(&tconn->data.socket);
                        drbd_socket_okay(&tconn->meta.socket);
                        switch (try) {
                        case P_INITIAL_DATA:
                                if (tconn->data.socket) {
                                        conn_warn(tconn, "initial packet S crossed\n");
                                        sock_release(tconn->data.socket);
                                }
                                tconn->data.socket = s;
                                break;
                        case P_INITIAL_META:
                                if (tconn->meta.socket) {
                                        conn_warn(tconn, "initial packet M crossed\n");
                                        sock_release(tconn->meta.socket);
                                }
                                tconn->meta.socket = s;
                                set_bit(DISCARD_CONCURRENT, &tconn->flags);
                                break;
                        default:
                                conn_warn(tconn, "Error receiving initial packet\n");
                                sock_release(s);
                                if (random32() & 1)
                                        goto retry;
                        }
                }

                if (tconn->cstate <= C_DISCONNECTING)
                        goto out_release_sockets;
                if (signal_pending(current)) {
                        flush_signals(current);
                        smp_rmb();
                        if (get_t_state(&tconn->receiver) == EXITING)
                                goto out_release_sockets;
                }

                if (tconn->data.socket && tconn->meta.socket) {
                        ok = drbd_socket_okay(&tconn->data.socket);
                        ok = drbd_socket_okay(&tconn->meta.socket) && ok;
                        if (ok)
                                break;
                }
        } while (1);

        sock  = tconn->data.socket;
        msock = tconn->meta.socket;

        msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
        sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

        sock->sk->sk_allocation = GFP_NOIO;
        msock->sk->sk_allocation = GFP_NOIO;

        sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
        msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

        /* NOT YET ...
         * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
         * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         * first set it to the P_CONNECTION_FEATURES timeout,
         * which we set to 4x the configured ping_timeout. */
        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);

        sock->sk->sk_sndtimeo =
        sock->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

        msock->sk->sk_rcvtimeo = nc->ping_int*HZ;
        timeout = nc->timeout * HZ / 10;
        rcu_read_unlock();

        msock->sk->sk_sndtimeo = timeout;

        /* we don't want delays.
         * we use TCP_CORK where appropriate, though */
        drbd_tcp_nodelay(sock);
        drbd_tcp_nodelay(msock);

        tconn->last_received = jiffies;

        h = drbd_do_features(tconn);
        if (h <= 0)
                return h;

        if (tconn->cram_hmac_tfm) {
                /* drbd_request_state(mdev, NS(conn, WFAuth)); */
                switch (drbd_do_auth(tconn)) {
                case -1:
                        conn_err(tconn, "Authentication of peer failed\n");
                        return -1;
                case 0:
                        conn_err(tconn, "Authentication of peer failed, trying again.\n");
                        return 0;
                }
        }

        if (conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE) < SS_SUCCESS)
                return 0;

        sock->sk->sk_sndtimeo = timeout;
        sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

        drbd_thread_start(&tconn->asender);

        if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
                return -1;

        down_read(&drbd_cfg_rwsem);
        h = !idr_for_each(&tconn->volumes, drbd_connected, tconn);
        up_read(&drbd_cfg_rwsem);
        return h;

out_release_sockets:
        if (tconn->data.socket) {
                sock_release(tconn->data.socket);
                tconn->data.socket = NULL;
        }
        if (tconn->meta.socket) {
                sock_release(tconn->meta.socket);
                tconn->meta.socket = NULL;
        }
        return -1;
}

static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
        unsigned int header_size = drbd_header_size(tconn);

        if (header_size == sizeof(struct p_header100) &&
            *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
                struct p_header100 *h = header;
                if (h->pad != 0) {
                        conn_err(tconn, "Header padding is not zero\n");
                        return -EINVAL;
                }
                pi->vnr = be16_to_cpu(h->volume);
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
        } else if (header_size == sizeof(struct p_header95) &&
                   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
                struct p_header95 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
                pi->vnr = 0;
        } else if (header_size == sizeof(struct p_header80) &&
                   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
                struct p_header80 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be16_to_cpu(h->length);
                pi->vnr = 0;
        } else {
                conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
                         be32_to_cpu(*(__be32 *)header),
                         tconn->agreed_pro_version);
                return -EINVAL;
        }
        pi->data = header + header_size;
        return 0;
}
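
/*
 * Illustrative sketch (not compiled into the driver): what a receive loop
 * built on decode_header()/drbd_recv_header() looks like.  The handler
 * table is hypothetical.
 *
 *	struct packet_info pi;
 *
 *	if (drbd_recv_header(tconn, &pi))
 *		return;
 *	// pi.cmd, pi.vnr and pi.size are now in host byte order;
 *	// pi.data points just behind the header in the receive buffer.
 *	err = handlers[pi.cmd](tconn, &pi);
 */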

static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
        void *buffer = tconn->data.rbuf;
        int err;

        err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
        if (err)
                return err;

        err = decode_header(tconn, buffer, pi);
        tconn->last_received = jiffies;

        return err;
}

static void drbd_flush(struct drbd_conf *mdev)
{
        int rv;

        if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
                rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
                                        NULL);
                if (rv) {
                        dev_err(DEV, "local disk flush failed with status %d\n", rv);
                        /* would rather check on EOPNOTSUPP, but that is not reliable.
                         * don't try again for ANY return value != 0
                         * if (rv == -EOPNOTSUPP) */
                        drbd_bump_write_ordering(mdev, WO_drain_io);
                }
                put_ldev(mdev);
        }
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:       DRBD device.
 * @epoch:      Epoch object.
 * @ev:         Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                                               struct drbd_epoch *epoch,
                                               enum epoch_event ev)
{
        int epoch_size;
        struct drbd_epoch *next_epoch;
        enum finish_epoch rv = FE_STILL_LIVE;

        spin_lock(&mdev->epoch_lock);
        do {
                next_epoch = NULL;

                epoch_size = atomic_read(&epoch->epoch_size);

                switch (ev & ~EV_CLEANUP) {
                case EV_PUT:
                        atomic_dec(&epoch->active);
                        break;
                case EV_GOT_BARRIER_NR:
                        set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
                        break;
                case EV_BECAME_LAST:
                        /* nothing to do */
                        break;
                }

                if (epoch_size != 0 &&
                    atomic_read(&epoch->active) == 0 &&
                    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
                        if (!(ev & EV_CLEANUP)) {
                                spin_unlock(&mdev->epoch_lock);
                                drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
                                spin_lock(&mdev->epoch_lock);
                        }
                        dec_unacked(mdev);

                        if (mdev->current_epoch != epoch) {
                                next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
                                list_del(&epoch->list);
                                ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
                                mdev->epochs--;
                                kfree(epoch);

                                if (rv == FE_STILL_LIVE)
                                        rv = FE_DESTROYED;
                        } else {
                                epoch->flags = 0;
                                atomic_set(&epoch->epoch_size, 0);
                                /* atomic_set(&epoch->active, 0); is already zero */
                                if (rv == FE_STILL_LIVE)
                                        rv = FE_RECYCLED;
                                wake_up(&mdev->ee_wait);
                        }
                }

                if (!next_epoch)
                        break;

                epoch = next_epoch;
        } while (1);

        spin_unlock(&mdev->epoch_lock);

        return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:       DRBD device.
 * @wo:         Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
        struct disk_conf *dc;
        enum write_ordering_e pwo;
        static char *write_ordering_str[] = {
                [WO_none] = "none",
                [WO_drain_io] = "drain",
                [WO_bdev_flush] = "flush",
        };

        pwo = mdev->write_ordering;
        wo = min(pwo, wo);
        rcu_read_lock();
        dc = rcu_dereference(mdev->ldev->disk_conf);

        if (wo == WO_bdev_flush && !dc->disk_flushes)
                wo = WO_drain_io;
        if (wo == WO_drain_io && !dc->disk_drain)
                wo = WO_none;
        rcu_read_unlock();
        mdev->write_ordering = wo;
        if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
                dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_peer_request() - submit the pages of a peer request as one or more bios
 * @mdev:       DRBD device.
 * @peer_req:   peer request
 * @rw:         flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
                             struct drbd_peer_request *peer_req,
                             const unsigned rw, const int fault_type)
{
        struct bio *bios = NULL;
        struct bio *bio;
        struct page *page = peer_req->pages;
        sector_t sector = peer_req->i.sector;
        unsigned ds = peer_req->i.size;
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
        int err = -ENOMEM;

        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
         * side than those of the sending peer, we may need to submit the
         * request in more than one bio.
         *
         * Plain bio_alloc is good enough here, this is no DRBD internally
         * generated bio, but a bio allocated on behalf of the peer.
         */
next_bio:
        bio = bio_alloc(GFP_NOIO, nr_pages);
        if (!bio) {
                dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
                goto fail;
        }
        /* > peer_req->i.sector, unless this is the first bio */
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
        bio->bi_private = peer_req;
        bio->bi_end_io = drbd_peer_request_endio;

        bio->bi_next = bios;
        bios = bio;
        ++n_bios;

        page_chain_for_each(page) {
                unsigned len = min_t(unsigned, ds, PAGE_SIZE);
                if (!bio_add_page(bio, page, len, 0)) {
                        /* A single page must always be possible!
                         * But in case it fails anyways,
                         * we deal with it, and complain (below). */
                        if (bio->bi_vcnt == 0) {
                                dev_err(DEV,
                                        "bio_add_page failed for len=%u, "
                                        "bi_vcnt=0 (bi_sector=%llu)\n",
                                        len, (unsigned long long)bio->bi_sector);
                                err = -ENOSPC;
                                goto fail;
                        }
                        goto next_bio;
                }
                ds -= len;
                sector += len >> 9;
                --nr_pages;
        }
        D_ASSERT(page == NULL);
        D_ASSERT(ds == 0);

        atomic_set(&peer_req->pending_bios, n_bios);
        do {
                bio = bios;
                bios = bios->bi_next;
                bio->bi_next = NULL;

                drbd_generic_make_request(mdev, fault_type, bio);
        } while (bios);
        return 0;

fail:
        while (bios) {
                bio = bios;
                bios = bios->bi_next;
                bio_put(bio);
        }
        return err;
}
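
/*
 * Illustrative sketch (not compiled into the driver): how a caller
 * typically drives drbd_submit_peer_request(), per the kerneldoc above.
 * Error handling is abbreviated; the fault type is just an example.
 *
 *	peer_req = drbd_alloc_peer_req(mdev, id, sector, size, GFP_NOIO);
 *	if (!peer_req)
 *		goto fail;
 *	peer_req->w.cb = e_end_block;	// completion callback
 *	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_DT_WR))
 *		goto fail_free;		// -ENOMEM or -ENOSPC, see above
 */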
1288
1289 static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
1290                                              struct drbd_peer_request *peer_req)
1291 {
1292         struct drbd_interval *i = &peer_req->i;
1293
1294         drbd_remove_interval(&mdev->write_requests, i);
1295         drbd_clear_interval(i);
1296
1297         /* Wake up any processes waiting for this peer request to complete.  */
1298         if (i->waiting)
1299                 wake_up(&mdev->misc_wait);
1300 }
1301
1302 static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
1303 {
1304         struct drbd_conf *mdev;
1305         int rv;
1306         struct p_barrier *p = pi->data;
1307         struct drbd_epoch *epoch;
1308
1309         mdev = vnr_to_mdev(tconn, pi->vnr);
1310         if (!mdev)
1311                 return -EIO;
1312
1313         inc_unacked(mdev);
1314
1315         mdev->current_epoch->barrier_nr = p->barrier;
1316         rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1317
1318         /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1319          * the activity log, which means it would not be resynced in case the
1320          * R_PRIMARY crashes now.
1321          * Therefore we must send the barrier_ack after the barrier request was
1322          * completed. */
1323         switch (mdev->write_ordering) {
1324         case WO_none:
1325                 if (rv == FE_RECYCLED)
1326                         return 0;
1327
1328                 /* receiver context, in the writeout path of the other node.
1329                  * avoid potential distributed deadlock */
1330                 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1331                 if (epoch)
1332                         break;
1333                 else
1334                         dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1335                         /* Fall through */
1336
1337         case WO_bdev_flush:
1338         case WO_drain_io:
1339                 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1340                 drbd_flush(mdev);
1341
1342                 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1343                         epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1344                         if (epoch)
1345                                 break;
1346                 }
1347
1348                 epoch = mdev->current_epoch;
1349                 wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
1350
1351                 D_ASSERT(atomic_read(&epoch->active) == 0);
1352                 D_ASSERT(epoch->flags == 0);
1353
1354                 return 0;
1355         default:
1356                 dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
1357                 return -EIO;
1358         }
1359
1360         epoch->flags = 0;
1361         atomic_set(&epoch->epoch_size, 0);
1362         atomic_set(&epoch->active, 0);
1363
1364         spin_lock(&mdev->epoch_lock);
1365         if (atomic_read(&mdev->current_epoch->epoch_size)) {
1366                 list_add(&epoch->list, &mdev->current_epoch->list);
1367                 mdev->current_epoch = epoch;
1368                 mdev->epochs++;
1369         } else {
1370                 /* The current_epoch got recycled while we allocated this one... */
1371                 kfree(epoch);
1372         }
1373         spin_unlock(&mdev->epoch_lock);
1374
1375         return 0;
1376 }
1377
1378 /* used from receive_RSDataReply (recv_resync_read)
1379  * and from receive_Data */
1380 static struct drbd_peer_request *
1381 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1382               int data_size) __must_hold(local)
1383 {
1384         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1385         struct drbd_peer_request *peer_req;
1386         struct page *page;
1387         int dgs, ds, err;
1388         void *dig_in = mdev->tconn->int_dig_in;
1389         void *dig_vv = mdev->tconn->int_dig_vv;
1390         unsigned long *data;
1391
1392         dgs = 0;
1393         if (mdev->tconn->peer_integrity_tfm) {
1394                 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1395                 /*
1396                  * FIXME: Receive the incoming digest into the receive buffer
1397                  *        here, together with its struct p_data?
1398                  */
1399                 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1400                 if (err)
1401                         return NULL;
1402                 data_size -= dgs;
1403         }
1404
1405         if (!expect(data_size != 0))
1406                 return NULL;
1407         if (!expect(IS_ALIGNED(data_size, 512)))
1408                 return NULL;
1409         if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1410                 return NULL;
1411
1412         /* even though we trust out peer,
1413          * we sometimes have to double check. */
1414         if (sector + (data_size>>9) > capacity) {
1415                 dev_err(DEV, "request from peer beyond end of local disk: "
1416                         "capacity: %llus < sector: %llus + size: %u\n",
1417                         (unsigned long long)capacity,
1418                         (unsigned long long)sector, data_size);
1419                 return NULL;
1420         }
1421
1422         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1423          * "criss-cross" setup, that might cause write-out on some other DRBD,
1424          * which in turn might block on the other node at this very place.  */
1425         peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
1426         if (!peer_req)
1427                 return NULL;
1428
1429         ds = data_size;
1430         page = peer_req->pages;
1431         page_chain_for_each(page) {
1432                 unsigned len = min_t(int, ds, PAGE_SIZE);
1433                 data = kmap(page);
1434                 err = drbd_recv_all_warn(mdev->tconn, data, len);
1435                 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
1436                         dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1437                         data[0] = data[0] ^ (unsigned long)-1;
1438                 }
1439                 kunmap(page);
1440                 if (err) {
1441                         drbd_free_peer_req(mdev, peer_req);
1442                         return NULL;
1443                 }
1444                 ds -= len;
1445         }
1446
1447         if (dgs) {
1448                 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
1449                 if (memcmp(dig_in, dig_vv, dgs)) {
1450                         dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1451                                 (unsigned long long)sector, data_size);
1452                         drbd_free_peer_req(mdev, peer_req);
1453                         return NULL;
1454                 }
1455         }
1456         mdev->recv_cnt += data_size>>9;
1457         return peer_req;
1458 }
1459
1460 /* drbd_drain_block() just takes a data block
1461  * out of the socket input buffer, and discards it.
1462  */
1463 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1464 {
1465         struct page *page;
1466         int err = 0;
1467         void *data;
1468
1469         if (!data_size)
1470                 return 0;
1471
1472         page = drbd_alloc_pages(mdev, 1, 1);
1473
1474         data = kmap(page);
1475         while (data_size) {
1476                 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1477
1478                 err = drbd_recv_all_warn(mdev->tconn, data, len);
1479                 if (err)
1480                         break;
1481                 data_size -= len;
1482         }
1483         kunmap(page);
1484         drbd_free_pages(mdev, page, 0);
1485         return err;
1486 }
1487
1488 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1489                            sector_t sector, int data_size)
1490 {
1491         struct bio_vec *bvec;
1492         struct bio *bio;
1493         int dgs, err, i, expect;
1494         void *dig_in = mdev->tconn->int_dig_in;
1495         void *dig_vv = mdev->tconn->int_dig_vv;
1496
1497         dgs = 0;
1498         if (mdev->tconn->peer_integrity_tfm) {
1499                 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1500                 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1501                 if (err)
1502                         return err;
1503                 data_size -= dgs;
1504         }
1505
1506         /* optimistically update recv_cnt.  if receiving fails below,
1507          * we disconnect anyway, and counters will be reset. */
1508         mdev->recv_cnt += data_size>>9;
1509
1510         bio = req->master_bio;
1511         D_ASSERT(sector == bio->bi_sector);
1512
1513         bio_for_each_segment(bvec, bio, i) {
1514                 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
1515                 expect = min_t(int, data_size, bvec->bv_len);
1516                 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
1517                 kunmap(bvec->bv_page);
1518                 if (err)
1519                         return err;
1520                 data_size -= expect;
1521         }
1522
1523         if (dgs) {
1524                 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
1525                 if (memcmp(dig_in, dig_vv, dgs)) {
1526                         dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1527                         return -EINVAL;
1528                 }
1529         }
1530
1531         D_ASSERT(data_size == 0);
1532         return 0;
1533 }
1534
1535 /*
1536  * e_end_resync_block() is called in asender context via
1537  * drbd_finish_peer_reqs().
1538  */
1539 static int e_end_resync_block(struct drbd_work *w, int unused)
1540 {
1541         struct drbd_peer_request *peer_req =
1542                 container_of(w, struct drbd_peer_request, w);
1543         struct drbd_conf *mdev = w->mdev;
1544         sector_t sector = peer_req->i.sector;
1545         int err;
1546
1547         D_ASSERT(drbd_interval_empty(&peer_req->i));
1548
1549         if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1550                 drbd_set_in_sync(mdev, sector, peer_req->i.size);
1551                 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
1552         } else {
1553                 /* Record failure to sync */
1554                 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
1555
1556                 err  = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1557         }
1558         dec_unacked(mdev);
1559
1560         return err;
1561 }
1562
1563 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1564 {
1565         struct drbd_peer_request *peer_req;
1566
1567         peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1568         if (!peer_req)
1569                 goto fail;
1570
1571         dec_rs_pending(mdev);
1572
1573         inc_unacked(mdev);
1574         /* corresponding dec_unacked() in e_end_resync_block(),
1575          * or in _drbd_clear_done_ee */
1576
1577         peer_req->w.cb = e_end_resync_block;
1578
1579         spin_lock_irq(&mdev->tconn->req_lock);
1580         list_add(&peer_req->w.list, &mdev->sync_ee);
1581         spin_unlock_irq(&mdev->tconn->req_lock);
1582
1583         atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1584         if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
1585                 return 0;
1586
1587         /* don't care about the reason here */
1588         dev_err(DEV, "submit failed, triggering re-connect\n");
1589         spin_lock_irq(&mdev->tconn->req_lock);
1590         list_del(&peer_req->w.list);
1591         spin_unlock_irq(&mdev->tconn->req_lock);
1592
1593         drbd_free_peer_req(mdev, peer_req);
1594 fail:
1595         put_ldev(mdev);
1596         return -EIO;
1597 }
1598
1599 static struct drbd_request *
1600 find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1601              sector_t sector, bool missing_ok, const char *func)
1602 {
1603         struct drbd_request *req;
1604
1605         /* Request object according to our peer */
1606         req = (struct drbd_request *)(unsigned long)id;
1607         if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
1608                 return req;
1609         if (!missing_ok) {
1610                 dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
1611                         (unsigned long)id, (unsigned long long)sector);
1612         }
1613         return NULL;
1614 }
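/* Illustration only: the block_id round trip that find_request() above
 * relies on.  When we send a request, we use the address of our own
 * struct drbd_request as block_id; the peer echoes it back untouched in
 * its reply, and drbd_contains_interval() verifies that the echoed
 * pointer still names a live request covering this sector before it is
 * used for real. */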
1615
1616 static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1617 {
1618         struct drbd_conf *mdev;
1619         struct drbd_request *req;
1620         sector_t sector;
1621         int err;
1622         struct p_data *p = pi->data;
1623
1624         mdev = vnr_to_mdev(tconn, pi->vnr);
1625         if (!mdev)
1626                 return -EIO;
1627
1628         sector = be64_to_cpu(p->sector);
1629
1630         spin_lock_irq(&mdev->tconn->req_lock);
1631         req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
1632         spin_unlock_irq(&mdev->tconn->req_lock);
1633         if (unlikely(!req))
1634                 return -EIO;
1635
1636         /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
1637          * special casing it there for the various failure cases.
1638          * still no race with drbd_fail_pending_reads */
1639         err = recv_dless_read(mdev, req, sector, pi->size);
1640         if (!err)
1641                 req_mod(req, DATA_RECEIVED);
1642         /* else: nothing; this is handled by drbd_disconnect()...
1643          * I don't think we may complete this just yet
1644          * in case we are "on-disconnect: freeze" */
1645
1646         return err;
1647 }
1648
1649 static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1650 {
1651         struct drbd_conf *mdev;
1652         sector_t sector;
1653         int err;
1654         struct p_data *p = pi->data;
1655
1656         mdev = vnr_to_mdev(tconn, pi->vnr);
1657         if (!mdev)
1658                 return -EIO;
1659
1660         sector = be64_to_cpu(p->sector);
1661         D_ASSERT(p->block_id == ID_SYNCER);
1662
1663         if (get_ldev(mdev)) {
1664                 /* data is submitted to disk within recv_resync_read.
1665                  * corresponding put_ldev done below on error,
1666                  * or in drbd_peer_request_endio. */
1667                 err = recv_resync_read(mdev, sector, pi->size);
1668         } else {
1669                 if (__ratelimit(&drbd_ratelimit_state))
1670                         dev_err(DEV, "Cannot write resync data to local disk.\n");
1671
1672                 err = drbd_drain_block(mdev, pi->size);
1673
1674                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
1675         }
1676
1677         atomic_add(pi->size >> 9, &mdev->rs_sect_in);
1678
1679         return err;
1680 }
1681
1682 static int w_restart_write(struct drbd_work *w, int cancel)
1683 {
1684         struct drbd_request *req = container_of(w, struct drbd_request, w);
1685         struct drbd_conf *mdev = w->mdev;
1686         struct bio *bio;
1687         unsigned long start_time;
1688         unsigned long flags;
1689
1690         spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1691         if (!expect(req->rq_state & RQ_POSTPONED)) {
1692                 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1693                 return -EIO;
1694         }
1695         bio = req->master_bio;
1696         start_time = req->start_time;
1697         /* Postponed requests will not have their master_bio completed!  */
1698         __req_mod(req, DISCARD_WRITE, NULL);
1699         spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1700
1701         while (__drbd_make_request(mdev, bio, start_time))
1702                 /* retry */ ;
1703         return 0;
1704 }
1705
1706 static void restart_conflicting_writes(struct drbd_conf *mdev,
1707                                        sector_t sector, int size)
1708 {
1709         struct drbd_interval *i;
1710         struct drbd_request *req;
1711
1712         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1713                 if (!i->local)
1714                         continue;
1715                 req = container_of(i, struct drbd_request, i);
1716                 if (req->rq_state & RQ_LOCAL_PENDING ||
1717                     !(req->rq_state & RQ_POSTPONED))
1718                         continue;
1719                 if (expect(list_empty(&req->w.list))) {
1720                         req->w.mdev = mdev;
1721                         req->w.cb = w_restart_write;
1722                         drbd_queue_work(&mdev->tconn->data.work, &req->w);
1723                 }
1724         }
1725 }
1726
1727 /*
1728  * e_end_block() is called in asender context via drbd_finish_peer_reqs().
1729  */
1730 static int e_end_block(struct drbd_work *w, int cancel)
1731 {
1732         struct drbd_peer_request *peer_req =
1733                 container_of(w, struct drbd_peer_request, w);
1734         struct drbd_conf *mdev = w->mdev;
1735         sector_t sector = peer_req->i.sector;
1736         int err = 0, pcmd;
1737
1738         if (peer_req->flags & EE_SEND_WRITE_ACK) {
1739                 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1740                         pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1741                                 mdev->state.conn <= C_PAUSED_SYNC_T &&
1742                                 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
1743                                 P_RS_WRITE_ACK : P_WRITE_ACK;
1744                         err = drbd_send_ack(mdev, pcmd, peer_req);
1745                         if (pcmd == P_RS_WRITE_ACK)
1746                                 drbd_set_in_sync(mdev, sector, peer_req->i.size);
1747                 } else {
1748                         err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1749                         /* we expect it to be marked out of sync anyway...
1750                          * maybe assert this?  */
1751                 }
1752                 dec_unacked(mdev);
1753         }
1754         /* we delete from the conflict detection hash _after_ we sent out the
1755          * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1756         if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1757                 spin_lock_irq(&mdev->tconn->req_lock);
1758                 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1759                 drbd_remove_epoch_entry_interval(mdev, peer_req);
1760                 if (peer_req->flags & EE_RESTART_REQUESTS)
1761                         restart_conflicting_writes(mdev, sector, peer_req->i.size);
1762                 spin_unlock_irq(&mdev->tconn->req_lock);
1763         } else
1764                 D_ASSERT(drbd_interval_empty(&peer_req->i));
1765
1766         drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1767
1768         return err;
1769 }
1770
1771 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
1772 {
1773         struct drbd_conf *mdev = w->mdev;
1774         struct drbd_peer_request *peer_req =
1775                 container_of(w, struct drbd_peer_request, w);
1776         int err;
1777
1778         err = drbd_send_ack(mdev, ack, peer_req);
1779         dec_unacked(mdev);
1780
1781         return err;
1782 }
1783
1784 static int e_send_discard_write(struct drbd_work *w, int unused)
1785 {
1786         return e_send_ack(w, P_DISCARD_WRITE);
1787 }
1788
1789 static int e_send_retry_write(struct drbd_work *w, int unused)
1790 {
1791         struct drbd_tconn *tconn = w->mdev->tconn;
1792
1793         return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
1794                              P_RETRY_WRITE : P_DISCARD_WRITE);
1795 }
1796
1797 static bool seq_greater(u32 a, u32 b)
1798 {
1799         /*
1800          * We assume 32-bit wrap-around here.
1801          * For 24-bit wrap-around, we would have to shift:
1802          *  a <<= 8; b <<= 8;
1803          */
1804         return (s32)a - (s32)b > 0;
1805 }
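#if 0	/* Illustration only (sketch, not built): how seq_greater()
	 * behaves across the 32-bit wrap.  A plain "a > b" comparison
	 * would get both checks below wrong. */
static void seq_greater_wrap_example(void)
{
	/* 0xffffffff is the last sequence number before the wrap;
	 * 2 comes three steps "after" it. */
	BUG_ON(!seq_greater(2, 0xffffffff));	/* 2 is newer */
	BUG_ON(seq_greater(0xffffffff, 2));	/* ... and not vice versa */
}
#endif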
1806
1807 static u32 seq_max(u32 a, u32 b)
1808 {
1809         return seq_greater(a, b) ? a : b;
1810 }
1811
1812 static bool need_peer_seq(struct drbd_conf *mdev)
1813 {
1814         struct drbd_tconn *tconn = mdev->tconn;
1815         int tp;
1816
1817         /*
1818          * We only need to keep track of the last packet_seq number of our peer
1819          * if we are in dual-primary mode and we have the discard flag set; see
1820          * handle_write_conflicts().
1821          */
1822
1823         rcu_read_lock();
1824         tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1825         rcu_read_unlock();
1826
1827         return tp && test_bit(DISCARD_CONCURRENT, &tconn->flags);
1828 }
1829
1830 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
1831 {
1832         unsigned int newest_peer_seq;
1833
1834         if (need_peer_seq(mdev)) {
1835                 spin_lock(&mdev->peer_seq_lock);
1836                 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1837                 mdev->peer_seq = newest_peer_seq;
1838                 spin_unlock(&mdev->peer_seq_lock);
1839                 /* wake up only if we actually changed mdev->peer_seq */
1840                 if (peer_seq == newest_peer_seq)
1841                         wake_up(&mdev->seq_wait);
1842         }
1843 }
1844
1845 /* Called from receive_Data.
1846  * Synchronize packets on sock with packets on msock.
1847  *
1848  * This is here so that even when a P_DATA packet traveling via sock overtakes
1849  * an Ack packet traveling on msock, they are still processed in the order
1850  * they have been sent.
1851  *
1852  * Note: we don't care about Ack packets overtaking P_DATA packets.
1853  *
1854  * In case peer_seq is larger than mdev->peer_seq, there are outstanding
1855  * packets on the msock. We wait for them to arrive.
1856  * In case we are the logically next packet, we update mdev->peer_seq
1857  * ourselves. Correctly handles 32bit wrap around.
1858  *
1859  * Assume we have a 10 GBit connection, that is about 1<<30 bytes per second,
1860  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1861  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1862  * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1863  *
1864  * returns 0 if we may process the packet,
1865  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1866 static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
1867 {
1868         DEFINE_WAIT(wait);
1869         long timeout;
1870         int ret;
1871
1872         if (!need_peer_seq(mdev))
1873                 return 0;
1874
1875         spin_lock(&mdev->peer_seq_lock);
1876         for (;;) {
1877                 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1878                         mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1879                         ret = 0;
1880                         break;
1881                 }
1882                 if (signal_pending(current)) {
1883                         ret = -ERESTARTSYS;
1884                         break;
1885                 }
1886                 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1887                 spin_unlock(&mdev->peer_seq_lock);
1888                 rcu_read_lock();
1889                 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1890                 rcu_read_unlock();
1891                 timeout = schedule_timeout(timeout);
1892                 spin_lock(&mdev->peer_seq_lock);
1893                 if (!timeout) {
1894                         ret = -ETIMEDOUT;
1895                         dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
1896                         break;
1897                 }
1898         }
1899         spin_unlock(&mdev->peer_seq_lock);
1900         finish_wait(&mdev->seq_wait, &wait);
1901         return ret;
1902 }
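#if 0	/* Illustration only: the reordering scenario the function above
	 * guards against.  Assume mdev->peer_seq == 7 and a P_DATA packet
	 * with seq_num 9 overtakes the packet carrying seq_num 8.  Then
	 * seq_greater(9 - 1, 7) is true, so we sleep on seq_wait until
	 * update_peer_seq() has advanced peer_seq to 8; only then is
	 * packet 9 processed, preserving the send order. */
#endif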
1903
1904 /* see also bio_flags_to_wire().
1905  * We map the DP_* data packet flags to DRBD_REQ_* bio flags and back
1906  * semantically, because we may replicate to other kernel versions. */
1907 static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1908 {
1909         return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1910                 (dpf & DP_FUA ? REQ_FUA : 0) |
1911                 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
1912                 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1913 }
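#if 0	/* Illustration only (sketch, not built): a peer that set
	 * DP_FUA | DP_FLUSH on the wire gets its write submitted locally
	 * with REQ_FUA | REQ_FLUSH, whatever bit values this kernel
	 * version uses for them. */
static void wire_flags_example(struct drbd_conf *mdev)
{
	unsigned long rw = WRITE | wire_flags_to_bio(mdev, DP_FUA | DP_FLUSH);

	BUG_ON(!(rw & REQ_FUA));
	BUG_ON(!(rw & REQ_FLUSH));
}
#endif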
1914
1915 static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
1916                                     unsigned int size)
1917 {
1918         struct drbd_interval *i;
1919
1920     repeat:
1921         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1922                 struct drbd_request *req;
1923                 struct bio_and_error m;
1924
1925                 if (!i->local)
1926                         continue;
1927                 req = container_of(i, struct drbd_request, i);
1928                 if (!(req->rq_state & RQ_POSTPONED))
1929                         continue;
1930                 req->rq_state &= ~RQ_POSTPONED;
1931                 __req_mod(req, NEG_ACKED, &m);
1932                 spin_unlock_irq(&mdev->tconn->req_lock);
1933                 if (m.bio)
1934                         complete_master_bio(mdev, &m);
1935                 spin_lock_irq(&mdev->tconn->req_lock);
1936                 goto repeat;
1937         }
1938 }
1939
1940 static int handle_write_conflicts(struct drbd_conf *mdev,
1941                                   struct drbd_peer_request *peer_req)
1942 {
1943         struct drbd_tconn *tconn = mdev->tconn;
1944         bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
1945         sector_t sector = peer_req->i.sector;
1946         const unsigned int size = peer_req->i.size;
1947         struct drbd_interval *i;
1948         bool equal;
1949         int err;
1950
1951         /*
1952          * Inserting the peer request into the write_requests tree will prevent
1953          * new conflicting local requests from being added.
1954          */
1955         drbd_insert_interval(&mdev->write_requests, &peer_req->i);
1956
1957     repeat:
1958         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1959                 if (i == &peer_req->i)
1960                         continue;
1961
1962                 if (!i->local) {
1963                         /*
1964                          * Our peer has sent a conflicting remote request; this
1965                          * should not happen in a two-node setup.  Wait for the
1966                          * earlier peer request to complete.
1967                          */
1968                         err = drbd_wait_misc(mdev, i);
1969                         if (err)
1970                                 goto out;
1971                         goto repeat;
1972                 }
1973
1974                 equal = i->sector == sector && i->size == size;
1975                 if (resolve_conflicts) {
1976                         /*
1977                          * If the peer request is fully contained within the
1978                          * overlapping request, it can be discarded; otherwise,
1979                          * it will be retried once all overlapping requests
1980                          * have completed.
1981                          */
1982                         bool discard = i->sector <= sector && i->sector +
1983                                        (i->size >> 9) >= sector + (size >> 9);
1984
1985                         if (!equal)
1986                                 dev_alert(DEV, "Concurrent writes detected: "
1987                                                "local=%llus +%u, remote=%llus +%u, "
1988                                                "assuming %s came first\n",
1989                                           (unsigned long long)i->sector, i->size,
1990                                           (unsigned long long)sector, size,
1991                                           discard ? "local" : "remote");
1992
1993                         inc_unacked(mdev);
1994                         peer_req->w.cb = discard ? e_send_discard_write :
1995                                                    e_send_retry_write;
1996                         list_add_tail(&peer_req->w.list, &mdev->done_ee);
1997                         wake_asender(mdev->tconn);
1998
1999                         err = -ENOENT;
2000                         goto out;
2001                 } else {
2002                         struct drbd_request *req =
2003                                 container_of(i, struct drbd_request, i);
2004
2005                         if (!equal)
2006                                 dev_alert(DEV, "Concurrent writes detected: "
2007                                                "local=%llus +%u, remote=%llus +%u\n",
2008                                           (unsigned long long)i->sector, i->size,
2009                                           (unsigned long long)sector, size);
2010
2011                         if (req->rq_state & RQ_LOCAL_PENDING ||
2012                             !(req->rq_state & RQ_POSTPONED)) {
2013                                 /*
2014                                  * Wait for the node with the discard flag to
2015                                  * decide if this request will be discarded or
2016                                  * retried.  Requests that are discarded will
2017                                  * disappear from the write_requests tree.
2018                                  *
2019                                  * In addition, wait for the conflicting
2020                                  * request to finish locally before submitting
2021                                  * the conflicting peer request.
2022                                  */
2023                                 err = drbd_wait_misc(mdev, &req->i);
2024                                 if (err) {
2025                                         _conn_request_state(mdev->tconn,
2026                                                             NS(conn, C_TIMEOUT),
2027                                                             CS_HARD);
2028                                         fail_postponed_requests(mdev, sector, size);
2029                                         goto out;
2030                                 }
2031                                 goto repeat;
2032                         }
2033                         /*
2034                          * Remember to restart the conflicting requests after
2035                          * the new peer request has completed.
2036                          */
2037                         peer_req->flags |= EE_RESTART_REQUESTS;
2038                 }
2039         }
2040         err = 0;
2041
2042     out:
2043         if (err)
2044                 drbd_remove_epoch_entry_interval(mdev, peer_req);
2045         return err;
2046 }
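#if 0	/* Illustration only (sketch, not built): the containment test
	 * used above to choose between discard and retry; the numbers
	 * are made up. */
static bool peer_req_contained_example(void)
{
	sector_t i_sector = 8;		/* local write: 16 sectors */
	unsigned int i_size = 8192;
	sector_t sector = 8;		/* peer write: 8 sectors */
	unsigned int size = 4096;

	/* true: the peer write lies fully inside the local one, so the
	 * node with the discard flag answers P_DISCARD_WRITE; a partial
	 * overlap would get P_RETRY_WRITE instead. */
	return i_sector <= sector &&
	       i_sector + (i_size >> 9) >= sector + (size >> 9);
}
#endif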
2047
2048 /* mirrored write */
2049 static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2050 {
2051         struct drbd_conf *mdev;
2052         sector_t sector;
2053         struct drbd_peer_request *peer_req;
2054         struct p_data *p = pi->data;
2055         u32 peer_seq = be32_to_cpu(p->seq_num);
2056         int rw = WRITE;
2057         u32 dp_flags;
2058         int err, tp;
2059
2060         mdev = vnr_to_mdev(tconn, pi->vnr);
2061         if (!mdev)
2062                 return -EIO;
2063
2064         if (!get_ldev(mdev)) {
2065                 int err2;
2066
2067                 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2068                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
2069                 atomic_inc(&mdev->current_epoch->epoch_size);
2070                 err2 = drbd_drain_block(mdev, pi->size);
2071                 if (!err)
2072                         err = err2;
2073                 return err;
2074         }
2075
2076         /*
2077          * Corresponding put_ldev done either below (on various errors), or in
2078          * drbd_peer_request_endio, if we successfully submit the data at the
2079          * end of this function.
2080          */
2081
2082         sector = be64_to_cpu(p->sector);
2083         peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
2084         if (!peer_req) {
2085                 put_ldev(mdev);
2086                 return -EIO;
2087         }
2088
2089         peer_req->w.cb = e_end_block;
2090
2091         dp_flags = be32_to_cpu(p->dp_flags);
2092         rw |= wire_flags_to_bio(mdev, dp_flags);
2093
2094         if (dp_flags & DP_MAY_SET_IN_SYNC)
2095                 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2096
2097         spin_lock(&mdev->epoch_lock);
2098         peer_req->epoch = mdev->current_epoch;
2099         atomic_inc(&peer_req->epoch->epoch_size);
2100         atomic_inc(&peer_req->epoch->active);
2101         spin_unlock(&mdev->epoch_lock);
2102
2103         rcu_read_lock();
2104         tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2105         rcu_read_unlock();
2106         if (tp) {
2107                 peer_req->flags |= EE_IN_INTERVAL_TREE;
2108                 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2109                 if (err)
2110                         goto out_interrupted;
2111                 spin_lock_irq(&mdev->tconn->req_lock);
2112                 err = handle_write_conflicts(mdev, peer_req);
2113                 if (err) {
2114                         spin_unlock_irq(&mdev->tconn->req_lock);
2115                         if (err == -ENOENT) {
2116                                 put_ldev(mdev);
2117                                 return 0;
2118                         }
2119                         goto out_interrupted;
2120                 }
2121         } else
2122                 spin_lock_irq(&mdev->tconn->req_lock);
2123         list_add(&peer_req->w.list, &mdev->active_ee);
2124         spin_unlock_irq(&mdev->tconn->req_lock);
2125
2126         if (mdev->tconn->agreed_pro_version < 100) {
2127                 rcu_read_lock();
2128                 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
2129                 case DRBD_PROT_C:
2130                         dp_flags |= DP_SEND_WRITE_ACK;
2131                         break;
2132                 case DRBD_PROT_B:
2133                         dp_flags |= DP_SEND_RECEIVE_ACK;
2134                         break;
2135                 }
2136                 rcu_read_unlock();
2137         }
2138
2139         if (dp_flags & DP_SEND_WRITE_ACK) {
2140                 peer_req->flags |= EE_SEND_WRITE_ACK;
2141                 inc_unacked(mdev);
2142                 /* corresponding dec_unacked() in e_end_block(),
2143                  * or in _drbd_clear_done_ee */
2144         }
2145
2146         if (dp_flags & DP_SEND_RECEIVE_ACK) {
2147                 /* I really don't like it that the receiver thread
2148                  * sends on the msock, but anyway */
2149                 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
2150         }
2151
2152         if (mdev->state.pdsk < D_INCONSISTENT) {
2153                 /* In case we have the only disk of the cluster, mark the range out of sync */
2154                 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2155                 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2156                 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2157                 drbd_al_begin_io(mdev, &peer_req->i);
2158         }
2159
2160         err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2161         if (!err)
2162                 return 0;
2163
2164         /* don't care about the reason here */
2165         dev_err(DEV, "submit failed, triggering re-connect\n");
2166         spin_lock_irq(&mdev->tconn->req_lock);
2167         list_del(&peer_req->w.list);
2168         drbd_remove_epoch_entry_interval(mdev, peer_req);
2169         spin_unlock_irq(&mdev->tconn->req_lock);
2170         if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2171                 drbd_al_complete_io(mdev, &peer_req->i);
2172
2173 out_interrupted:
2174         drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
2175         put_ldev(mdev);
2176         drbd_free_peer_req(mdev, peer_req);
2177         return err;
2178 }
2179
2180 /* We may throttle resync, if the lower device seems to be busy,
2181  * and current sync rate is above c_min_rate.
2182  *
2183  * To decide whether or not the lower device is busy, we use a scheme similar
2184  * to MD RAID's is_mddev_idle(): if the partition stats reveal significant
2185  * activity (more than 64 sectors) that we cannot account for with our own
2186  * resync activity, it obviously is "busy".
2187  *
2188  * The current sync rate used here uses only the most recent two step marks,
2189  * to have a short time average so we can react faster.
2190  */
2191 int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
2192 {
2193         struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2194         unsigned long db, dt, dbdt;
2195         struct lc_element *tmp;
2196         int curr_events;
2197         int throttle = 0;
2198         unsigned int c_min_rate;
2199
2200         rcu_read_lock();
2201         c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2202         rcu_read_unlock();
2203
2204         /* feature disabled? */
2205         if (c_min_rate == 0)
2206                 return 0;
2207
2208         spin_lock_irq(&mdev->al_lock);
2209         tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2210         if (tmp) {
2211                 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2212                 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2213                         spin_unlock_irq(&mdev->al_lock);
2214                         return 0;
2215                 }
2216                 /* Do not slow down if app IO is already waiting for this extent */
2217         }
2218         spin_unlock_irq(&mdev->al_lock);
2219
2220         curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2221                       (int)part_stat_read(&disk->part0, sectors[1]) -
2222                         atomic_read(&mdev->rs_sect_ev);
2223
2224         if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2225                 unsigned long rs_left;
2226                 int i;
2227
2228                 mdev->rs_last_events = curr_events;
2229
2230                 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2231                  * approx. */
2232                 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2233
2234                 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2235                         rs_left = mdev->ov_left;
2236                 else
2237                         rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2238
2239                 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2240                 if (!dt)
2241                         dt++;
2242                 db = mdev->rs_mark_left[i] - rs_left;
2243                 dbdt = Bit2KB(db/dt);
2244
2245                 if (dbdt > c_min_rate)
2246                         throttle = 1;
2247         }
2248         return throttle;
2249 }
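#if 0	/* Illustration only (sketch, not built): the short-time rate
	 * estimate above, with made-up numbers and assuming the usual
	 * 4 KiB per bitmap bit, i.e. Bit2KB(x) == x << 2. */
static void throttle_math_example(void)
{
	unsigned long db = 2048;	/* bits resynced since mark i */
	unsigned long dt = 2;		/* seconds between the marks */
	unsigned long dbdt = Bit2KB(db / dt);	/* == 4096 KiB/s */

	/* against an assumed c_min_rate of 250 KiB/s this is "too fast",
	 * so drbd_rs_should_slow_down() would return 1 */
	BUG_ON(dbdt != 4096);
}
#endif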
2250
2251
2252 static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2253 {
2254         struct drbd_conf *mdev;
2255         sector_t sector;
2256         sector_t capacity;
2257         struct drbd_peer_request *peer_req;
2258         struct digest_info *di = NULL;
2259         int size, verb;
2260         unsigned int fault_type;
2261         struct p_block_req *p = pi->data;
2262
2263         mdev = vnr_to_mdev(tconn, pi->vnr);
2264         if (!mdev)
2265                 return -EIO;
2266         capacity = drbd_get_capacity(mdev->this_bdev);
2267
2268         sector = be64_to_cpu(p->sector);
2269         size   = be32_to_cpu(p->blksize);
2270
2271         if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2272                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2273                                 (unsigned long long)sector, size);
2274                 return -EINVAL;
2275         }
2276         if (sector + (size>>9) > capacity) {
2277                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2278                                 (unsigned long long)sector, size);
2279                 return -EINVAL;
2280         }
2281
2282         if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2283                 verb = 1;
2284                 switch (pi->cmd) {
2285                 case P_DATA_REQUEST:
2286                         drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2287                         break;
2288                 case P_RS_DATA_REQUEST:
2289                 case P_CSUM_RS_REQUEST:
2290                 case P_OV_REQUEST:
2291                         drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
2292                         break;
2293                 case P_OV_REPLY:
2294                         verb = 0;
2295                         dec_rs_pending(mdev);
2296                         drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2297                         break;
2298                 default:
2299                         BUG();
2300                 }
2301                 if (verb && __ratelimit(&drbd_ratelimit_state))
2302                         dev_err(DEV, "Cannot satisfy peer's read request, "
2303                             "no local data.\n");
2304
2305                 /* drain the payload, if any */
2306                 return drbd_drain_block(mdev, pi->size);
2307         }
2308
2309         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2310          * "criss-cross" setup, that might cause write-out on some other DRBD,
2311          * which in turn might block on the other node at this very place.  */
2312         peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
2313         if (!peer_req) {
2314                 put_ldev(mdev);
2315                 return -ENOMEM;
2316         }
2317
2318         switch (pi->cmd) {
2319         case P_DATA_REQUEST:
2320                 peer_req->w.cb = w_e_end_data_req;
2321                 fault_type = DRBD_FAULT_DT_RD;
2322                 /* application IO, don't drbd_rs_begin_io */
2323                 goto submit;
2324
2325         case P_RS_DATA_REQUEST:
2326                 peer_req->w.cb = w_e_end_rsdata_req;
2327                 fault_type = DRBD_FAULT_RS_RD;
2328                 /* used in the sector offset progress display */
2329                 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2330                 break;
2331
2332         case P_OV_REPLY:
2333         case P_CSUM_RS_REQUEST:
2334                 fault_type = DRBD_FAULT_RS_RD;
2335                 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2336                 if (!di)
2337                         goto out_free_e;
2338
2339                 di->digest_size = pi->size;
2340                 di->digest = (((char *)di)+sizeof(struct digest_info));
2341
2342                 peer_req->digest = di;
2343                 peer_req->flags |= EE_HAS_DIGEST;
2344
2345                 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
2346                         goto out_free_e;
2347
2348                 if (pi->cmd == P_CSUM_RS_REQUEST) {
2349                         D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
2350                         peer_req->w.cb = w_e_end_csum_rs_req;
2351                         /* used in the sector offset progress display */
2352                         mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2353                 } else if (pi->cmd == P_OV_REPLY) {
2354                         /* track progress, we may need to throttle */
2355                         atomic_add(size >> 9, &mdev->rs_sect_in);
2356                         peer_req->w.cb = w_e_end_ov_reply;
2357                         dec_rs_pending(mdev);
2358                         /* drbd_rs_begin_io done when we sent this request,
2359                          * but accounting still needs to be done. */
2360                         goto submit_for_resync;
2361                 }
2362                 break;
2363
2364         case P_OV_REQUEST:
2365                 if (mdev->ov_start_sector == ~(sector_t)0 &&
2366                     mdev->tconn->agreed_pro_version >= 90) {
2367                         unsigned long now = jiffies;
2368                         int i;
2369                         mdev->ov_start_sector = sector;
2370                         mdev->ov_position = sector;
2371                         mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2372                         mdev->rs_total = mdev->ov_left;
2373                         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2374                                 mdev->rs_mark_left[i] = mdev->ov_left;
2375                                 mdev->rs_mark_time[i] = now;
2376                         }
2377                         dev_info(DEV, "Online Verify start sector: %llu\n",
2378                                         (unsigned long long)sector);
2379                 }
2380                 peer_req->w.cb = w_e_end_ov_req;
2381                 fault_type = DRBD_FAULT_RS_RD;
2382                 break;
2383
2384         default:
2385                 BUG();
2386         }
2387
2388         /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2389          * wrt the receiver, but it is not as straightforward as it may seem.
2390          * Various places in the resync start and stop logic assume resync
2391          * requests are processed in order, requeuing this on the worker thread
2392          * introduces a bunch of new code for synchronization between threads.
2393          *
2394          * Unlimited throttling before drbd_rs_begin_io may stall the resync
2395          * "forever", throttling after drbd_rs_begin_io will lock that extent
2396          * for application writes for the same time.  For now, just throttle
2397          * here, where the rest of the code expects the receiver to sleep for
2398          * a while, anyway.
2399          */
2400
2401         /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2402          * this defers syncer requests for some time, before letting at least
2403          * one request through.  The resync controller on the receiving side
2404          * will adapt to the incoming rate accordingly.
2405          *
2406          * We cannot throttle here if remote is Primary/SyncTarget:
2407          * we would also throttle its application reads.
2408          * In that case, throttling is done on the SyncTarget only.
2409          */
2410         if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2411                 schedule_timeout_uninterruptible(HZ/10);
2412         if (drbd_rs_begin_io(mdev, sector))
2413                 goto out_free_e;
2414
2415 submit_for_resync:
2416         atomic_add(size >> 9, &mdev->rs_sect_ev);
2417
2418 submit:
2419         inc_unacked(mdev);
2420         spin_lock_irq(&mdev->tconn->req_lock);
2421         list_add_tail(&peer_req->w.list, &mdev->read_ee);
2422         spin_unlock_irq(&mdev->tconn->req_lock);
2423
2424         if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2425                 return 0;
2426
2427         /* don't care about the reason here */
2428         dev_err(DEV, "submit failed, triggering re-connect\n");
2429         spin_lock_irq(&mdev->tconn->req_lock);
2430         list_del(&peer_req->w.list);
2431         spin_unlock_irq(&mdev->tconn->req_lock);
2432         /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2433
2434 out_free_e:
2435         put_ldev(mdev);
2436         drbd_free_peer_req(mdev, peer_req);
2437         return -EIO;
2438 }
2439
2440 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2441 {
2442         int self, peer, rv = -100;
2443         unsigned long ch_self, ch_peer;
2444         enum drbd_after_sb_p after_sb_0p;
2445
2446         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2447         peer = mdev->p_uuid[UI_BITMAP] & 1;
2448
2449         ch_peer = mdev->p_uuid[UI_SIZE];
2450         ch_self = mdev->comm_bm_set;
2451
2452         rcu_read_lock();
2453         after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2454         rcu_read_unlock();
2455         switch (after_sb_0p) {
2456         case ASB_CONSENSUS:
2457         case ASB_DISCARD_SECONDARY:
2458         case ASB_CALL_HELPER:
2459         case ASB_VIOLENTLY:
2460                 dev_err(DEV, "Configuration error.\n");
2461                 break;
2462         case ASB_DISCONNECT:
2463                 break;
2464         case ASB_DISCARD_YOUNGER_PRI:
2465                 if (self == 0 && peer == 1) {
2466                         rv = -1;
2467                         break;
2468                 }
2469                 if (self == 1 && peer == 0) {
2470                         rv =  1;
2471                         break;
2472                 }
2473                 /* Else fall through to one of the other strategies... */
2474         case ASB_DISCARD_OLDER_PRI:
2475                 if (self == 0 && peer == 1) {
2476                         rv = 1;
2477                         break;
2478                 }
2479                 if (self == 1 && peer == 0) {
2480                         rv = -1;
2481                         break;
2482                 }
2483                 /* Else fall through to one of the other strategies... */
2484                 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2485                      "Using discard-least-changes instead\n");
2486         case ASB_DISCARD_ZERO_CHG:
2487                 if (ch_peer == 0 && ch_self == 0) {
2488                         rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2489                                 ? -1 : 1;
2490                         break;
2491                 } else {
2492                         if (ch_peer == 0) { rv =  1; break; }
2493                         if (ch_self == 0) { rv = -1; break; }
2494                 }
2495                 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2496                         break;
2497         case ASB_DISCARD_LEAST_CHG:
2498                 if      (ch_self < ch_peer)
2499                         rv = -1;
2500                 else if (ch_self > ch_peer)
2501                         rv =  1;
2502                 else /* ( ch_self == ch_peer ) */
2503                      /* Well, then use something else. */
2504                         rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2505                                 ? -1 : 1;
2506                 break;
2507         case ASB_DISCARD_LOCAL:
2508                 rv = -1;
2509                 break;
2510         case ASB_DISCARD_REMOTE:
2511                 rv =  1;
2512         }
2513
2514         return rv;
2515 }
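#if 0	/* Illustration only (sketch, not built): ASB_DISCARD_LEAST_CHG
	 * above, with made-up change counts. */
static int discard_least_chg_example(void)
{
	unsigned long ch_self = 40;	/* blocks we changed since split brain */
	unsigned long ch_peer = 1000;	/* blocks the peer changed */

	/* ch_self < ch_peer: our 40 changes are the cheaper ones to throw
	 * away, so rv == -1 and we become SyncTarget */
	return ch_self < ch_peer ? -1 : 1;
}
#endif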
2516
2517 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2518 {
2519         int hg, rv = -100;
2520         enum drbd_after_sb_p after_sb_1p;
2521
2522         rcu_read_lock();
2523         after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2524         rcu_read_unlock();
2525         switch (after_sb_1p) {
2526         case ASB_DISCARD_YOUNGER_PRI:
2527         case ASB_DISCARD_OLDER_PRI:
2528         case ASB_DISCARD_LEAST_CHG:
2529         case ASB_DISCARD_LOCAL:
2530         case ASB_DISCARD_REMOTE:
2531         case ASB_DISCARD_ZERO_CHG:
2532                 dev_err(DEV, "Configuration error.\n");
2533                 break;
2534         case ASB_DISCONNECT:
2535                 break;
2536         case ASB_CONSENSUS:
2537                 hg = drbd_asb_recover_0p(mdev);
2538                 if (hg == -1 && mdev->state.role == R_SECONDARY)
2539                         rv = hg;
2540                 if (hg == 1  && mdev->state.role == R_PRIMARY)
2541                         rv = hg;
2542                 break;
2543         case ASB_VIOLENTLY:
2544                 rv = drbd_asb_recover_0p(mdev);
2545                 break;
2546         case ASB_DISCARD_SECONDARY:
2547                 return mdev->state.role == R_PRIMARY ? 1 : -1;
2548         case ASB_CALL_HELPER:
2549                 hg = drbd_asb_recover_0p(mdev);
2550                 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2551                         enum drbd_state_rv rv2;
2552
2553                         drbd_set_role(mdev, R_SECONDARY, 0);
2554                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2555                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2556                           * we do not need to wait for the after state change work either. */
2557                         rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2558                         if (rv2 != SS_SUCCESS) {
2559                                 drbd_khelper(mdev, "pri-lost-after-sb");
2560                         } else {
2561                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2562                                 rv = hg;
2563                         }
2564                 } else
2565                         rv = hg;
2566         }
2567
2568         return rv;
2569 }
2570
2571 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2572 {
2573         int hg, rv = -100;
2574         enum drbd_after_sb_p after_sb_2p;
2575
2576         rcu_read_lock();
2577         after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2578         rcu_read_unlock();
2579         switch (after_sb_2p) {
2580         case ASB_DISCARD_YOUNGER_PRI:
2581         case ASB_DISCARD_OLDER_PRI:
2582         case ASB_DISCARD_LEAST_CHG:
2583         case ASB_DISCARD_LOCAL:
2584         case ASB_DISCARD_REMOTE:
2585         case ASB_CONSENSUS:
2586         case ASB_DISCARD_SECONDARY:
2587         case ASB_DISCARD_ZERO_CHG:
2588                 dev_err(DEV, "Configuration error.\n");
2589                 break;
2590         case ASB_VIOLENTLY:
2591                 rv = drbd_asb_recover_0p(mdev);
2592                 break;
2593         case ASB_DISCONNECT:
2594                 break;
2595         case ASB_CALL_HELPER:
2596                 hg = drbd_asb_recover_0p(mdev);
2597                 if (hg == -1) {
2598                         enum drbd_state_rv rv2;
2599
2600                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2601                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2602                           * we do not need to wait for the after state change work either. */
2603                         rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2604                         if (rv2 != SS_SUCCESS) {
2605                                 drbd_khelper(mdev, "pri-lost-after-sb");
2606                         } else {
2607                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2608                                 rv = hg;
2609                         }
2610                 } else
2611                         rv = hg;
2612         }
2613
2614         return rv;
2615 }
2616
2617 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2618                            u64 bits, u64 flags)
2619 {
2620         if (!uuid) {
2621                 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2622                 return;
2623         }
2624         dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2625              text,
2626              (unsigned long long)uuid[UI_CURRENT],
2627              (unsigned long long)uuid[UI_BITMAP],
2628              (unsigned long long)uuid[UI_HISTORY_START],
2629              (unsigned long long)uuid[UI_HISTORY_END],
2630              (unsigned long long)bits,
2631              (unsigned long long)flags);
2632 }
2633
2634 /*
2635   100   after split brain, try auto recover
2636     2   C_SYNC_SOURCE set BitMap
2637     1   C_SYNC_SOURCE use BitMap
2638     0   no Sync
2639    -1   C_SYNC_TARGET use BitMap
2640    -2   C_SYNC_TARGET set BitMap
2641  -100   after split brain, disconnect
2642 -1000   unrelated data
2643 -1091   requires proto 91
2644 -1096   requires proto 96
2645  */
2646 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2647 {
2648         u64 self, peer;
2649         int i, j;
2650
2651         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2652         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2653
2654         *rule_nr = 10;
2655         if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2656                 return 0;
2657
2658         *rule_nr = 20;
2659         if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2660              peer != UUID_JUST_CREATED)
2661                 return -2;
2662
2663         *rule_nr = 30;
2664         if (self != UUID_JUST_CREATED &&
2665             (peer == UUID_JUST_CREATED || peer == (u64)0))
2666                 return 2;
2667
2668         if (self == peer) {
2669                 int rct, dc; /* roles at crash time */
2670
2671                 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2672
2673                         if (mdev->tconn->agreed_pro_version < 91)
2674                                 return -1091;
2675
2676                         if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2677                             (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2678                                 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2679                                 drbd_uuid_set_bm(mdev, 0UL);
2680
2681                                 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2682                                                mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2683                                 *rule_nr = 34;
2684                         } else {
2685                                 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2686                                 *rule_nr = 36;
2687                         }
2688
2689                         return 1;
2690                 }
2691
2692                 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2693
2694                         if (mdev->tconn->agreed_pro_version < 91)
2695                                 return -1091;
2696
2697                         if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2698                             (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2699                                 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2700
2701                                 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2702                                 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2703                                 mdev->p_uuid[UI_BITMAP] = 0UL;
2704
2705                                 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2706                                 *rule_nr = 35;
2707                         } else {
2708                                 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2709                                 *rule_nr = 37;
2710                         }
2711
2712                         return -1;
2713                 }
2714
2715                 /* Common power [off|failure] */
2716                 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2717                         (mdev->p_uuid[UI_FLAGS] & 2);
2718                 /* lowest bit is set when we were primary,
2719                  * next bit (weight 2) is set when peer was primary */
2720                 *rule_nr = 40;
2721
2722                 switch (rct) {
2723                 case 0: /* !self_pri && !peer_pri */ return 0;
2724                 case 1: /*  self_pri && !peer_pri */ return 1;
2725                 case 2: /* !self_pri &&  peer_pri */ return -1;
2726                 case 3: /*  self_pri &&  peer_pri */
2727                         dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2728                         return dc ? -1 : 1;
2729                 }
2730         }
2731
2732         *rule_nr = 50;
2733         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2734         if (self == peer)
2735                 return -1;
2736
2737         *rule_nr = 51;
2738         peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2739         if (self == peer) {
2740                 if (mdev->tconn->agreed_pro_version < 96 ?
2741                     (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2742                     (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2743                     peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2744                         /* The last P_SYNC_UUID did not get through. Undo the peer's UUID
2745                            modifications from the last start of resync as sync source. */
2746
2747                         if (mdev->tconn->agreed_pro_version < 91)
2748                                 return -1091;
2749
2750                         mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2751                         mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2752
2753                         dev_info(DEV, "Did not get the last syncUUID packet, corrected:\n");
2754                         drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2755
2756                         return -1;
2757                 }
2758         }
2759
2760         *rule_nr = 60;
2761         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2762         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2763                 peer = mdev->p_uuid[i] & ~((u64)1);
2764                 if (self == peer)
2765                         return -2;
2766         }
2767
2768         *rule_nr = 70;
2769         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2770         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2771         if (self == peer)
2772                 return 1;
2773
2774         *rule_nr = 71;
2775         self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2776         if (self == peer) {
2777                 if (mdev->tconn->agreed_pro_version < 96 ?
2778                     (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2779                     (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2780                     self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2781                         /* The last P_SYNC_UUID did not get through. Undo our own UUID
2782                            modifications from the last start of resync as sync source. */
2783
2784                         if (mdev->tconn->agreed_pro_version < 91)
2785                                 return -1091;
2786
2787                         _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2788                         _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2789
2790                         dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2791                         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2792                                        mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2793
2794                         return 1;
2795                 }
2796         }
2797
2798
2799         *rule_nr = 80;
2800         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2801         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2802                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2803                 if (self == peer)
2804                         return 2;
2805         }
2806
2807         *rule_nr = 90;
2808         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2809         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2810         if (self == peer && self != ((u64)0))
2811                 return 100;
2812
2813         *rule_nr = 100;
2814         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2815                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2816                 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2817                         peer = mdev->p_uuid[j] & ~((u64)1);
2818                         if (self == peer)
2819                                 return -100;
2820                 }
2821         }
2822
2823         return -1000;
2824 }
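/* Editor's note: summary of drbd_uuid_compare() return values, derived
 * from the rules above:
 *       0   current UUIDs match, no resync necessary
 *    1/-1   we are sync source / sync target, bitmap based resync
 *    2/-2   we are sync source / sync target, full resync
 * 100/-100  split brain detected
 *   -1091   peer needs to support at least protocol version 91
 *   -1000   unrelated data
 */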
2825
2826 /* drbd_sync_handshake() returns the new conn state on success, or
2827    C_MASK (-1) on failure.
2828  */
2829 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2830                                            enum drbd_disk_state peer_disk) __must_hold(local)
2831 {
2832         enum drbd_conns rv = C_MASK;
2833         enum drbd_disk_state mydisk;
2834         struct net_conf *nc;
2835         int hg, rule_nr, rr_conflict, dry_run;
2836
2837         mydisk = mdev->state.disk;
2838         if (mydisk == D_NEGOTIATING)
2839                 mydisk = mdev->new_state_tmp.disk;
2840
2841         dev_info(DEV, "drbd_sync_handshake:\n");
2842         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2843         drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2844                        mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2845
2846         hg = drbd_uuid_compare(mdev, &rule_nr);
2847
2848         dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2849
2850         if (hg == -1000) {
2851                 dev_alert(DEV, "Unrelated data, aborting!\n");
2852                 return C_MASK;
2853         }
2854         if (hg < -1000) {
2855                 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2856                 return C_MASK;
2857         }
2858
2859         if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2860             (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
2861                 int f = (hg == -100) || abs(hg) == 2;
2862                 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2863                 if (f)
2864                         hg = hg*2;
2865                 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2866                      hg > 0 ? "source" : "target");
2867         }
2868
2869         if (abs(hg) == 100)
2870                 drbd_khelper(mdev, "initial-split-brain");
2871
2872         rcu_read_lock();
2873         nc = rcu_dereference(mdev->tconn->net_conf);
2874
2875         if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2876                 int pcount = (mdev->state.role == R_PRIMARY)
2877                            + (peer_role == R_PRIMARY);
2878                 int forced = (hg == -100);
2879
2880                 switch (pcount) {
2881                 case 0:
2882                         hg = drbd_asb_recover_0p(mdev);
2883                         break;
2884                 case 1:
2885                         hg = drbd_asb_recover_1p(mdev);
2886                         break;
2887                 case 2:
2888                         hg = drbd_asb_recover_2p(mdev);
2889                         break;
2890                 }
2891                 if (abs(hg) < 100) {
2892                         dev_warn(DEV, "Split-Brain detected, %d primaries, "
2893                              "automatically solved. Sync from %s node\n",
2894                              pcount, (hg < 0) ? "peer" : "this");
2895                         if (forced) {
2896                                 dev_warn(DEV, "Doing a full sync, since"
2897                                      " UUIDs were ambiguous.\n");
2898                                 hg = hg*2;
2899                         }
2900                 }
2901         }
2902
2903         if (hg == -100) {
2904                 if (nc->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2905                         hg = -1;
2906                 if (!nc->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2907                         hg = 1;
2908
2909                 if (abs(hg) < 100)
2910                         dev_warn(DEV, "Split-Brain detected, manually solved. "
2911                              "Sync from %s node\n",
2912                              (hg < 0) ? "peer" : "this");
2913         }
2914         rr_conflict = nc->rr_conflict;
2915         dry_run = nc->dry_run;
2916         rcu_read_unlock();
2917
2918         if (hg == -100) {
2919                 /* FIXME this log message is not correct if we end up here
2920                  * after an attempted attach on a diskless node.
2921                  * We just refuse to attach -- well, we drop the "connection"
2922                  * to that disk, in a way... */
2923                 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2924                 drbd_khelper(mdev, "split-brain");
2925                 return C_MASK;
2926         }
2927
2928         if (hg > 0 && mydisk <= D_INCONSISTENT) {
2929                 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2930                 return C_MASK;
2931         }
2932
2933         if (hg < 0 && /* intentionally, we do not use mydisk here. */
2934             mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2935                 switch (rr_conflict) {
2936                 case ASB_CALL_HELPER:
2937                         drbd_khelper(mdev, "pri-lost");
2938                         /* fall through */
2939                 case ASB_DISCONNECT:
2940                         dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2941                         return C_MASK;
2942                 case ASB_VIOLENTLY:
2943                         dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
2944                              "assumption\n");
2945                 }
2946         }
2947
2948         if (dry_run || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
2949                 if (hg == 0)
2950                         dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2951                 else
2952                         dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
2953                                  drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2954                                  abs(hg) >= 2 ? "full" : "bit-map based");
2955                 return C_MASK;
2956         }
2957
2958         if (abs(hg) >= 2) {
2959                 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2960                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2961                                         BM_LOCKED_SET_ALLOWED))
2962                         return C_MASK;
2963         }
2964
2965         if (hg > 0) { /* become sync source. */
2966                 rv = C_WF_BITMAP_S;
2967         } else if (hg < 0) { /* become sync target */
2968                 rv = C_WF_BITMAP_T;
2969         } else {
2970                 rv = C_CONNECTED;
2971                 if (drbd_bm_total_weight(mdev)) {
2972                         dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2973                              drbd_bm_total_weight(mdev));
2974                 }
2975         }
2976
2977         return rv;
2978 }
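/* Editor's note: the handshake result maps to the next connection state
 * roughly as follows (as implemented above):
 *   hg > 0   ->  C_WF_BITMAP_S  (we become SyncSource)
 *   hg < 0   ->  C_WF_BITMAP_T  (we become SyncTarget)
 *   hg == 0  ->  C_CONNECTED    (no resync)
 * with |hg| >= 2 additionally forcing a full sync by setting all bits
 * in the bitmap first, and C_MASK returned for all failure cases. */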
2979
2980 /* returns 1 if invalid */
2981 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2982 {
2983         /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2984         if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2985             (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2986                 return 0;
2987
2988         /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2989         if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2990             self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2991                 return 1;
2992
2993         /* everything else is valid if they are equal on both sides. */
2994         if (peer == self)
2995                 return 0;
2996
2997         /* everything else is invalid. */
2998         return 1;
2999 }
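/* Editor's note: as a quick reference, cmp_after_sb() accepts (returns
 * 0 for) exactly two cases: ASB_DISCARD_REMOTE on one side paired with
 * ASB_DISCARD_LOCAL on the other, and peer == self for any policy that
 * is not one of the two discard variants. Everything else is invalid. */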
3000
3001 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3002 {
3003         struct p_protocol *p = pi->data;
3004         int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3005         int p_want_lose, p_two_primaries, cf;
3006         struct net_conf *nc;
3007
3008         p_proto         = be32_to_cpu(p->protocol);
3009         p_after_sb_0p   = be32_to_cpu(p->after_sb_0p);
3010         p_after_sb_1p   = be32_to_cpu(p->after_sb_1p);
3011         p_after_sb_2p   = be32_to_cpu(p->after_sb_2p);
3012         p_two_primaries = be32_to_cpu(p->two_primaries);
3013         cf              = be32_to_cpu(p->conn_flags);
3014         p_want_lose = cf & CF_WANT_LOSE;
3015
3016         if (tconn->agreed_pro_version >= 87) {
3017                 char integrity_alg[SHARED_SECRET_MAX];
3018                 struct crypto_hash *tfm = NULL;
3019                 int err;
3020
3021                 if (pi->size > sizeof(integrity_alg))
3022                         return -EIO;
3023                 err = drbd_recv_all(tconn, integrity_alg, pi->size);
3024                 if (err)
3025                         return err;
3026                 integrity_alg[SHARED_SECRET_MAX-1] = 0;
3027
3028                 if (integrity_alg[0]) {
3029                         tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3030                         if (IS_ERR(tfm)) {
3031                                 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3032                                          integrity_alg);
3033                                 goto disconnect;
3034                         }
3035                         conn_info(tconn, "peer data-integrity-alg: %s\n", integrity_alg);
3036                 }
3037
3038                 if (tconn->peer_integrity_tfm)
3039                         crypto_free_hash(tconn->peer_integrity_tfm);
3040                 tconn->peer_integrity_tfm = tfm;
3041         }
3042
3043         clear_bit(CONN_DRY_RUN, &tconn->flags);
3044
3045         if (cf & CF_DRY_RUN)
3046                 set_bit(CONN_DRY_RUN, &tconn->flags);
3047
3048         rcu_read_lock();
3049         nc = rcu_dereference(tconn->net_conf);
3050
3051         if (p_proto != nc->wire_protocol && tconn->agreed_pro_version < 100) {
3052                 conn_err(tconn, "incompatible communication protocols\n");
3053                 goto disconnect_rcu_unlock;
3054         }
3055
3056         if (cmp_after_sb(p_after_sb_0p, nc->after_sb_0p)) {
3057                 conn_err(tconn, "incompatible after-sb-0pri settings\n");
3058                 goto disconnect_rcu_unlock;
3059         }
3060
3061         if (cmp_after_sb(p_after_sb_1p, nc->after_sb_1p)) {
3062                 conn_err(tconn, "incompatible after-sb-1pri settings\n");
3063                 goto disconnect_rcu_unlock;
3064         }
3065
3066         if (cmp_after_sb(p_after_sb_2p, nc->after_sb_2p)) {
3067                 conn_err(tconn, "incompatible after-sb-2pri settings\n");
3068                 goto disconnect_rcu_unlock;
3069         }
3070
3071         if (p_want_lose && nc->want_lose) {
3072                 conn_err(tconn, "both sides have the 'want_lose' flag set\n");
3073                 goto disconnect_rcu_unlock;
3074         }
3075
3076         if (p_two_primaries != nc->two_primaries) {
3077                 conn_err(tconn, "incompatible setting of the two-primaries options\n");
3078                 goto disconnect_rcu_unlock;
3079         }
3080
3081         rcu_read_unlock();
3082
3083         return 0;
3084
3085 disconnect_rcu_unlock:
3086         rcu_read_unlock();
3087 disconnect:
3088         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3089         return -EIO;
3090 }
3091
3092 /* helper function
3093  * input: alg name, feature name
3094  * return: NULL (alg name was "")
3095  *         ERR_PTR(error) if something goes wrong
3096  *         or the crypto hash ptr, if it worked out ok. */
3097 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3098                 const char *alg, const char *name)
3099 {
3100         struct crypto_hash *tfm;
3101
3102         if (!alg[0])
3103                 return NULL;
3104
3105         tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3106         if (IS_ERR(tfm)) {
3107                 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3108                         alg, name, PTR_ERR(tfm));
3109                 return tfm;
3110         }
3111         return tfm;
3112 }
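/* Editor's note: callers must distinguish all three outcomes of the
 * helper above, e.g. (a sketch, not verbatim code from this file):
 *
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;	(allocation failed)
 *	if (!tfm)
 *		...			(feature disabled, empty alg name)
 *
 * which is how receive_SyncParam() uses it further down. */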
3113
3114 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3115 {
3116         void *buffer = tconn->data.rbuf;
3117         int size = pi->size;
3118
3119         while (size) {
3120                 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3121                 s = drbd_recv(tconn, buffer, s);
3122                 if (s <= 0) {
3123                         if (s < 0)
3124                                 return s;
3125                         break;
3126                 }
3127                 size -= s;
3128         }
3129         if (size)
3130                 return -EIO;
3131         return 0;
3132 }
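/* Editor's note: this drains and discards pi->size bytes of payload in
 * DRBD_SOCKET_BUFFER_SIZE sized chunks, so the receive stream stays
 * positioned at the next packet header. */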
3133
3134 /*
3135  * config_unknown_volume  -  device configuration command for unknown volume
3136  *
3137  * When a device is added to an existing connection, the node on which the
3138  * device is added first will send configuration commands to its peer but the
3139  * peer will not know about the device yet.  It will warn and ignore these
3140  * commands.  Once the device is added on the second node, the second node will
3141  * send the same device configuration commands, but in the other direction.
3142  *
3143  * (We can also end up here if drbd is misconfigured.)
3144  */
3145 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3146 {
3147         conn_warn(tconn, "Volume %u unknown; ignoring %s packet\n",
3148                   pi->vnr, cmdname(pi->cmd));
3149         return ignore_remaining_packet(tconn, pi);
3150 }
3151
3152 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3153 {
3154         struct drbd_conf *mdev;
3155         struct p_rs_param_95 *p;
3156         unsigned int header_size, data_size, exp_max_sz;
3157         struct crypto_hash *verify_tfm = NULL;
3158         struct crypto_hash *csums_tfm = NULL;
3159         struct net_conf *old_net_conf, *new_net_conf = NULL;
3160         struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3161         const int apv = tconn->agreed_pro_version;
3162         struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3163         int fifo_size = 0;
3164         int err;
3165
3166         mdev = vnr_to_mdev(tconn, pi->vnr);
3167         if (!mdev)
3168                 return config_unknown_volume(tconn, pi);
3169
3170         exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
3171                     : apv == 88 ? sizeof(struct p_rs_param)
3172                                         + SHARED_SECRET_MAX
3173                     : apv <= 94 ? sizeof(struct p_rs_param_89)
3174                     : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3175
3176         if (pi->size > exp_max_sz) {
3177                 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3178                     pi->size, exp_max_sz);
3179                 return -EIO;
3180         }
3181
3182         if (apv <= 88) {
3183                 header_size = sizeof(struct p_rs_param);
3184                 data_size = pi->size - header_size;
3185         } else if (apv <= 94) {
3186                 header_size = sizeof(struct p_rs_param_89);
3187                 data_size = pi->size - header_size;
3188                 D_ASSERT(data_size == 0);
3189         } else {
3190                 header_size = sizeof(struct p_rs_param_95);
3191                 data_size = pi->size - header_size;
3192                 D_ASSERT(data_size == 0);
3193         }
3194
3195         /* initialize verify_alg and csums_alg */
3196         p = pi->data;
3197         memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3198
3199         err = drbd_recv_all(mdev->tconn, p, header_size);
3200         if (err)
3201                 return err;
3202
3203         mutex_lock(&mdev->tconn->conf_update);
3204         old_net_conf = mdev->tconn->net_conf;
3205         if (get_ldev(mdev)) {
3206                 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3207                 if (!new_disk_conf) {
3208                         put_ldev(mdev);
3209                         mutex_unlock(&mdev->tconn->conf_update);
3210                         dev_err(DEV, "Allocation of new disk_conf failed\n");
3211                         return -ENOMEM;
3212                 }
3213
3214                 old_disk_conf = mdev->ldev->disk_conf;
3215                 *new_disk_conf = *old_disk_conf;
3216
3217                 new_disk_conf->resync_rate = be32_to_cpu(p->rate);
3218         }
3219
3220         if (apv >= 88) {
3221                 if (apv == 88) {
3222                         if (data_size > SHARED_SECRET_MAX) {
3223                                 dev_err(DEV, "verify-alg too long, "
3224                                     "peer wants %u, accepting only %u bytes\n",
3225                                                 data_size, SHARED_SECRET_MAX);
3226                                 err = -EIO;
3227                                 goto reconnect;
3228                         }
3229
3230                         err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3231                         if (err)
3232                                 goto reconnect;
3233                         /* we expect NUL terminated string */
3234                         /* but just in case someone tries to be evil */
3235                         D_ASSERT(p->verify_alg[data_size-1] == 0);
3236                         p->verify_alg[data_size-1] = 0;
3237
3238                 } else /* apv >= 89 */ {
3239                         /* we still expect NUL terminated strings */
3240                         /* but just in case someone tries to be evil */
3241                         D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3242                         D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3243                         p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3244                         p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3245                 }
3246
3247                 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3248                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3249                                 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3250                                     old_net_conf->verify_alg, p->verify_alg);
3251                                 goto disconnect;
3252                         }
3253                         verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3254                                         p->verify_alg, "verify-alg");
3255                         if (IS_ERR(verify_tfm)) {
3256                                 verify_tfm = NULL;
3257                                 goto disconnect;
3258                         }
3259                 }
3260
3261                 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3262                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3263                                 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3264                                     old_net_conf->csums_alg, p->csums_alg);
3265                                 goto disconnect;
3266                         }
3267                         csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3268                                         p->csums_alg, "csums-alg");
3269                         if (IS_ERR(csums_tfm)) {
3270                                 csums_tfm = NULL;
3271                                 goto disconnect;
3272                         }
3273                 }
3274
3275                 if (apv > 94 && new_disk_conf) {
3276                         new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3277                         new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3278                         new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3279                         new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3280
3281                         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3282                         if (fifo_size != mdev->rs_plan_s->size) {
3283                                 new_plan = fifo_alloc(fifo_size);
3284                                 if (!new_plan) {
3285                                         dev_err(DEV, "Allocation of fifo_buffer failed\n");
3286                                         put_ldev(mdev);
3287                                         goto disconnect;
3288                                 }
3289                         }
3290                 }
3291
3292                 if (verify_tfm || csums_tfm) {
3293                         new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3294                         if (!new_net_conf) {
3295                                 dev_err(DEV, "Allocation of new net_conf failed\n");
3296                                 goto disconnect;
3297                         }
3298
3299                         *new_net_conf = *old_net_conf;
3300
3301                         if (verify_tfm) {
3302                                 strcpy(new_net_conf->verify_alg, p->verify_alg);
3303                                 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3304                                 crypto_free_hash(mdev->tconn->verify_tfm);
3305                                 mdev->tconn->verify_tfm = verify_tfm;
3306                                 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3307                         }
3308                         if (csums_tfm) {
3309                                 strcpy(new_net_conf->csums_alg, p->csums_alg);
3310                                 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3311                                 crypto_free_hash(mdev->tconn->csums_tfm);
3312                                 mdev->tconn->csums_tfm = csums_tfm;
3313                                 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3314                         }
3315                         rcu_assign_pointer(tconn->net_conf, new_net_conf);
3316                 }
3317         }
3318
3319         if (new_disk_conf) {
3320                 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3321                 put_ldev(mdev);
3322         }
3323
3324         if (new_plan) {
3325                 old_plan = mdev->rs_plan_s;
3326                 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3327         }
3328
3329         mutex_unlock(&mdev->tconn->conf_update);
3330         synchronize_rcu();
3331         if (new_net_conf)
3332                 kfree(old_net_conf);
3333         kfree(old_disk_conf);
3334         kfree(old_plan);
3335
3336         return 0;
3337
3338 reconnect:
3339         if (new_disk_conf) {
3340                 put_ldev(mdev);
3341                 kfree(new_disk_conf);
3342         }
3343         mutex_unlock(&mdev->tconn->conf_update);
3344         return -EIO;
3345
3346 disconnect:
3347         kfree(new_plan);
3348         if (new_disk_conf) {
3349                 put_ldev(mdev);
3350                 kfree(new_disk_conf);
3351         }
3352         mutex_unlock(&mdev->tconn->conf_update);
3353         /* free any tfms allocated above that were not published;
3354          * we can get here with either or both still set (e.g. when the
3355          * fifo or net_conf allocation failed). Freeing NULL is safe. */
3356         crypto_free_hash(csums_tfm);
3357         crypto_free_hash(verify_tfm);
3358         conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3359         return -EIO;
3360 }
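/* Editor's note: receive_SyncParam() above follows the usual RCU update
 * pattern for net_conf, disk_conf and rs_plan_s: allocate a copy,
 * modify it, publish it with rcu_assign_pointer() under conf_update,
 * then synchronize_rcu() before kfree()ing the old object, so lockless
 * readers under rcu_read_lock() never see freed memory. */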
3361
3362 /* warn if the arguments differ by more than 12.5% */
3363 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3364         const char *s, sector_t a, sector_t b)
3365 {
3366         sector_t d;
3367         if (a == 0 || b == 0)
3368                 return;
3369         d = (a > b) ? (a - b) : (b - a);
3370         if (d > (a>>3) || d > (b>>3))
3371                 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3372                      (unsigned long long)a, (unsigned long long)b);
3373 }
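/* Editor's note: "more than 12.5%" is implemented as d > a/8 or d > b/8
 * (a>>3 is a/8). For example, a = 1000 and b = 1126 gives d = 126,
 * which exceeds 1000>>3 = 125, so the warning is issued. */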
3374
3375 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3376 {
3377         struct drbd_conf *mdev;
3378         struct p_sizes *p = pi->data;
3379         enum determine_dev_size dd = unchanged;
3380         sector_t p_size, p_usize, my_usize;
3381         int ldsc = 0; /* local disk size changed */
3382         enum dds_flags ddsf;
3383
3384         mdev = vnr_to_mdev(tconn, pi->vnr);
3385         if (!mdev)
3386                 return config_unknown_volume(tconn, pi);
3387
3388         p_size = be64_to_cpu(p->d_size);
3389         p_usize = be64_to_cpu(p->u_size);
3390
3391         /* just store the peer's disk size for now.
3392          * we still need to figure out whether we accept that. */
3393         mdev->p_size = p_size;
3394
3395         if (get_ldev(mdev)) {
3396                 rcu_read_lock();
3397                 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3398                 rcu_read_unlock();
3399
3400                 warn_if_differ_considerably(mdev, "lower level device sizes",
3401                            p_size, drbd_get_max_capacity(mdev->ldev));
3402                 warn_if_differ_considerably(mdev, "user requested size",
3403                                             p_usize, my_usize);
3404
3405                 /* if this is the first connect, or an otherwise expected
3406                  * param exchange, choose the minimum */
3407                 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3408                         p_usize = min_not_zero(my_usize, p_usize);
3409
3410                 /* Never shrink a device with usable data during connect.
3411                    But allow online shrinking if we are connected. */
3412                 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3413                     drbd_get_capacity(mdev->this_bdev) &&
3414                     mdev->state.disk >= D_OUTDATED &&
3415                     mdev->state.conn < C_CONNECTED) {
3416                         dev_err(DEV, "The peer's disk size is too small!\n");
3417                         conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3418                         put_ldev(mdev);
3419                         return -EIO;
3420                 }
3421
3422                 if (my_usize != p_usize) {
3423                         struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3424
3425                         new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3426                         if (!new_disk_conf) {
3427                                 dev_err(DEV, "Allocation of new disk_conf failed\n");
3428                                 put_ldev(mdev);
3429                                 return -ENOMEM;
3430                         }
3431
3432                         mutex_lock(&mdev->tconn->conf_update);
3433                         old_disk_conf = mdev->ldev->disk_conf;
3434                         *new_disk_conf = *old_disk_conf;
3435                         new_disk_conf->disk_size = p_usize;
3436
3437                         rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3438                         mutex_unlock(&mdev->tconn->conf_update);
3439                         synchronize_rcu();
3440                         kfree(old_disk_conf);
3441
3442                         dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3443                                  (unsigned long)p_usize);
3444                 }
3445
3446                 put_ldev(mdev);
3447         }
3448
3449         ddsf = be16_to_cpu(p->dds_flags);
3450         if (get_ldev(mdev)) {
3451                 dd = drbd_determine_dev_size(mdev, ddsf);
3452                 put_ldev(mdev);
3453                 if (dd == dev_size_error)
3454                         return -EIO;
3455                 drbd_md_sync(mdev);
3456         } else {
3457                 /* I am diskless, need to accept the peer's size. */
3458                 drbd_set_my_capacity(mdev, p_size);
3459         }
3460
3461         mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3462         drbd_reconsider_max_bio_size(mdev);
3463
3464         if (get_ldev(mdev)) {
3465                 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3466                         mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3467                         ldsc = 1;
3468                 }
3469
3470                 put_ldev(mdev);
3471         }
3472
3473         if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3474                 if (be64_to_cpu(p->c_size) !=
3475                     drbd_get_capacity(mdev->this_bdev) || ldsc) {
3476                         /* we have different sizes, probably peer
3477                          * needs to know my new size... */
3478                         drbd_send_sizes(mdev, 0, ddsf);
3479                 }
3480                 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3481                     (dd == grew && mdev->state.conn == C_CONNECTED)) {
3482                         if (mdev->state.pdsk >= D_INCONSISTENT &&
3483                             mdev->state.disk >= D_INCONSISTENT) {
3484                                 if (ddsf & DDSF_NO_RESYNC)
3485                                         dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3486                                 else
3487                                         resync_after_online_grow(mdev);
3488                         } else
3489                                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3490                 }
3491         }
3492
3493         return 0;
3494 }
3495
3496 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3497 {
3498         struct drbd_conf *mdev;
3499         struct p_uuids *p = pi->data;
3500         u64 *p_uuid;
3501         int i, updated_uuids = 0;
3502
3503         mdev = vnr_to_mdev(tconn, pi->vnr);
3504         if (!mdev)
3505                 return config_unknown_volume(tconn, pi);
3506
3507         p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
             if (!p_uuid) {
                     dev_err(DEV, "kmalloc of p_uuid failed\n");
                     return -ENOMEM;
             }
3508
3509         for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3510                 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3511
3512         kfree(mdev->p_uuid);
3513         mdev->p_uuid = p_uuid;
3514
3515         if (mdev->state.conn < C_CONNECTED &&
3516             mdev->state.disk < D_INCONSISTENT &&
3517             mdev->state.role == R_PRIMARY &&
3518             (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3519                 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3520                     (unsigned long long)mdev->ed_uuid);
3521                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3522                 return -EIO;
3523         }
3524
3525         if (get_ldev(mdev)) {
3526                 int skip_initial_sync =
3527                         mdev->state.conn == C_CONNECTED &&
3528                         mdev->tconn->agreed_pro_version >= 90 &&
3529                         mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3530                         (p_uuid[UI_FLAGS] & 8);
3531                 if (skip_initial_sync) {
3532                         dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3533                         drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3534                                         "clear_n_write from receive_uuids",
3535                                         BM_LOCKED_TEST_ALLOWED);
3536                         _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3537                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
3538                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3539                                         CS_VERBOSE, NULL);
3540                         drbd_md_sync(mdev);
3541                         updated_uuids = 1;
3542                 }
3543                 put_ldev(mdev);
3544         } else if (mdev->state.disk < D_INCONSISTENT &&
3545                    mdev->state.role == R_PRIMARY) {
3546                 /* I am a diskless primary, the peer just created a new current UUID
3547                    for me. */
3548                 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3549         }
3550
3551         /* Before we test for the disk state, we should wait until any possibly
3552            ongoing cluster-wide state change is finished. That is important if
3553            we are primary and are detaching from our disk. We need to see the
3554            new disk state... */
3555         mutex_lock(mdev->state_mutex);
3556         mutex_unlock(mdev->state_mutex);
3557         if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3558                 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3559
3560         if (updated_uuids)
3561                 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3562
3563         return 0;
3564 }
3565
3566 /**
3567  * convert_state() - Converts the peer's view of the cluster state to our point of view
3568  * @ps:         The state as seen by the peer.
3569  */
3570 static union drbd_state convert_state(union drbd_state ps)
3571 {
3572         union drbd_state ms;
3573
3574         static enum drbd_conns c_tab[] = {
3575                 [C_CONNECTED] = C_CONNECTED,
3576
3577                 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3578                 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3579                 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3580                 [C_VERIFY_S]       = C_VERIFY_T,
3581                 [C_MASK]   = C_MASK,
3582         };
3583
3584         ms.i = ps.i;
3585
3586         ms.conn = c_tab[ps.conn];
3587         ms.peer = ps.role;
3588         ms.role = ps.peer;
3589         ms.pdsk = ps.disk;
3590         ms.disk = ps.pdsk;
3591         ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3592
3593         return ms;
3594 }
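/* Editor's note: convert_state() mirrors the peer's view into ours by
 * swapping role/peer and disk/pdsk and mapping the connection state
 * through c_tab. E.g. a peer state (role=Primary, peer=Secondary,
 * disk=UpToDate, pdsk=Inconsistent) becomes
 * (role=Secondary, peer=Primary, disk=Inconsistent, pdsk=UpToDate). */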
3595
3596 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3597 {
3598         struct drbd_conf *mdev;
3599         struct p_req_state *p = pi->data;
3600         union drbd_state mask, val;
3601         enum drbd_state_rv rv;
3602
3603         mdev = vnr_to_mdev(tconn, pi->vnr);
3604         if (!mdev)
3605                 return -EIO;
3606
3607         mask.i = be32_to_cpu(p->mask);
3608         val.i = be32_to_cpu(p->val);
3609
3610         if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
3611             mutex_is_locked(mdev->state_mutex)) {
3612                 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3613                 return 0;
3614         }
3615
3616         mask = convert_state(mask);
3617         val = convert_state(val);
3618
3619         rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3620         drbd_send_sr_reply(mdev, rv);
3621
3622         drbd_md_sync(mdev);
3623
3624         return 0;
3625 }
3626
3627 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3628 {
3629         struct p_req_state *p = pi->data;
3630         union drbd_state mask, val;
3631         enum drbd_state_rv rv;
3632
3633         mask.i = be32_to_cpu(p->mask);
3634         val.i = be32_to_cpu(p->val);
3635
3636         if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
3637             mutex_is_locked(&tconn->cstate_mutex)) {
3638                 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3639                 return 0;
3640         }
3641
3642         mask = convert_state(mask);
3643         val = convert_state(val);
3644
3645         rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3646         conn_send_sr_reply(tconn, rv);
3647
3648         return 0;
3649 }
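/* Editor's note: in both request-state handlers above, the side with
 * DISCARD_CONCURRENT set answers SS_CONCURRENT_ST_CHG while a local
 * state change is in flight (state_mutex/cstate_mutex held), instead of
 * applying the peer's request; this breaks ties between simultaneous
 * state change requests from the two nodes. */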
3650
3651 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3652 {
3653         struct drbd_conf *mdev;
3654         struct p_state *p = pi->data;
3655         union drbd_state os, ns, peer_state;
3656         enum drbd_disk_state real_peer_disk;
3657         enum chg_state_flags cs_flags;
3658         int rv;
3659
3660         mdev = vnr_to_mdev(tconn, pi->vnr);
3661         if (!mdev)
3662                 return config_unknown_volume(tconn, pi);
3663
3664         peer_state.i = be32_to_cpu(p->state);
3665
3666         real_peer_disk = peer_state.disk;
3667         if (peer_state.disk == D_NEGOTIATING) {
3668                 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3669                 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3670         }
3671
3672         spin_lock_irq(&mdev->tconn->req_lock);
3673  retry:
3674         os = ns = drbd_read_state(mdev);
3675         spin_unlock_irq(&mdev->tconn->req_lock);
3676
3677         /* peer says his disk is uptodate, while we think it is inconsistent,
3678          * and this happens while we think we have a sync going on. */
3679         if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3680             os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3681                 /* If we are (becoming) SyncSource, but peer is still in sync
3682                  * preparation, ignore its uptodate-ness to avoid flapping, it
3683                  * will change to inconsistent once the peer reaches active
3684                  * syncing states.
3685                  * It may have changed syncer-paused flags, however, so we
3686                  * cannot ignore this completely. */
3687                 if (peer_state.conn > C_CONNECTED &&
3688                     peer_state.conn < C_SYNC_SOURCE)
3689                         real_peer_disk = D_INCONSISTENT;
3690
3691                 /* if peer_state changes to connected at the same time,
3692                  * it explicitly notifies us that it finished resync.
3693                  * Maybe we should finish it up, too? */
3694                 else if (os.conn >= C_SYNC_SOURCE &&
3695                          peer_state.conn == C_CONNECTED) {
3696                         if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3697                                 drbd_resync_finished(mdev);
3698                         return 0;
3699                 }
3700         }
3701
3702         /* peer says his disk is inconsistent, while we think it is uptodate,
3703          * and this happens while the peer still thinks we have a sync going on,
3704          * but we think we are already done with the sync.
3705          * We ignore this to avoid flapping pdsk.
3706          * This should not happen, if the peer is a recent version of drbd. */
3707         if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3708             os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3709                 real_peer_disk = D_UP_TO_DATE;
3710
3711         if (ns.conn == C_WF_REPORT_PARAMS)
3712                 ns.conn = C_CONNECTED;
3713
3714         if (peer_state.conn == C_AHEAD)
3715                 ns.conn = C_BEHIND;
3716
3717         if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3718             get_ldev_if_state(mdev, D_NEGOTIATING)) {
3719                 int cr; /* consider resync */
3720
3721                 /* if we established a new connection */
3722                 cr  = (os.conn < C_CONNECTED);
3723                 /* if we had an established connection
3724                  * and one of the nodes newly attaches a disk */
3725                 cr |= (os.conn == C_CONNECTED &&
3726                        (peer_state.disk == D_NEGOTIATING ||
3727                         os.disk == D_NEGOTIATING));
3728                 /* if we have both been inconsistent, and the peer has been
3729                  * forced to be UpToDate with --overwrite-data */
3730                 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3731                 /* if we had been plain connected, and the admin requested to
3732                  * start a sync by "invalidate" or "invalidate-remote" */
3733                 cr |= (os.conn == C_CONNECTED &&
3734                                 (peer_state.conn >= C_STARTING_SYNC_S &&
3735                                  peer_state.conn <= C_WF_BITMAP_T));
3736
3737                 if (cr)
3738                         ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3739
3740                 put_ldev(mdev);
3741                 if (ns.conn == C_MASK) {
3742                         ns.conn = C_CONNECTED;
3743                         if (mdev->state.disk == D_NEGOTIATING) {
3744                                 drbd_force_state(mdev, NS(disk, D_FAILED));
3745                         } else if (peer_state.disk == D_NEGOTIATING) {
3746                                 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3747                                 peer_state.disk = D_DISKLESS;
3748                                 real_peer_disk = D_DISKLESS;
3749                         } else {
3750                                 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3751                                         return -EIO;
3752                                 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3753                                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3754                                 return -EIO;
3755                         }
3756                 }
3757         }
3758
3759         spin_lock_irq(&mdev->tconn->req_lock);
3760         if (os.i != drbd_read_state(mdev).i)
3761                 goto retry;
3762         clear_bit(CONSIDER_RESYNC, &mdev->flags);
3763         ns.peer = peer_state.role;
3764         ns.pdsk = real_peer_disk;
3765         ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3766         if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3767                 ns.disk = mdev->new_state_tmp.disk;
3768         cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3769         if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3770             test_bit(NEW_CUR_UUID, &mdev->flags)) {
3771                 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3772                    for temporary network outages! */
3773                 spin_unlock_irq(&mdev->tconn->req_lock);
3774                 dev_err(DEV, "Aborting Connect, can not thaw IO with a peer that is only Consistent\n");
3775                 tl_clear(mdev->tconn);
3776                 drbd_uuid_new_current(mdev);
3777                 clear_bit(NEW_CUR_UUID, &mdev->flags);
3778                 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3779                 return -EIO;
3780         }
3781         rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3782         ns = drbd_read_state(mdev);
3783         spin_unlock_irq(&mdev->tconn->req_lock);
3784
3785         if (rv < SS_SUCCESS) {
3786                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3787                 return -EIO;
3788         }
3789
3790         if (os.conn > C_WF_REPORT_PARAMS) {
3791                 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3792                     peer_state.disk != D_NEGOTIATING ) {
3793                         /* we want resync, peer has not yet decided to sync... */
3794                         /* Nowadays only used when forcing a node into primary role and
3795                            setting its disk to UpToDate at the same time */
3796                         drbd_send_uuids(mdev);
3797                         drbd_send_state(mdev);
3798                 }
3799         }
3800
3801         mutex_lock(&mdev->tconn->conf_update);
3802         mdev->tconn->net_conf->want_lose = 0; /* without copy; single bit op is atomic */
3803         mutex_unlock(&mdev->tconn->conf_update);
3804
3805         drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3806
3807         return 0;
3808 }
3809
3810 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
3811 {
3812         struct drbd_conf *mdev;
3813         struct p_rs_uuid *p = pi->data;
3814
3815         mdev = vnr_to_mdev(tconn, pi->vnr);
3816         if (!mdev)
3817                 return -EIO;
3818
3819         wait_event(mdev->misc_wait,
3820                    mdev->state.conn == C_WF_SYNC_UUID ||
3821                    mdev->state.conn == C_BEHIND ||
3822                    mdev->state.conn < C_CONNECTED ||
3823                    mdev->state.disk < D_NEGOTIATING);
3824
3825         /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3826
3827         /* Here the _drbd_uuid_ functions are right, current should
3828            _not_ be rotated into the history */
3829         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3830                 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3831                 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3832
3833                 drbd_print_uuids(mdev, "updated sync uuid");
3834                 drbd_start_resync(mdev, C_SYNC_TARGET);
3835
3836                 put_ldev(mdev);
3837         } else
3838                 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3839
3840         return 0;
3841 }
3842
3843 /**
3844  * receive_bitmap_plain
3845  *
3846  * Return 0 when done, 1 when another iteration is needed, and a negative error
3847  * code upon failure.
3848  */
3849 static int
3850 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
3851                      unsigned long *p, struct bm_xfer_ctx *c)
3852 {
3853         unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
3854                                  drbd_header_size(mdev->tconn);
3855         unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
3856                                        c->bm_words - c->word_offset);
3857         unsigned int want = num_words * sizeof(*p);
3858         int err;
3859
3860         if (want != size) {
3861                 dev_err(DEV, "%s: want (%u) != size (%u)\n", __func__, want, size);
3862                 return -EIO;
3863         }
3864         if (want == 0)
3865                 return 0;
3866         err = drbd_recv_all(mdev->tconn, p, want);
3867         if (err)
3868                 return err;
3869
3870         drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
3871
3872         c->word_offset += num_words;
3873         c->bit_offset = c->word_offset * BITS_PER_LONG;
3874         if (c->bit_offset > c->bm_bits)
3875                 c->bit_offset = c->bm_bits;
3876
3877         return 1;
3878 }
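/* Editor's note: receive_bitmap_plain() transfers whole unsigned longs;
 * c->word_offset tracks progress and c->bit_offset is derived from it
 * (clamped to bm_bits), matching the bookkeeping that the compressed
 * path maintains via bm_xfer_ctx_bit_to_word_offset(). */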
3879
3880 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
3881 {
3882         return (enum drbd_bitmap_code)(p->encoding & 0x0f);
3883 }
3884
3885 static int dcbp_get_start(struct p_compressed_bm *p)
3886 {
3887         return (p->encoding & 0x80) != 0;
3888 }
3889
3890 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
3891 {
3892         return (p->encoding >> 4) & 0x7;
3893 }
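/* Editor's note: taken together, the three accessors above imply this
 * layout of the p_compressed_bm encoding byte:
 *   bit  7    - value of the first run (dcbp_get_start)
 *   bits 6..4 - number of unused pad bits at the end of the bitstream
 *   bits 3..0 - bitmap code; only RLE_VLI_Bits is handled below
 */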
3894
3895 /**
3896  * recv_bm_rle_bits
3897  *
3898  * Return 0 when done, 1 when another iteration is needed, and a negative error
3899  * code upon failure.
3900  */
3901 static int
3902 recv_bm_rle_bits(struct drbd_conf *mdev,
3903                 struct p_compressed_bm *p,
3904                  struct bm_xfer_ctx *c,
3905                  unsigned int len)
3906 {
3907         struct bitstream bs;
3908         u64 look_ahead;
3909         u64 rl;
3910         u64 tmp;
3911         unsigned long s = c->bit_offset;
3912         unsigned long e;
3913         int toggle = dcbp_get_start(p);
3914         int have;
3915         int bits;
3916
3917         bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
3918
3919         bits = bitstream_get_bits(&bs, &look_ahead, 64);
3920         if (bits < 0)
3921                 return -EIO;
3922
3923         for (have = bits; have > 0; s += rl, toggle = !toggle) {
3924                 bits = vli_decode_bits(&rl, look_ahead);
3925                 if (bits <= 0)
3926                         return -EIO;
3927
3928                 if (toggle) {
3929                         e = s + rl - 1;
3930                         if (e >= c->bm_bits) {
3931                                 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3932                                 return -EIO;
3933                         }
3934                         _drbd_bm_set_bits(mdev, s, e);
3935                 }
3936
3937                 if (have < bits) {
3938                         dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3939                                 have, bits, look_ahead,
3940                                 (unsigned int)(bs.cur.b - p->code),
3941                                 (unsigned int)bs.buf_len);
3942                         return -EIO;
3943                 }
3944                 look_ahead >>= bits;
3945                 have -= bits;
3946
3947                 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3948                 if (bits < 0)
3949                         return -EIO;
3950                 look_ahead |= tmp << have;
3951                 have += bits;
3952         }
3953
3954         c->bit_offset = s;
3955         bm_xfer_ctx_bit_to_word_offset(c);
3956
3957         return (s != c->bm_bits);
3958 }
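/* Editor's note: the stream decoded above is a sequence of VLI-encoded
 * run lengths describing alternating runs of clear and set bits,
 * starting with the polarity from dcbp_get_start(); only the set runs
 * are applied via _drbd_bm_set_bits(). The 64 bit look_ahead window is
 * refilled from the bitstream after each run is consumed. */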
3959
3960 /**
3961  * decode_bitmap_c
3962  *
3963  * Return 0 when done, 1 when another iteration is needed, and a negative error
3964  * code upon failure.
3965  */
3966 static int
3967 decode_bitmap_c(struct drbd_conf *mdev,
3968                 struct p_compressed_bm *p,
3969                 struct bm_xfer_ctx *c,
3970                 unsigned int len)
3971 {
3972         if (dcbp_get_code(p) == RLE_VLI_Bits)
3973                 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
3974
3975         /* other variants had been implemented for evaluation,
3976          * but have been dropped as this one turned out to be "best"
3977          * during all our tests. */
3978
3979         dev_err(DEV, "decode_bitmap_c: unknown encoding %u\n", p->encoding);
3980         conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
3981         return -EIO;
3982 }
3983
3984 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3985                 const char *direction, struct bm_xfer_ctx *c)
3986 {
3987         /* what would it take to transfer it "plaintext" */
3988         unsigned int header_size = drbd_header_size(mdev->tconn);
3989         unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
3990         unsigned int plain =
3991                 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
3992                 c->bm_words * sizeof(unsigned long);
3993         unsigned int total = c->bytes[0] + c->bytes[1];
3994         unsigned int r;
3995
3996         /* total can not be zero. but just in case: */
3997         if (total == 0)
3998                 return;
3999
4000         /* don't report if not compressed */
4001         if (total >= plain)
4002                 return;
4003
4004         /* total < plain. check for overflow, still */
4005         r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4006                                     : (1000 * total / plain);
4007
4008         if (r > 1000)
4009                 r = 1000;
4010
4011         r = 1000 - r;
4012         dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4013              "total %u; compression: %u.%u%%\n",
4014                         direction,
4015                         c->bytes[1], c->packets[1],
4016                         c->bytes[0], c->packets[0],
4017                         total, r/10, r % 10);
4018 }
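/* Editor's note: r above is the per-mille saving,
 * r = 1000 - 1000 * total / plain. For example, plain = 100000 and
 * total = 25000 gives r = 750, reported as "compression: 75.0%". */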
4019
4020 /* Since we are processing the bitfield from lower addresses to higher,
4021    it does not matter whether we process it in 32 bit chunks or 64 bit
4022    chunks as long as it is little endian. (Understand it as a byte stream,
4023    beginning with the lowest byte...) If we used big endian,
4024    we would need to process it from the highest address to the lowest,
4025    in order to be agnostic to the 32 vs 64 bit issue.
4026
4027    Returns 0 on success, a negative error code otherwise. */
4028 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4029 {
4030         struct drbd_conf *mdev;
4031         struct bm_xfer_ctx c;
4032         int err;
4033
4034         mdev = vnr_to_mdev(tconn, pi->vnr);
4035         if (!mdev)
4036                 return -EIO;
4037
4038         drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4039         /* you are supposed to send additional out-of-sync information
4040          * if you actually set bits during this phase */
4041
4042         c = (struct bm_xfer_ctx) {
4043                 .bm_bits = drbd_bm_bits(mdev),
4044                 .bm_words = drbd_bm_words(mdev),
4045         };
4046
4047         for(;;) {
4048                 if (pi->cmd == P_BITMAP)
4049                         err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4050                 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4051                         /* MAYBE: sanity check that we speak proto >= 90,
4052                          * and the feature is enabled! */
4053                         struct p_compressed_bm *p = pi->data;
4054
4055                         if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4056                                 dev_err(DEV, "ReportCBitmap packet too large\n");
4057                                 err = -EIO;
4058                                 goto out;
4059                         }
4060                         if (pi->size <= sizeof(*p)) {
4061                                 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4062                                 err = -EIO;
4063                                 goto out;
4064                         }
4065                         err = drbd_recv_all(mdev->tconn, p, pi->size);
4066                         if (err)
4067                                goto out;
4068                         err = decode_bitmap_c(mdev, p, &c, pi->size);
4069                 } else {
4070                         dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4071                         err = -EIO;
4072                         goto out;
4073                 }
4074
4075                 c.packets[pi->cmd == P_BITMAP]++;
4076                 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4077
4078                 if (err <= 0) {
4079                         if (err < 0)
4080                                 goto out;
4081                         break;
4082                 }
4083                 err = drbd_recv_header(mdev->tconn, pi);
4084                 if (err)
4085                         goto out;
4086         }
4087
4088         INFO_bm_xfer_stats(mdev, "receive", &c);
4089
4090         if (mdev->state.conn == C_WF_BITMAP_T) {
4091                 enum drbd_state_rv rv;
4092
4093                 err = drbd_send_bitmap(mdev);
4094                 if (err)
4095                         goto out;
4096                 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4097                 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4098                 D_ASSERT(rv == SS_SUCCESS);
4099         } else if (mdev->state.conn != C_WF_BITMAP_S) {
4100                 /* admin may have requested C_DISCONNECTING,
4101                  * other threads may have noticed network errors */
4102                 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4103                     drbd_conn_str(mdev->state.conn));
4104         }
4105         err = 0;
4106
4107  out:
4108         drbd_bm_unlock(mdev);
4109         if (!err && mdev->state.conn == C_WF_BITMAP_S)
4110                 drbd_start_resync(mdev, C_SYNC_SOURCE);
4111         return err;
4112 }
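
/*
 * Editorial sketch, not part of the driver: a demonstration of the
 * claim in the comment above receive_bitmap() that, for a little
 * endian bit stream, 32 bit and 64 bit chunking are interchangeable.
 * On a little endian host, bit nr of the stream lands in the same
 * place whether the buffer is viewed as 32 bit or as 64 bit words.
 * Standalone userspace demo.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

static int test_bit32(const uint32_t *w, unsigned int nr)
{
        return (w[nr / 32] >> (nr % 32)) & 1;
}

static int test_bit64(const uint64_t *w, unsigned int nr)
{
        return (w[nr / 64] >> (nr % 64)) & 1;
}

int main(void)
{
        uint8_t stream[16] = { 0x01, 0x80, 0xaa, 0x55, 4, 8, 15, 16,
                               23, 42, 0xff, 0x00, 1, 2, 3, 4 };
        uint32_t w32[4];
        uint64_t w64[2];
        unsigned int nr;

        memcpy(w32, stream, sizeof(stream));
        memcpy(w64, stream, sizeof(stream));

        /* little endian: both views agree for every bit number */
        for (nr = 0; nr < 128; nr++)
                assert(test_bit32(w32, nr) == test_bit64(w64, nr));
        return 0;
}
#endif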
4113
4114 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4115 {
4116         conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4117                  pi->cmd, pi->size);
4118
4119         return ignore_remaining_packet(tconn, pi);
4120 }
4121
4122 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4123 {
4124         /* Make sure we've acked all the TCP data associated
4125          * with the data requests being unplugged */
4126         drbd_tcp_quickack(tconn->data.socket);
4127
4128         return 0;
4129 }
4130
4131 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4132 {
4133         struct drbd_conf *mdev;
4134         struct p_block_desc *p = pi->data;
4135
4136         mdev = vnr_to_mdev(tconn, pi->vnr);
4137         if (!mdev)
4138                 return -EIO;
4139
4140         switch (mdev->state.conn) {
4141         case C_WF_SYNC_UUID:
4142         case C_WF_BITMAP_T:
4143         case C_BEHIND:
4144                 break;
4145         default:
4146                 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4147                                 drbd_conn_str(mdev->state.conn));
4148         }
4149
4150         drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4151
4152         return 0;
4153 }
4154
4155 struct data_cmd {
4156         int expect_payload;
4157         size_t pkt_size;
4158         int (*fn)(struct drbd_tconn *, struct packet_info *);
4159 };
4160
4161 static struct data_cmd drbd_cmd_handler[] = {
4162         [P_DATA]            = { 1, sizeof(struct p_data), receive_Data },
4163         [P_DATA_REPLY]      = { 1, sizeof(struct p_data), receive_DataReply },
4164         [P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
4165         [P_BARRIER]         = { 0, sizeof(struct p_barrier), receive_Barrier },
4166         [P_BITMAP]          = { 1, 0, receive_bitmap },
4167         [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap },
4168         [P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
4169         [P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
4170         [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4171         [P_SYNC_PARAM]      = { 1, 0, receive_SyncParam },
4172         [P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
4173         [P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
4174         [P_UUIDS]           = { 0, sizeof(struct p_uuids), receive_uuids },
4175         [P_SIZES]           = { 0, sizeof(struct p_sizes), receive_sizes },
4176         [P_STATE]           = { 0, sizeof(struct p_state), receive_state },
4177         [P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
4178         [P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4179         [P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
4180         [P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
4181         [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4182         [P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
4183         [P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4184         [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4185 };
4186
4187 static void drbdd(struct drbd_tconn *tconn)
4188 {
4189         struct packet_info pi;
4190         size_t shs; /* sub header size */
4191         int err;
4192
4193         while (get_t_state(&tconn->receiver) == RUNNING) {
4194                 struct data_cmd *cmd;
4195
4196                 drbd_thread_current_set_cpu(&tconn->receiver);
4197                 if (drbd_recv_header(tconn, &pi))
4198                         goto err_out;
4199
4200                 cmd = &drbd_cmd_handler[pi.cmd];
4201                 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4202                         conn_err(tconn, "unknown packet type %d, l: %d!\n", pi.cmd, pi.size);
4203                         goto err_out;
4204                 }
4205
4206                 shs = cmd->pkt_size;
4207                 if (pi.size > shs && !cmd->expect_payload) {
4208                         conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size);
4209                         goto err_out;
4210                 }
4211
4212                 if (shs) {
4213                         err = drbd_recv_all_warn(tconn, pi.data, shs);
4214                         if (err)
4215                                 goto err_out;
4216                         pi.size -= shs;
4217                 }
4218
4219                 err = cmd->fn(tconn, &pi);
4220                 if (err) {
4221                         conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4222                                  cmdname(pi.cmd), err, pi.size);
4223                         goto err_out;
4224                 }
4225         }
4226         return;
4227
4228     err_out:
4229         conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4230 }
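
/*
 * Editorial sketch, not part of the driver: the dispatch pattern used
 * by drbdd() and the drbd_cmd_handler[] table above.  A sparse table
 * built with designated initializers maps packet type to handler;
 * unknown types appear either as an index beyond the table or as a
 * hole with a NULL ->fn, so both conditions must be rejected before
 * the indirect call.  Names here are hypothetical.
 */
#if 0
#include <stdio.h>

enum ex_packet { EX_PING = 0, EX_DATA = 3 };

struct ex_cmd {
        int (*fn)(void);
};

static int ex_got_ping(void) { puts("ping"); return 0; }
static int ex_got_data(void) { puts("data"); return 0; }

static const struct ex_cmd ex_tbl[] = {
        [EX_PING] = { ex_got_ping },
        [EX_DATA] = { ex_got_data },    /* slots 1 and 2 stay NULL */
};

static int ex_dispatch(unsigned int cmd)
{
        /* same two checks as drbdd(): in range, and not a hole */
        if (cmd >= sizeof(ex_tbl) / sizeof(ex_tbl[0]) || !ex_tbl[cmd].fn)
                return -1;              /* protocol error */
        return ex_tbl[cmd].fn();
}

int main(void)
{
        ex_dispatch(EX_PING);           /* prints "ping" */
        ex_dispatch(1);                 /* hole: rejected */
        ex_dispatch(42);                /* out of range: rejected */
        return 0;
}
#endif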
4231
4232 void conn_flush_workqueue(struct drbd_tconn *tconn)
4233 {
4234         struct drbd_wq_barrier barr;
4235
4236         barr.w.cb = w_prev_work_done;
4237         barr.w.tconn = tconn;
4238         init_completion(&barr.done);
4239         drbd_queue_work(&tconn->data.work, &barr.w);
4240         wait_for_completion(&barr.done);
4241 }
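
/*
 * Editorial sketch, not part of the driver: the barrier idiom behind
 * conn_flush_workqueue() above.  The worker executes queued items in
 * order, so a dummy item that merely fires a completion proves, once
 * completed, that everything queued before it has been processed.
 * The w_prev_work_done callback used above is defined elsewhere in
 * DRBD; this is a plausible shape for it, under assumed helper names.
 */
#if 0
struct ex_barrier {
        struct drbd_work w;
        struct completion done;
};

static int ex_prev_work_done(struct drbd_work *w, int cancel)
{
        struct ex_barrier *b = container_of(w, struct ex_barrier, w);

        complete(&b->done);     /* all earlier items have already run */
        return 0;
}

static void ex_flush(struct drbd_tconn *tconn)
{
        struct ex_barrier b;

        b.w.cb = ex_prev_work_done;
        b.w.tconn = tconn;
        init_completion(&b.done);
        drbd_queue_work(&tconn->data.work, &b.w);
        wait_for_completion(&b.done);   /* sleeps until the cb fires */
}
#endif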
4242
4243 static void conn_disconnect(struct drbd_tconn *tconn)
4244 {
4245         enum drbd_conns oc;
4246         int rv = SS_UNKNOWN_ERROR;
4247
4248         if (tconn->cstate == C_STANDALONE)
4249                 return;
4250
4251         /* asender does not clean up anything; it must not interfere, either */
4252         drbd_thread_stop(&tconn->asender);
4253         drbd_free_sock(tconn);
4254
4255         down_read(&drbd_cfg_rwsem);
4256         idr_for_each(&tconn->volumes, drbd_disconnected, tconn);
4257         up_read(&drbd_cfg_rwsem);
4258         conn_info(tconn, "Connection closed\n");
4259
4260         if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4261                 conn_try_outdate_peer_async(tconn);
4262
4263         spin_lock_irq(&tconn->req_lock);
4264         oc = tconn->cstate;
4265         if (oc >= C_UNCONNECTED)
4266                 rv = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4267
4268         spin_unlock_irq(&tconn->req_lock);
4269
4270         if (oc == C_DISCONNECTING)
4271                 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
4272 }
4273
4274 static int drbd_disconnected(int vnr, void *p, void *data)
4275 {
4276         struct drbd_conf *mdev = (struct drbd_conf *)p;
4277         enum drbd_fencing_p fp;
4278         unsigned int i;
4279
4280         /* wait for current activity to cease. */
4281         spin_lock_irq(&mdev->tconn->req_lock);
4282         _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4283         _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4284         _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
4285         spin_unlock_irq(&mdev->tconn->req_lock);
4286
4287         /* We do not have data structures that would allow us to
4288          * get the rs_pending_cnt down to 0 again.
4289          *  * On C_SYNC_TARGET we do not have any data structures describing
4290          *    the pending RSDataRequests we have sent.
4291          *  * On C_SYNC_SOURCE there is no data structure that tracks
4292          *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4293          *  And no, it is not the sum of the reference counts in the
4294          *  resync_LRU. The resync_LRU tracks the whole operation including
4295          *  the disk-IO, while the rs_pending_cnt only tracks the blocks
4296          *  on the fly. */
4297         drbd_rs_cancel_all(mdev);
4298         mdev->rs_total = 0;
4299         mdev->rs_failed = 0;
4300         atomic_set(&mdev->rs_pending_cnt, 0);
4301         wake_up(&mdev->misc_wait);
4302
4303         del_timer(&mdev->request_timer);
4304
4305         del_timer_sync(&mdev->resync_timer);
4306         resync_timer_fn((unsigned long)mdev);
4307
4308         /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4309          * w_make_resync_request etc. which may still be on the worker queue
4310          * to be "canceled" */
4311         drbd_flush_workqueue(mdev);
4312
4313         drbd_finish_peer_reqs(mdev);
4314
4315         kfree(mdev->p_uuid);
4316         mdev->p_uuid = NULL;
4317
4318         if (!drbd_suspended(mdev))
4319                 tl_clear(mdev->tconn);
4320
4321         drbd_md_sync(mdev);
4322
4323         fp = FP_DONT_CARE;
4324         if (get_ldev(mdev)) {
4325                 rcu_read_lock();
4326                 fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
4327                 rcu_read_unlock();
4328                 put_ldev(mdev);
4329         }
4330
4331         /* serialize with bitmap writeout triggered by the state change,
4332          * if any. */
4333         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4334
4335         /* tcp_close and release of sendpage pages can be deferred.  I don't
4336          * want to use SO_LINGER, because apparently it can be deferred for
4337          * more than 20 seconds (longest time I checked).
4338          *
4339          * Actually we don't care for exactly when the network stack does its
4340          * put_page(), but release our reference on these pages right here.
4341          */
4342         i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
4343         if (i)
4344                 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
4345         i = atomic_read(&mdev->pp_in_use_by_net);
4346         if (i)
4347                 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
4348         i = atomic_read(&mdev->pp_in_use);
4349         if (i)
4350                 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
4351
4352         D_ASSERT(list_empty(&mdev->read_ee));
4353         D_ASSERT(list_empty(&mdev->active_ee));
4354         D_ASSERT(list_empty(&mdev->sync_ee));
4355         D_ASSERT(list_empty(&mdev->done_ee));
4356
4357         /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4358         atomic_set(&mdev->current_epoch->epoch_size, 0);
4359         D_ASSERT(list_empty(&mdev->current_epoch->list));
4360
4361         return 0;
4362 }
4363
4364 /*
4365  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4366  * we can agree on is stored in agreed_pro_version.
4367  *
4368  * feature flags and the reserved array should be enough room for future
4369  * enhancements of the handshake protocol, and possible plugins...
4370  *
4371  * for now, they are expected to be zero, but are ignored on receipt.
4372  */
4373 static int drbd_send_features(struct drbd_tconn *tconn)
4374 {
4375         struct drbd_socket *sock;
4376         struct p_connection_features *p;
4377
4378         sock = &tconn->data;
4379         p = conn_prepare_command(tconn, sock);
4380         if (!p)
4381                 return -EIO;
4382         memset(p, 0, sizeof(*p));
4383         p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4384         p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4385         return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
4386 }
4387
4388 /*
4389  * return values:
4390  *   1 yes, we have a valid connection
4391  *   0 oops, did not work out, please try again
4392  *  -1 peer talks different language,
4393  *     no point in trying again, please go standalone.
4394  */
4395 static int drbd_do_features(struct drbd_tconn *tconn)
4396 {
4397         /* ASSERT current == tconn->receiver ... */
4398         struct p_connection_features *p;
4399         const int expect = sizeof(struct p_connection_features);
4400         struct packet_info pi;
4401         int err;
4402
4403         err = drbd_send_features(tconn);
4404         if (err)
4405                 return 0;
4406
4407         err = drbd_recv_header(tconn, &pi);
4408         if (err)
4409                 return 0;
4410
4411         if (pi.cmd != P_CONNECTION_FEATURES) {
4412                 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
4413                      cmdname(pi.cmd), pi.cmd);
4414                 return -1;
4415         }
4416
4417         if (pi.size != expect) {
4418                 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
4419                      expect, pi.size);
4420                 return -1;
4421         }
4422
4423         p = pi.data;
4424         err = drbd_recv_all_warn(tconn, p, expect);
4425         if (err)
4426                 return 0;
4427
4428         p->protocol_min = be32_to_cpu(p->protocol_min);
4429         p->protocol_max = be32_to_cpu(p->protocol_max);
4430         if (p->protocol_max == 0)
4431                 p->protocol_max = p->protocol_min;
4432
4433         if (PRO_VERSION_MAX < p->protocol_min ||
4434             PRO_VERSION_MIN > p->protocol_max)
4435                 goto incompat;
4436
4437         tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4438
4439         conn_info(tconn, "Handshake successful: "
4440              "Agreed network protocol version %d\n", tconn->agreed_pro_version);
4441
4442         return 1;
4443
4444  incompat:
4445         conn_err(tconn, "incompatible DRBD dialects: "
4446             "I support %d-%d, peer supports %d-%d\n",
4447             PRO_VERSION_MIN, PRO_VERSION_MAX,
4448             p->protocol_min, p->protocol_max);
4449         return -1;
4450 }
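
/*
 * Editorial sketch, not part of the driver: the version agreement rule
 * applied in drbd_do_features() above.  Two ranges [min, max] are
 * compatible iff they overlap; the agreed version is the highest one
 * both sides support.  Standalone userspace demo.
 */
#if 0
#include <assert.h>

static int agree(int my_min, int my_max, int peer_min, int peer_max,
                 int *agreed)
{
        if (my_max < peer_min || my_min > peer_max)
                return -1;      /* incompatible dialects */
        *agreed = my_max < peer_max ? my_max : peer_max;
        return 0;
}

int main(void)
{
        int v;

        assert(agree(86, 100, 90, 96, &v) == 0 && v == 96);
        assert(agree(86, 100, 101, 110, &v) == -1);
        return 0;
}
#endif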
4451
4452 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4453 static int drbd_do_auth(struct drbd_tconn *tconn)
4454 {
4455         dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4456         dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4457         return -1;
4458 }
4459 #else
4460 #define CHALLENGE_LEN 64
4461
4462 /* Return value:
4463         1 - auth succeeded,
4464         0 - failed, try again (network error),
4465         -1 - auth failed, don't try again.
4466 */
4467
4468 static int drbd_do_auth(struct drbd_tconn *tconn)
4469 {
4470         struct drbd_socket *sock;
4471         char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
4472         struct scatterlist sg;
4473         char *response = NULL;
4474         char *right_response = NULL;
4475         char *peers_ch = NULL;
4476         unsigned int key_len;
4477         char secret[SHARED_SECRET_MAX]; /* 64 byte */
4478         unsigned int resp_size;
4479         struct hash_desc desc;
4480         struct packet_info pi;
4481         struct net_conf *nc;
4482         int err, rv;
4483
4484         /* FIXME: Put the challenge/response into the preallocated socket buffer.  */
4485
4486         rcu_read_lock();
4487         nc = rcu_dereference(tconn->net_conf);
4488         key_len = strlen(nc->shared_secret);
4489         memcpy(secret, nc->shared_secret, key_len);
4490         rcu_read_unlock();
4491
4492         desc.tfm = tconn->cram_hmac_tfm;
4493         desc.flags = 0;
4494
4495         rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
4496         if (rv) {
4497                 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
4498                 rv = -1;
4499                 goto fail;
4500         }
4501
4502         get_random_bytes(my_challenge, CHALLENGE_LEN);
4503
4504         sock = &tconn->data;
4505         if (!conn_prepare_command(tconn, sock)) {
4506                 rv = 0;
4507                 goto fail;
4508         }
4509         rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
4510                                 my_challenge, CHALLENGE_LEN);
4511         if (!rv)
4512                 goto fail;
4513
4514         err = drbd_recv_header(tconn, &pi);
4515         if (err) {
4516                 rv = 0;
4517                 goto fail;
4518         }
4519
4520         if (pi.cmd != P_AUTH_CHALLENGE) {
4521                 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4522                     cmdname(pi.cmd), pi.cmd);
4523                 rv = 0;
4524                 goto fail;
4525         }
4526
4527         if (pi.size > CHALLENGE_LEN * 2) {
4528                 conn_err(tconn, "expected AuthChallenge payload too big.\n");
4529                 rv = -1;
4530                 goto fail;
4531         }
4532
4533         peers_ch = kmalloc(pi.size, GFP_NOIO);
4534         if (peers_ch == NULL) {
4535                 conn_err(tconn, "kmalloc of peers_ch failed\n");
4536                 rv = -1;
4537                 goto fail;
4538         }
4539
4540         err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4541         if (err) {
4542                 rv = 0;
4543                 goto fail;
4544         }
4545
4546         resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
4547         response = kmalloc(resp_size, GFP_NOIO);
4548         if (response == NULL) {
4549                 conn_err(tconn, "kmalloc of response failed\n");
4550                 rv = -1;
4551                 goto fail;
4552         }
4553
4554         sg_init_table(&sg, 1);
4555         sg_set_buf(&sg, peers_ch, pi.size);
4556
4557         rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4558         if (rv) {
4559                 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4560                 rv = -1;
4561                 goto fail;
4562         }
4563
4564         if (!conn_prepare_command(tconn, sock)) {
4565                 rv = 0;
4566                 goto fail;
4567         }
4568         rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
4569                                 response, resp_size);
4570         if (!rv)
4571                 goto fail;
4572
4573         err = drbd_recv_header(tconn, &pi);
4574         if (err) {
4575                 rv = 0;
4576                 goto fail;
4577         }
4578
4579         if (pi.cmd != P_AUTH_RESPONSE) {
4580                 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
4581                         cmdname(pi.cmd), pi.cmd);
4582                 rv = 0;
4583                 goto fail;
4584         }
4585
4586         if (pi.size != resp_size) {
4587                 conn_err(tconn, "expected AuthResponse payload of wrong size\n");
4588                 rv = 0;
4589                 goto fail;
4590         }
4591
4592         err = drbd_recv_all_warn(tconn, response, resp_size);
4593         if (err) {
4594                 rv = 0;
4595                 goto fail;
4596         }
4597
4598         right_response = kmalloc(resp_size, GFP_NOIO);
4599         if (right_response == NULL) {
4600                 conn_err(tconn, "kmalloc of right_response failed\n");
4601                 rv = -1;
4602                 goto fail;
4603         }
4604
4605         sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4606
4607         rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4608         if (rv) {
4609                 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4610                 rv = -1;
4611                 goto fail;
4612         }
4613
4614         rv = !memcmp(response, right_response, resp_size);
4615
4616         if (rv)
4617                 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4618                      resp_size);
4619         else
4620                 rv = -1;
4621
4622  fail:
4623         kfree(peers_ch);
4624         kfree(response);
4625         kfree(right_response);
4626
4627         return rv;
4628 }
4629 #endif
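
/*
 * Editorial sketch, not part of the driver: the shape of the symmetric
 * challenge-response exchange implemented by drbd_do_auth() above.
 * Each side sends a random challenge and returns an HMAC of the peer's
 * challenge keyed with the shared secret, proving knowledge of the
 * secret without transmitting it.  send_pkt(), recv_pkt() and hmac()
 * are hypothetical stand-ins for the socket and crypto calls.
 */
#if 0
static int ex_auth(int peer, const void *secret, unsigned int key_len)
{
        char my_ch[64], peers_ch[64], resp[32], right[32];

        get_random_bytes(my_ch, sizeof(my_ch));
        send_pkt(peer, AUTH_CHALLENGE, my_ch, sizeof(my_ch));
        recv_pkt(peer, AUTH_CHALLENGE, peers_ch, sizeof(peers_ch));

        /* answer the peer's challenge */
        hmac(secret, key_len, peers_ch, sizeof(peers_ch), resp);
        send_pkt(peer, AUTH_RESPONSE, resp, sizeof(resp));

        /* verify the peer's answer to ours against a local computation */
        recv_pkt(peer, AUTH_RESPONSE, resp, sizeof(resp));
        hmac(secret, key_len, my_ch, sizeof(my_ch), right);
        return memcmp(resp, right, sizeof(right)) == 0 ? 1 : -1;
}
#endif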
4630
4631 int drbdd_init(struct drbd_thread *thi)
4632 {
4633         struct drbd_tconn *tconn = thi->tconn;
4634         int h;
4635
4636         conn_info(tconn, "receiver (re)started\n");
4637
4638         do {
4639                 h = conn_connect(tconn);
4640                 if (h == 0) {
4641                         conn_disconnect(tconn);
4642                         schedule_timeout_interruptible(HZ);
4643                 }
4644                 if (h == -1) {
4645                         conn_warn(tconn, "Discarding network configuration.\n");
4646                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
4647                 }
4648         } while (h == 0);
4649
4650         if (h > 0)
4651                 drbdd(tconn);
4652
4653         conn_disconnect(tconn);
4654
4655         conn_info(tconn, "receiver terminated\n");
4656         return 0;
4657 }
4658
4659 /* ********* acknowledge sender ******** */
4660
4661 static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4662 {
4663         struct p_req_state_reply *p = pi->data;
4664         int retcode = be32_to_cpu(p->retcode);
4665
4666         if (retcode >= SS_SUCCESS) {
4667                 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4668         } else {
4669                 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4670                 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4671                          drbd_set_st_err_str(retcode), retcode);
4672         }
4673         wake_up(&tconn->ping_wait);
4674
4675         return 0;
4676 }
4677
4678 static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4679 {
4680         struct drbd_conf *mdev;
4681         struct p_req_state_reply *p = pi->data;
4682         int retcode = be32_to_cpu(p->retcode);
4683
4684         mdev = vnr_to_mdev(tconn, pi->vnr);
4685         if (!mdev)
4686                 return -EIO;
4687
4688         if (retcode >= SS_SUCCESS) {
4689                 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4690         } else {
4691                 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4692                 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4693                         drbd_set_st_err_str(retcode), retcode);
4694         }
4695         wake_up(&mdev->state_wait);
4696
4697         return 0;
4698 }
4699
4700 static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
4701 {
4702         return drbd_send_ping_ack(tconn);
4703 }
4705
4706 static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
4707 {
4708         /* restore idle timeout */
4709         tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4710         if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4711                 wake_up(&tconn->ping_wait);
4712
4713         return 0;
4714 }
4715
4716 static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
4717 {
4718         struct drbd_conf *mdev;
4719         struct p_block_ack *p = pi->data;
4720         sector_t sector = be64_to_cpu(p->sector);
4721         int blksize = be32_to_cpu(p->blksize);
4722
4723         mdev = vnr_to_mdev(tconn, pi->vnr);
4724         if (!mdev)
4725                 return -EIO;
4726
4727         D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
4728
4729         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4730
4731         if (get_ldev(mdev)) {
4732                 drbd_rs_complete_io(mdev, sector);
4733                 drbd_set_in_sync(mdev, sector, blksize);
4734                 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4735                 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4736                 put_ldev(mdev);
4737         }
4738         dec_rs_pending(mdev);
4739         atomic_add(blksize >> 9, &mdev->rs_sect_in);
4740
4741         return 0;
4742 }
4743
4744 static int
4745 validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4746                               struct rb_root *root, const char *func,
4747                               enum drbd_req_event what, bool missing_ok)
4748 {
4749         struct drbd_request *req;
4750         struct bio_and_error m;
4751
4752         spin_lock_irq(&mdev->tconn->req_lock);
4753         req = find_request(mdev, root, id, sector, missing_ok, func);
4754         if (unlikely(!req)) {
4755                 spin_unlock_irq(&mdev->tconn->req_lock);
4756                 return -EIO;
4757         }
4758         __req_mod(req, what, &m);
4759         spin_unlock_irq(&mdev->tconn->req_lock);
4760
4761         if (m.bio)
4762                 complete_master_bio(mdev, &m);
4763         return 0;
4764 }
4765
4766 static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
4767 {
4768         struct drbd_conf *mdev;
4769         struct p_block_ack *p = pi->data;
4770         sector_t sector = be64_to_cpu(p->sector);
4771         int blksize = be32_to_cpu(p->blksize);
4772         enum drbd_req_event what;
4773
4774         mdev = vnr_to_mdev(tconn, pi->vnr);
4775         if (!mdev)
4776                 return -EIO;
4777
4778         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4779
4780         if (p->block_id == ID_SYNCER) {
4781                 drbd_set_in_sync(mdev, sector, blksize);
4782                 dec_rs_pending(mdev);
4783                 return 0;
4784         }
4785         switch (pi->cmd) {
4786         case P_RS_WRITE_ACK:
4787                 what = WRITE_ACKED_BY_PEER_AND_SIS;
4788                 break;
4789         case P_WRITE_ACK:
4790                 what = WRITE_ACKED_BY_PEER;
4791                 break;
4792         case P_RECV_ACK:
4793                 what = RECV_ACKED_BY_PEER;
4794                 break;
4795         case P_DISCARD_WRITE:
4796                 what = DISCARD_WRITE;
4797                 break;
4798         case P_RETRY_WRITE:
4799                 what = POSTPONE_WRITE;
4800                 break;
4801         default:
4802                 BUG();
4803         }
4804
4805         return validate_req_change_req_state(mdev, p->block_id, sector,
4806                                              &mdev->write_requests, __func__,
4807                                              what, false);
4808 }
4809
4810 static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
4811 {
4812         struct drbd_conf *mdev;
4813         struct p_block_ack *p = pi->data;
4814         sector_t sector = be64_to_cpu(p->sector);
4815         int size = be32_to_cpu(p->blksize);
4816         int err;
4817
4818         mdev = vnr_to_mdev(tconn, pi->vnr);
4819         if (!mdev)
4820                 return -EIO;
4821
4822         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4823
4824         if (p->block_id == ID_SYNCER) {
4825                 dec_rs_pending(mdev);
4826                 drbd_rs_failed_io(mdev, sector, size);
4827                 return 0;
4828         }
4829
4830         err = validate_req_change_req_state(mdev, p->block_id, sector,
4831                                             &mdev->write_requests, __func__,
4832                                             NEG_ACKED, true);
4833         if (err) {
4834                 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4835                    The master bio might already be completed, therefore the
4836                    request is no longer in the collision hash. */
4837                 /* In Protocol B we might already have got a P_RECV_ACK
4838                    but then get a P_NEG_ACK afterwards. */
4839                 drbd_set_out_of_sync(mdev, sector, size);
4840         }
4841         return 0;
4842 }
4843
4844 static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
4845 {
4846         struct drbd_conf *mdev;
4847         struct p_block_ack *p = pi->data;
4848         sector_t sector = be64_to_cpu(p->sector);
4849
4850         mdev = vnr_to_mdev(tconn, pi->vnr);
4851         if (!mdev)
4852                 return -EIO;
4853
4854         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4855
4856         dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4857             (unsigned long long)sector, be32_to_cpu(p->blksize));
4858
4859         return validate_req_change_req_state(mdev, p->block_id, sector,
4860                                              &mdev->read_requests, __func__,
4861                                              NEG_ACKED, false);
4862 }
4863
4864 static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
4865 {
4866         struct drbd_conf *mdev;
4867         sector_t sector;
4868         int size;
4869         struct p_block_ack *p = pi->data;
4870
4871         mdev = vnr_to_mdev(tconn, pi->vnr);
4872         if (!mdev)
4873                 return -EIO;
4874
4875         sector = be64_to_cpu(p->sector);
4876         size = be32_to_cpu(p->blksize);
4877
4878         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4879
4880         dec_rs_pending(mdev);
4881
4882         if (get_ldev_if_state(mdev, D_FAILED)) {
4883                 drbd_rs_complete_io(mdev, sector);
4884                 switch (pi->cmd) {
4885                 case P_NEG_RS_DREPLY:
4886                         drbd_rs_failed_io(mdev, sector, size); /* fall through */
4887                 case P_RS_CANCEL:
4888                         break;
4889                 default:
4890                         BUG();
4891                 }
4892                 put_ldev(mdev);
4893         }
4894
4895         return 0;
4896 }
4897
4898 static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
4899 {
4900         struct drbd_conf *mdev;
4901         struct p_barrier_ack *p = pi->data;
4902
4903         mdev = vnr_to_mdev(tconn, pi->vnr);
4904         if (!mdev)
4905                 return -EIO;
4906
4907         tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));
4908
4909         if (mdev->state.conn == C_AHEAD &&
4910             atomic_read(&mdev->ap_in_flight) == 0 &&
4911             !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4912                 mdev->start_resync_timer.expires = jiffies + HZ;
4913                 add_timer(&mdev->start_resync_timer);
4914         }
4915
4916         return 0;
4917 }
4918
4919 static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
4920 {
4921         struct drbd_conf *mdev;
4922         struct p_block_ack *p = pi->data;
4923         struct drbd_work *w;
4924         sector_t sector;
4925         int size;
4926
4927         mdev = vnr_to_mdev(tconn, pi->vnr);
4928         if (!mdev)
4929                 return -EIO;
4930
4931         sector = be64_to_cpu(p->sector);
4932         size = be32_to_cpu(p->blksize);
4933
4934         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4935
4936         if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4937                 drbd_ov_out_of_sync_found(mdev, sector, size);
4938         else
4939                 ov_out_of_sync_print(mdev);
4940
4941         if (!get_ldev(mdev))
4942                 return 0;
4943
4944         drbd_rs_complete_io(mdev, sector);
4945         dec_rs_pending(mdev);
4946
4947         --mdev->ov_left;
4948
4949         /* let's advance progress step marks only for every other megabyte */
4950         if ((mdev->ov_left & 0x200) == 0x200)
4951                 drbd_advance_rs_marks(mdev, mdev->ov_left);
4952
4953         if (mdev->ov_left == 0) {
4954                 w = kmalloc(sizeof(*w), GFP_NOIO);
4955                 if (w) {
4956                         w->cb = w_ov_finished;
4957                         w->mdev = mdev;
4958                         drbd_queue_work_front(&mdev->tconn->data.work, w);
4959                 } else {
4960                         dev_err(DEV, "kmalloc(w) failed.");
4961                         ov_out_of_sync_print(mdev);
4962                         drbd_resync_finished(mdev);
4963                 }
4964         }
4965         put_ldev(mdev);
4966         return 0;
4967 }
4968
4969 static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4970 {
4971         return 0;
4972 }
4973
4974 static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
4975 {
4976         struct drbd_conf *mdev;
4977         int i, not_empty = 0;
4978
4979         do {
4980                 clear_bit(SIGNAL_ASENDER, &tconn->flags);
4981                 flush_signals(current);
4982                 down_read(&drbd_cfg_rwsem);
4983                 idr_for_each_entry(&tconn->volumes, mdev, i) {
4984                         if (drbd_finish_peer_reqs(mdev)) {
4985                                 up_read(&drbd_cfg_rwsem);
4986                                 return 1; /* error */
4987                         }
4988                 }
4989                 up_read(&drbd_cfg_rwsem);
4990                 set_bit(SIGNAL_ASENDER, &tconn->flags);
4991
4992                 spin_lock_irq(&tconn->req_lock);
4993                 rcu_read_lock();
4994                 idr_for_each_entry(&tconn->volumes, mdev, i) {
4995                         not_empty = !list_empty(&mdev->done_ee);
4996                         if (not_empty)
4997                                 break;
4998                 }
4999                 rcu_read_unlock();
5000                 spin_unlock_irq(&tconn->req_lock);
5001         } while (not_empty);
5002
5003         return 0;
5004 }
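
/*
 * Editorial sketch, not part of the driver: the drain loop shape of
 * tconn_finish_peer_reqs() above.  The actual work may sleep, so it
 * runs outside the spinlock; new entries can be queued meanwhile.
 * The emptiness test is therefore repeated under the lock, and the
 * whole pass restarts until one pass observes every list empty.
 */
#if 0
        do {
                finish_everything();            /* may sleep, unlocked */

                spin_lock_irq(&lock);           /* recheck under lock  */
                not_empty = !list_empty(&done_list);
                spin_unlock_irq(&lock);
        } while (not_empty);                    /* stable: truly empty */
#endif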
5005
5006 struct asender_cmd {
5007         size_t pkt_size;
5008         int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
5009 };
5010
5011 static struct asender_cmd asender_tbl[] = {
5012         [P_PING]            = { 0, got_Ping },
5013         [P_PING_ACK]        = { 0, got_PingAck },
5014         [P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
5015         [P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
5016         [P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
5017         [P_DISCARD_WRITE]   = { sizeof(struct p_block_ack), got_BlockAck },
5018         [P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
5019         [P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
5020         [P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
5021         [P_OV_RESULT]       = { sizeof(struct p_block_ack), got_OVResult },
5022         [P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
5023         [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5024         [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
5025         [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
5026         [P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
5027         [P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
5028         [P_RETRY_WRITE]     = { sizeof(struct p_block_ack), got_BlockAck },
5029 };
5030
5031 int drbd_asender(struct drbd_thread *thi)
5032 {
5033         struct drbd_tconn *tconn = thi->tconn;
5034         struct asender_cmd *cmd = NULL;
5035         struct packet_info pi;
5036         int rv;
5037         void *buf    = tconn->meta.rbuf;
5038         int received = 0;
5039         unsigned int header_size = drbd_header_size(tconn);
5040         int expect   = header_size;
5041         bool ping_timeout_active = false;
5042         struct net_conf *nc;
5043         int ping_timeo, tcp_cork, ping_int;
5044
5045         current->policy = SCHED_RR;  /* Make this a realtime task! */
5046         current->rt_priority = 2;    /* more important than all other tasks */
5047
5048         while (get_t_state(thi) == RUNNING) {
5049                 drbd_thread_current_set_cpu(thi);
5050
5051                 rcu_read_lock();
5052                 nc = rcu_dereference(tconn->net_conf);
5053                 ping_timeo = nc->ping_timeo;
5054                 tcp_cork = nc->tcp_cork;
5055                 ping_int = nc->ping_int;
5056                 rcu_read_unlock();
5057
5058                 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
5059                         if (drbd_send_ping(tconn)) {
5060                                 conn_err(tconn, "drbd_send_ping has failed\n");
5061                                 goto reconnect;
5062                         }
5063                         tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5064                         ping_timeout_active = true;
5065                 }
5066
5067                 /* TODO: conditionally cork; it may hurt latency if we cork without
5068                    much to send */
5069                 if (tcp_cork)
5070                         drbd_tcp_cork(tconn->meta.socket);
5071                 if (tconn_finish_peer_reqs(tconn)) {
5072                         conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
5073                         goto reconnect;
5074                 }
5075                 /* but unconditionally uncork unless disabled */
5076                 if (tcp_cork)
5077                         drbd_tcp_uncork(tconn->meta.socket);
5078
5079                 /* short circuit, recv_msg would return EINTR anyways. */
5080                 if (signal_pending(current))
5081                         continue;
5082
5083                 rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5084                 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5085
5086                 flush_signals(current);
5087
5088                 /* Note:
5089                  * -EINTR        (on meta) we got a signal
5090                  * -EAGAIN       (on meta) rcvtimeo expired
5091                  * -ECONNRESET   other side closed the connection
5092                  * -ERESTARTSYS  (on data) we got a signal
5093                  * rv <  0       other than above: unexpected error!
5094                  * rv == expected: full header or command
5095                  * rv <  expected: "woken" by signal during receive
5096                  * rv == 0       : "connection shut down by peer"
5097                  */
5098                 if (likely(rv > 0)) {
5099                         received += rv;
5100                         buf      += rv;
5101                 } else if (rv == 0) {
5102                         conn_err(tconn, "meta connection shut down by peer.\n");
5103                         goto reconnect;
5104                 } else if (rv == -EAGAIN) {
5105                         /* If the data socket received something meanwhile,
5106                          * that is good enough: peer is still alive. */
5107                         if (time_after(tconn->last_received,
5108                                 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
5109                                 continue;
5110                         if (ping_timeout_active) {
5111                                 conn_err(tconn, "PingAck did not arrive in time.\n");
5112                                 goto reconnect;
5113                         }
5114                         set_bit(SEND_PING, &tconn->flags);
5115                         continue;
5116                 } else if (rv == -EINTR) {
5117                         continue;
5118                 } else {
5119                         conn_err(tconn, "sock_recvmsg returned %d\n", rv);
5120                         goto reconnect;
5121                 }
5122
5123                 if (received == expect && cmd == NULL) {
5124                         if (decode_header(tconn, tconn->meta.rbuf, &pi))
5125                                 goto reconnect;
5126                         cmd = &asender_tbl[pi.cmd];
5127                         if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
5128                                 conn_err(tconn, "unknown command %d on meta (l: %d)\n",
5129                                         pi.cmd, pi.size);
5130                                 goto disconnect;
5131                         }
5132                         expect = header_size + cmd->pkt_size;
5133                         if (pi.size != expect - header_size) {
5134                                 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
5135                                         pi.cmd, pi.size);
5136                                 goto reconnect;
5137                         }
5138                 }
5139                 if (received == expect) {
5140                         int err;
5141
5142                         err = cmd->fn(tconn, &pi);
5143                         if (err) {
5144                                 conn_err(tconn, "%pf failed\n", cmd->fn);
5145                                 goto reconnect;
5146                         }
5147
5148                         tconn->last_received = jiffies;
5149
5150                         if (cmd == &asender_tbl[P_PING_ACK]) {
5151                                 /* restore idle timeout */
5152                                 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5153                                 ping_timeout_active = false;
5154                         }
5155
5156                         buf      = tconn->meta.rbuf;
5157                         received = 0;
5158                         expect   = header_size;
5159                         cmd      = NULL;
5160                 }
5161         }
5162
5163         if (0) {
5164 reconnect:
5165                 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5166         }
5167         if (0) {
5168 disconnect:
5169                 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
5170         }
5171         clear_bit(SIGNAL_ASENDER, &tconn->flags);
5172
5173         conn_info(tconn, "asender terminated\n");
5174
5175         return 0;
5176 }
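
/*
 * Editorial sketch, not part of the driver: the partial-read
 * accumulation that drbd_asender() above performs on the meta socket.
 * Short reads are legal, so a (buf, received, expect) triple is kept:
 * first a full header is collected, decoding it reveals the payload
 * size and grows 'expect', and the handler runs only once the whole
 * packet is in.  read_some() and handle_packet() are hypothetical
 * stand-ins for the socket receive and the command handler.
 */
#if 0
#define HDR 4   /* pretend header: byte 0 carries the payload length */

static int read_some(char *buf, int want);      /* hypothetical */
static void handle_packet(const char *pkt);     /* hypothetical */

static void ex_recv_loop(void)
{
        char pkt[64 + HDR], *buf = pkt;
        int received = 0, expect = HDR, have_header = 0;

        for (;;) {
                int rv = read_some(buf, expect - received);

                if (rv <= 0)
                        return;         /* error or orderly shutdown */
                received += rv;
                buf += rv;

                if (received == expect && !have_header) {
                        expect = HDR + pkt[0];  /* header decoded */
                        have_header = 1;
                }
                if (received == expect) {
                        handle_packet(pkt);     /* full packet arrived */
                        buf = pkt;              /* reset the triple */
                        received = 0;
                        expect = HDR;
                        have_header = 0;
                }
        }
}
#endif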