   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
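/*
 * For orientation, a minimal sketch of the primitives these helpers
 * build on (the real definitions live in drbd_int.h; treat this as an
 * illustration, not the authoritative code):
 *
 *	static inline struct page *page_chain_next(struct page *page)
 *	{
 *		return (struct page *)page_private(page);
 *	}
 *	#define page_chain_for_each(page) \
 *		for (; page; page = page_chain_next(page))
 *
 * A chain is terminated by page->private == 0.
 */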
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
		tmp = page_chain_next(page);
			break; /* found sufficient pages */
	/* insufficient pages, don't use any of them. */
	/* add end-of-list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
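/*
 * Typical usage pattern (see drbd_pp_free() below): the tail is looked
 * up outside of any lock, the actual splice happens under drbd_pp_lock.
 * Roughly:
 *
 *	tmp = page_chain_tail(page, &i);
 *	spin_lock(&drbd_pp_lock);
 *	page_chain_add(&drbd_pp_pool, page, tmp);
 *	spin_unlock(&drbd_pp_lock);
 */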
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
		list_move(le, to_be_freed);

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

	atomic_add(number, &mdev->pp_in_use);

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
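/*
 * Illustrative pairing of the two helpers above (a sketch of the usual
 * receive path, not verbatim driver code): every chain obtained from
 * drbd_pp_alloc() must eventually go back through drbd_pp_free(), which
 * decides between recycling into drbd_pp_pool and returning the pages
 * to the system:
 *
 *	page = drbd_pp_alloc(mdev, nr_pages, true);
 *	if (page) {
 *		... fill the pages from the socket ...
 *		drbd_pp_free(mdev, page, 0);
 *	}
 */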
/*
 * You need to hold the req_lock:
 *  _drbd_wait_ee_list_empty()
 *
 * You must not have the req_lock:
 *  drbd_process_done_ee()
 *  drbd_wait_ee_list_empty()
 */
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				       unsigned int data_size,
				       gfp_t gfp_mask) __must_hold(local)
	struct drbd_epoch_entry *e;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	INIT_HLIST_NODE(&e->collision);
	atomic_set(&e->pending_bios, 0);

	mempool_free(e, drbd_ee_mempool);

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
	if (e->flags & EE_HAS_DIGEST)
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);

/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	wake_up(&mdev->ee_wait);
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
/* see also kernel_accept, which only exists since 2.6.18.
 * We also want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
	struct sock *sk = sock->sk;

	err = sock->ops->listen(sock, 5);

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,

	err = sock->ops->accept(sock, *newsock, 0);
		sock_release(*newsock);

	(*newsock)->ops = sock->ops;
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL

		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);

		/* ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal */
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */

		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
/*
 * On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
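/*
 * For reference, this is the in-kernel equivalent of what a userspace
 * program would do before listen()/connect() (illustration only):
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
 *
 * Setting SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK keeps TCP autotuning from
 * later overriding the explicitly configured sizes.
 */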
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
	struct sockaddr_in6 src_in6;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
		dev_err(DEV, "%s failed, err = %d\n", what, err);

	if (disconnect_on_error)
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
	struct socket *s_estab = NULL, *s_listen;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* +/- 14.3% random jitter, i.e. 28.5% total spread */
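/*
 * Worked example (assumption: try_connect_int = 10 seconds): timeo
 * starts at 10*HZ; with the +/- timeo/7 jitter the accept timeout ends
 * up anywhere between roughly 8.57s and 11.43s.  The point is to keep
 * both peers from timing out and retrying in lockstep forever.
 */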
	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

		sock_release(s_listen);
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
	struct socket *s, *sock, *msock;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

		/* 3 tries, this should take less than a second! */
		s = drbd_try_connect(mdev);
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);

				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;

			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;

		s = drbd_wait_for_connect(mdev);
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
				dev_warn(DEV, "initial packet S crossed\n");
				dev_warn(DEV, "initial packet M crossed\n");
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				dev_warn(DEV, "Error receiving initial packet\n");

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;

			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/*
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
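/*
 * Unit note (a sketch of the arithmetic, based on how these knobs are
 * documented for drbd.conf): net_conf->timeout and ->ping_timeo are
 * given in tenths of a second, so value*HZ/10 yields jiffies.  E.g.
 * with ping_timeo = 5 (500ms), the handshake timeout set above is
 * 5*4*HZ/10 = 2 seconds worth of jiffies; ping_int is in whole
 * seconds, hence the plain *HZ.
 */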
	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
			dev_err(DEV, "Authentication of peer failed\n");
			dev_err(DEV, "Authentication of peer failed, trying again.\n");

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);

	if (drbd_send_protocol(mdev) == -1)
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)

	drbd_thread_start(&mdev->asender);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
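/*
 * How the return codes documented above drbd_connect() might be
 * consumed by the receiver thread's entry point (a sketch under the
 * stated return-value contract only, not the verbatim drbdd_init()
 * code):
 *
 *	do {
 *		h = drbd_connect(mdev);
 *		if (h == 0)	// did not work out, try again
 *			schedule_timeout_interruptible(HZ);
 *		if (h == -1)	// incompatible peer, go standalone
 *			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 *	} while (h == 0);
 */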
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
	union p_header *h = &mdev->data.rbuf.header;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));

	mdev->last_received = jiffies;
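/*
 * On-wire header layouts the function above distinguishes, sketched
 * from drbd's packet declarations (field order for orientation only;
 * the authoritative definitions live in drbd_int.h):
 *
 *	struct p_header80 { u32 magic; u16 command; u16 length; ... };
 *	struct p_header95 { u16 magic; u16 command; u32 length; ... };
 *
 * The h95 variant exists so that payloads larger than what a u16
 * length field can describe (64 KiB) can still be framed.
 */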
static void drbd_flush(struct drbd_conf *mdev)

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
			atomic_dec(&epoch->active);
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				if (rv == FE_STILL_LIVE)
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
				wake_up(&mdev->ee_wait);

	spin_unlock(&mdev->epoch_lock);
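/*
 * Epoch lifecycle as the function above applies it (an informal reading
 * aid, not normative documentation): an epoch may be finished once all
 * three hold:
 *  - epoch_size != 0: it ever contained any writes,
 *  - active == 0: all of its writes have completed locally (EV_PUT),
 *  - its barrier number is known (EV_GOT_BARRIER_NR), or we are
 *    cleaning up anyway (EV_CLEANUP).
 * Finishing sends P_BARRIER_ACK (unless this is cleanup) and then
 * either unlinks the epoch or recycles the current one; EV_BECAME_LAST
 * lets the decision cascade to the next epoch in the list.
 */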
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",

	pwo = mdev->write_ordering;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
	struct bio *bios = NULL;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);

	D_ASSERT(page == NULL);

	atomic_set(&e->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");

		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		/* The current_epoch got recycled while we allocated this one... */
	spin_unlock(&mdev->epoch_lock);
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				"short read receiving data digest: read %d expected %d\n",

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);

	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",

		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);

	mdev->recv_cnt += data_size>>9;
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)

	page = drbd_pp_alloc(mdev, 1, 1);

	rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
	if (rr != min_t(int, data_size, PAGE_SIZE)) {
		if (!signal_pending(current))
			"short read receiving data: read %d expected %d\n",
			rr, min_t(int, data_size, PAGE_SIZE));

	drbd_pp_free(mdev, page, 0);
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				"short read receiving data reply digest: read %d expected %d\n",

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
		kunmap(bvec->bv_page);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",

		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_request *req;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);

	atomic_add(data_size >> 9, &mdev->rs_sect_in);
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
		D_ASSERT(hlist_unhashed(&e->collision));

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->collision));
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so that even when a P_DATA packet traveling via sock
 * overtakes an Ack packet traveling on msock, they are still processed
 * in the order in which they were sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)

	spin_lock(&mdev->peer_seq_lock);
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
		if (signal_pending(current)) {
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
	spin_unlock(&mdev->peer_seq_lock);

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
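/*
 * For symmetry: the sending side performs the inverse mapping in
 * bio_flags_to_wire() (in drbd_main.c).  A sketch of that direction,
 * assuming the same four flag pairs (illustration, not the verbatim
 * function):
 *
 *	return (bi_rw & REQ_SYNC    ? DP_RW_SYNC : 0) |
 *	       (bi_rw & REQ_FUA     ? DP_FUA     : 0) |
 *	       (bi_rw & REQ_FLUSH   ? DP_FLUSH   : 0) |
 *	       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 */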
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	block the receiver, waiting on misc_wait
		 *	until no more conflicting requests are there,
		 *	or we get interrupted (disconnect).
		 *
		 *	we do not just write after local io completion of those
		 *	requests, but only after req is done completely, i.e.
		 *	we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, collision) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					dev_alert(DEV, "%s[%u] Concurrent local write detected!"
					      "	new: %llus +%u; pending: %llus +%u\n",
					      current->comm, current->pid,
					      (unsigned long long)sector, size,
					      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				finish_wait(&mdev->misc_wait, &wait);

			if (signal_pending(current)) {
				hlist_del_init(&e->collision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;

			spin_unlock_irq(&mdev->req_lock);
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			spin_lock_irq(&mdev->req_lock);
		finish_wait(&mdev->misc_wait, &wait);

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
	drbd_free_ee(mdev, e);
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID's is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
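/*
 * Worked example of the rate check below (a sketch; assumes the usual
 * 4 KiB bitmap granularity, where Bit2KB(x) == x * 4): if between the
 * two most recent marks dt = 2 seconds elapsed and db = 25600 bitmap
 * bits were cleared, then dbdt = Bit2KB(25600/2) = 51200 KB/s.  With
 * c_min_rate = 4000 (KB/s), 51200 > 4000, so the resync is considered
 * fast enough to be throttled while the backing device looks busy.
 */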
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, digest_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);

	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);

	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;

		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;

		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		fault_type = DRBD_FAULT_MAX;
	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))

	atomic_add(size >> 9, &mdev->rs_sect_ev);

	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

	drbd_free_ee(mdev, e);
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
	case ASB_DISCONNECT:
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
			 "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
		else if (ch_self > ch_peer)
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
	case ASB_DISCONNECT:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
		if (hg == 1  && mdev->state.role == R_PRIMARY)
		rv = drbd_asb_recover_0p(mdev);
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
				dev_warn(DEV, "Successfully gave up primary role.\n");
2251 switch (mdev->net_conf->after_sb_2p) {
2252 case ASB_DISCARD_YOUNGER_PRI:
2253 case ASB_DISCARD_OLDER_PRI:
2254 case ASB_DISCARD_LEAST_CHG:
2255 case ASB_DISCARD_LOCAL:
2256 case ASB_DISCARD_REMOTE:
2258 case ASB_DISCARD_SECONDARY:
2259 dev_err(DEV, "Configuration error.\n");
2262 rv = drbd_asb_recover_0p(mdev);
2264 case ASB_DISCONNECT:
2266 case ASB_CALL_HELPER:
2267 hg = drbd_asb_recover_0p(mdev);
2269 enum drbd_state_rv rv2;
2271 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2272 * we might be here in C_WF_REPORT_PARAMS which is transient.
2273 * we do not need to wait for the after state change work either. */
2274 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2275 if (rv2 != SS_SUCCESS) {
2276 drbd_khelper(mdev, "pri-lost-after-sb");
2278 dev_warn(DEV, "Successfully gave up primary role.\n");
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2322 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2323 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2326 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2330 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2331 peer != UUID_JUST_CREATED)
2335 if (self != UUID_JUST_CREATED &&
2336 (peer == UUID_JUST_CREATED || peer == (u64)0))
2340 int rct, dc; /* roles at crash time */
2342 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2344 if (mdev->agreed_pro_version < 91)
2347 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2348 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2349 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2350 drbd_uuid_set_bm(mdev, 0UL);
2352 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2353 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2356 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2363 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2365 if (mdev->agreed_pro_version < 91)
2368 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2369 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2370 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2372 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2373 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2374 mdev->p_uuid[UI_BITMAP] = 0UL;
2376 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2379 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2386 /* Common power [off|failure] */
2387 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2388 (mdev->p_uuid[UI_FLAGS] & 2);
2389 /* lowest bit is set when we were primary,
2390 * next bit (weight 2) is set when peer was primary */
2394 case 0: /* !self_pri && !peer_pri */ return 0;
2395 case 1: /* self_pri && !peer_pri */ return 1;
2396 case 2: /* !self_pri && peer_pri */ return -1;
2397 case 3: /* self_pri && peer_pri */
2398 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2404 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2409 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2411 if (mdev->agreed_pro_version < 96 ?
2412 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2413 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2414 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the UUID
			   modifications the peer made when it last started a resync
			   as sync source. */
2418 if (mdev->agreed_pro_version < 91)
2421 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2422 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
			dev_info(DEV, "Did not get the last syncUUID packet, corrected:\n");
2425 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2432 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2433 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2434 peer = mdev->p_uuid[i] & ~((u64)1);
2440 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2441 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2446 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2448 if (mdev->agreed_pro_version < 96 ?
2449 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2450 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2451 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the UUID
			   modifications we made when we last started a resync
			   as sync source. */
2455 if (mdev->agreed_pro_version < 91)
2458 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2459 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2461 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2462 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2463 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2471 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2472 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2473 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2479 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2480 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2481 if (self == peer && self != ((u64)0))
2485 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2486 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2487 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2488 peer = mdev->p_uuid[j] & ~((u64)1);
2497 /* drbd_sync_handshake() returns the new conn state on success, or
   C_MASK (-1) on failure.
2500 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2501 enum drbd_disk_state peer_disk) __must_hold(local)
2504 enum drbd_conns rv = C_MASK;
2505 enum drbd_disk_state mydisk;
2507 mydisk = mdev->state.disk;
2508 if (mydisk == D_NEGOTIATING)
2509 mydisk = mdev->new_state_tmp.disk;
2511 dev_info(DEV, "drbd_sync_handshake:\n");
2512 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2513 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2514 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2516 hg = drbd_uuid_compare(mdev, &rule_nr);
2518 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2521 dev_alert(DEV, "Unrelated data, aborting!\n");
2525 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2529 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2530 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2531 int f = (hg == -100) || abs(hg) == 2;
2532 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2535 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2536 hg > 0 ? "source" : "target");
2540 drbd_khelper(mdev, "initial-split-brain");
2542 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2543 int pcount = (mdev->state.role == R_PRIMARY)
2544 + (peer_role == R_PRIMARY);
2545 int forced = (hg == -100);
2549 hg = drbd_asb_recover_0p(mdev);
2552 hg = drbd_asb_recover_1p(mdev);
2555 hg = drbd_asb_recover_2p(mdev);
2558 if (abs(hg) < 100) {
2559 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2560 "automatically solved. Sync from %s node\n",
2561 pcount, (hg < 0) ? "peer" : "this");
2563 dev_warn(DEV, "Doing a full sync, since"
2564 " UUIDs where ambiguous.\n");
2571 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2573 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2577 dev_warn(DEV, "Split-Brain detected, manually solved. "
2578 "Sync from %s node\n",
2579 (hg < 0) ? "peer" : "this");
2583 /* FIXME this log message is not correct if we end up here
2584 * after an attempted attach on a diskless node.
2585 * We just refuse to attach -- well, we drop the "connection"
2586 * to that disk, in a way... */
2587 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2588 drbd_khelper(mdev, "split-brain");
2592 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2593 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2597 if (hg < 0 && /* by intention we do not use mydisk here. */
2598 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2599 switch (mdev->net_conf->rr_conflict) {
2600 case ASB_CALL_HELPER:
2601 drbd_khelper(mdev, "pri-lost");
2603 case ASB_DISCONNECT:
2604 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2607 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2612 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2614 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
2617 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2618 abs(hg) >= 2 ? "full" : "bit-map based");
2623 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2624 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2625 BM_LOCKED_SET_ALLOWED))
2629 if (hg > 0) { /* become sync source. */
2631 } else if (hg < 0) { /* become sync target */
2635 if (drbd_bm_total_weight(mdev)) {
2636 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2637 drbd_bm_total_weight(mdev));
2644 /* returns 1 if invalid */
2645 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2647 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2648 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2649 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2652 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2653 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2654 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2657 /* everything else is valid if they are equal on both sides. */
	/* everything else is invalid. */
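	/* Example: peer = ASB_DISCARD_REMOTE with self = ASB_DISCARD_LOCAL
	 * is the one asymmetric pair that matches up; peer = self =
	 * ASB_DISCONNECT is symmetric and therefore valid; peer =
	 * ASB_DISCARD_REMOTE with self = ASB_DISCONNECT is a mismatch. */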
2665 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2667 struct p_protocol *p = &mdev->data.rbuf.protocol;
2668 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2669 int p_want_lose, p_two_primaries, cf;
2670 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2672 p_proto = be32_to_cpu(p->protocol);
2673 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2674 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2675 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2676 p_two_primaries = be32_to_cpu(p->two_primaries);
2677 cf = be32_to_cpu(p->conn_flags);
2678 p_want_lose = cf & CF_WANT_LOSE;
2680 clear_bit(CONN_DRY_RUN, &mdev->flags);
2682 if (cf & CF_DRY_RUN)
2683 set_bit(CONN_DRY_RUN, &mdev->flags);
2685 if (p_proto != mdev->net_conf->wire_protocol) {
2686 dev_err(DEV, "incompatible communication protocols\n");
2690 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2691 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2695 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2696 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2700 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2701 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2705 if (p_want_lose && mdev->net_conf->want_lose) {
2706 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2710 if (p_two_primaries != mdev->net_conf->two_primaries) {
2711 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2715 if (mdev->agreed_pro_version >= 87) {
2716 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2718 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2721 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2722 if (strcmp(p_integrity_alg, my_alg)) {
2723 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2726 dev_info(DEV, "data-integrity-alg: %s\n",
2727 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2733 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2738 * input: alg name, feature name
2739 * return: NULL (alg name was "")
2740 * ERR_PTR(error) if something goes wrong
2741 * or the crypto hash ptr, if it worked out ok. */
2742 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2743 const char *alg, const char *name)
2745 struct crypto_hash *tfm;
2750 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2752 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2753 alg, name, PTR_ERR(tfm));
2756 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2757 crypto_free_hash(tfm);
2758 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2759 return ERR_PTR(-EINVAL);
2764 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2767 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2768 unsigned int header_size, data_size, exp_max_sz;
2769 struct crypto_hash *verify_tfm = NULL;
2770 struct crypto_hash *csums_tfm = NULL;
2771 const int apv = mdev->agreed_pro_version;
2772 int *rs_plan_s = NULL;
2775 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2776 : apv == 88 ? sizeof(struct p_rs_param)
2778 : apv <= 94 ? sizeof(struct p_rs_param_89)
2779 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2781 if (packet_size > exp_max_sz) {
2782 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2783 packet_size, exp_max_sz);
2788 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2789 data_size = packet_size - header_size;
2790 } else if (apv <= 94) {
2791 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2792 data_size = packet_size - header_size;
2793 D_ASSERT(data_size == 0);
2795 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2796 data_size = packet_size - header_size;
2797 D_ASSERT(data_size == 0);
2800 /* initialize verify_alg and csums_alg */
2801 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2803 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2806 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2810 if (data_size > SHARED_SECRET_MAX) {
2811 dev_err(DEV, "verify-alg too long, "
2812 "peer wants %u, accepting only %u byte\n",
2813 data_size, SHARED_SECRET_MAX);
2817 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
		/* we expect a NUL terminated string,
		 * but just in case someone tries to be evil */
2822 D_ASSERT(p->verify_alg[data_size-1] == 0);
2823 p->verify_alg[data_size-1] = 0;
2825 } else /* apv >= 89 */ {
		/* we still expect NUL terminated strings,
		 * but just in case someone tries to be evil */
2828 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2829 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2830 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2831 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2834 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2835 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2836 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2837 mdev->sync_conf.verify_alg, p->verify_alg);
2840 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2841 p->verify_alg, "verify-alg");
2842 if (IS_ERR(verify_tfm)) {
2848 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2849 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2850 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2851 mdev->sync_conf.csums_alg, p->csums_alg);
2854 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2855 p->csums_alg, "csums-alg");
2856 if (IS_ERR(csums_tfm)) {
2863 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2864 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2865 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2866 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2867 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2869 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2870 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2871 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
			dev_err(DEV, "kzalloc of fifo_buffer failed\n");
2879 spin_lock(&mdev->peer_seq_lock);
2880 /* lock against drbd_nl_syncer_conf() */
2882 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2883 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2884 crypto_free_hash(mdev->verify_tfm);
2885 mdev->verify_tfm = verify_tfm;
2886 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2889 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2890 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2891 crypto_free_hash(mdev->csums_tfm);
2892 mdev->csums_tfm = csums_tfm;
2893 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2895 if (fifo_size != mdev->rs_plan_s.size) {
2896 kfree(mdev->rs_plan_s.values);
2897 mdev->rs_plan_s.values = rs_plan_s;
2898 mdev->rs_plan_s.size = fifo_size;
2899 mdev->rs_planed = 0;
2901 spin_unlock(&mdev->peer_seq_lock);
2906 /* just for completeness: actually not needed,
2907 * as this is not reached if csums_tfm was ok. */
2908 crypto_free_hash(csums_tfm);
2909 /* but free the verify_tfm again, if csums_tfm did not work out */
2910 crypto_free_hash(verify_tfm);
2911 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2915 /* warn if the arguments differ by more than 12.5% */
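/* (d > a>>3 means d exceeds one eighth of a, i.e. 12.5%: e.g. with
 * a = 1000 and b = 860 sectors, d = 140 > 125 = a>>3, so the warning
 * fires; with b = 900, d = 100 stays below both thresholds.) */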
2916 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2917 const char *s, sector_t a, sector_t b)
2920 if (a == 0 || b == 0)
2922 d = (a > b) ? (a - b) : (b - a);
2923 if (d > (a>>3) || d > (b>>3))
2924 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2925 (unsigned long long)a, (unsigned long long)b);
2928 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2930 struct p_sizes *p = &mdev->data.rbuf.sizes;
2931 enum determine_dev_size dd = unchanged;
2932 sector_t p_size, p_usize, my_usize;
2933 int ldsc = 0; /* local disk size changed */
2934 enum dds_flags ddsf;
2936 p_size = be64_to_cpu(p->d_size);
2937 p_usize = be64_to_cpu(p->u_size);
2939 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2940 dev_err(DEV, "some backing storage is needed\n");
2941 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2945 /* just store the peer's disk size for now.
2946 * we still need to figure out whether we accept that. */
2947 mdev->p_size = p_size;
2949 if (get_ldev(mdev)) {
2950 warn_if_differ_considerably(mdev, "lower level device sizes",
2951 p_size, drbd_get_max_capacity(mdev->ldev));
2952 warn_if_differ_considerably(mdev, "user requested size",
2953 p_usize, mdev->ldev->dc.disk_size);
2955 /* if this is the first connect, or an otherwise expected
2956 * param exchange, choose the minimum */
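		/* min_not_zero: a u_size of 0 (unset) on either side defers
		 * to the other side's value; if both are set, the smaller
		 * one wins. */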
2957 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2958 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2961 my_usize = mdev->ldev->dc.disk_size;
2963 if (mdev->ldev->dc.disk_size != p_usize) {
2964 mdev->ldev->dc.disk_size = p_usize;
2965 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2966 (unsigned long)mdev->ldev->dc.disk_size);
2969 /* Never shrink a device with usable data during connect.
2970 But allow online shrinking if we are connected. */
2971 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2972 drbd_get_capacity(mdev->this_bdev) &&
2973 mdev->state.disk >= D_OUTDATED &&
2974 mdev->state.conn < C_CONNECTED) {
2975 dev_err(DEV, "The peer's disk size is too small!\n");
2976 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2977 mdev->ldev->dc.disk_size = my_usize;
2984 ddsf = be16_to_cpu(p->dds_flags);
2985 if (get_ldev(mdev)) {
2986 dd = drbd_determine_dev_size(mdev, ddsf);
2988 if (dd == dev_size_error)
2992 /* I am diskless, need to accept the peer's size. */
2993 drbd_set_my_capacity(mdev, p_size);
2996 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
2997 drbd_reconsider_max_bio_size(mdev);
2999 if (get_ldev(mdev)) {
3000 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3001 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3008 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3009 if (be64_to_cpu(p->c_size) !=
3010 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3011 /* we have different sizes, probably peer
3012 * needs to know my new size... */
3013 drbd_send_sizes(mdev, 0, ddsf);
3015 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3016 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3017 if (mdev->state.pdsk >= D_INCONSISTENT &&
3018 mdev->state.disk >= D_INCONSISTENT) {
3019 if (ddsf & DDSF_NO_RESYNC)
3020 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3022 resync_after_online_grow(mdev);
3024 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3031 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3033 struct p_uuids *p = &mdev->data.rbuf.uuids;
3035 int i, updated_uuids = 0;
3037 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3039 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3040 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3042 kfree(mdev->p_uuid);
3043 mdev->p_uuid = p_uuid;
3045 if (mdev->state.conn < C_CONNECTED &&
3046 mdev->state.disk < D_INCONSISTENT &&
3047 mdev->state.role == R_PRIMARY &&
3048 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3049 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3050 (unsigned long long)mdev->ed_uuid);
3051 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3055 if (get_ldev(mdev)) {
3056 int skip_initial_sync =
3057 mdev->state.conn == C_CONNECTED &&
3058 mdev->agreed_pro_version >= 90 &&
3059 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3060 (p_uuid[UI_FLAGS] & 8);
3061 if (skip_initial_sync) {
3062 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3063 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3064 "clear_n_write from receive_uuids",
3065 BM_LOCKED_TEST_ALLOWED);
3066 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3067 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3068 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3074 } else if (mdev->state.disk < D_INCONSISTENT &&
3075 mdev->state.role == R_PRIMARY) {
3076 /* I am a diskless primary, the peer just created a new current UUID
3078 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	/* Before we test for the disk state, we should wait until a possibly
	   ongoing cluster wide state change has finished. That is important if
3083 we are primary and are detaching from our disk. We need to see the
3084 new disk state... */
3085 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3086 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3087 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3090 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3096 * convert_state() - Converts the peer's view of the cluster state to our point of view
3097 * @ps: The state as seen by the peer.
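 *
 * Example: a peer that reports itself as C_VERIFY_S is, from our point
 * of view, C_VERIFY_T; connection states, roles and disk states are
 * swapped pairwise like that.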
3099 static union drbd_state convert_state(union drbd_state ps)
3101 union drbd_state ms;
3103 static enum drbd_conns c_tab[] = {
3104 [C_CONNECTED] = C_CONNECTED,
3106 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3107 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3108 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3109 [C_VERIFY_S] = C_VERIFY_T,
3115 ms.conn = c_tab[ps.conn];
3120 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3125 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3127 struct p_req_state *p = &mdev->data.rbuf.req_state;
3128 union drbd_state mask, val;
3129 enum drbd_state_rv rv;
3131 mask.i = be32_to_cpu(p->mask);
3132 val.i = be32_to_cpu(p->val);
3134 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3135 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3136 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3140 mask = convert_state(mask);
3141 val = convert_state(val);
3143 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3145 drbd_send_sr_reply(mdev, rv);
3151 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3153 struct p_state *p = &mdev->data.rbuf.state;
3154 union drbd_state os, ns, peer_state;
3155 enum drbd_disk_state real_peer_disk;
3156 enum chg_state_flags cs_flags;
3159 peer_state.i = be32_to_cpu(p->state);
3161 real_peer_disk = peer_state.disk;
3162 if (peer_state.disk == D_NEGOTIATING) {
3163 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3164 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3167 spin_lock_irq(&mdev->req_lock);
3169 os = ns = mdev->state;
3170 spin_unlock_irq(&mdev->req_lock);
3172 /* If this is the "end of sync" confirmation, usually the peer disk
3173 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3174 * set) resync started in PausedSyncT, or if the timing of pause-/
3175 * unpause-sync events has been "just right", the peer disk may
3176 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3178 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3179 real_peer_disk == D_UP_TO_DATE &&
3180 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3181 /* If we are (becoming) SyncSource, but peer is still in sync
3182 * preparation, ignore its uptodate-ness to avoid flapping, it
3183 * will change to inconsistent once the peer reaches active
3185 * It may have changed syncer-paused flags, however, so we
3186 * cannot ignore this completely. */
3187 if (peer_state.conn > C_CONNECTED &&
3188 peer_state.conn < C_SYNC_SOURCE)
3189 real_peer_disk = D_INCONSISTENT;
3191 /* if peer_state changes to connected at the same time,
3192 * it explicitly notifies us that it finished resync.
3193 * Maybe we should finish it up, too? */
3194 else if (os.conn >= C_SYNC_SOURCE &&
3195 peer_state.conn == C_CONNECTED) {
3196 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3197 drbd_resync_finished(mdev);
3202 /* peer says his disk is inconsistent, while we think it is uptodate,
3203 * and this happens while the peer still thinks we have a sync going on,
3204 * but we think we are already done with the sync.
3205 * We ignore this to avoid flapping pdsk.
3206 * This should not happen, if the peer is a recent version of drbd. */
3207 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3208 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3209 real_peer_disk = D_UP_TO_DATE;
3211 if (ns.conn == C_WF_REPORT_PARAMS)
3212 ns.conn = C_CONNECTED;
3214 if (peer_state.conn == C_AHEAD)
3217 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3218 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3219 int cr; /* consider resync */
3221 /* if we established a new connection */
3222 cr = (os.conn < C_CONNECTED);
3223 /* if we had an established connection
3224 * and one of the nodes newly attaches a disk */
3225 cr |= (os.conn == C_CONNECTED &&
3226 (peer_state.disk == D_NEGOTIATING ||
3227 os.disk == D_NEGOTIATING));
3228 /* if we have both been inconsistent, and the peer has been
3229 * forced to be UpToDate with --overwrite-data */
3230 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3231 /* if we had been plain connected, and the admin requested to
3232 * start a sync by "invalidate" or "invalidate-remote" */
3233 cr |= (os.conn == C_CONNECTED &&
3234 (peer_state.conn >= C_STARTING_SYNC_S &&
3235 peer_state.conn <= C_WF_BITMAP_T));
3238 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3241 if (ns.conn == C_MASK) {
3242 ns.conn = C_CONNECTED;
3243 if (mdev->state.disk == D_NEGOTIATING) {
3244 drbd_force_state(mdev, NS(disk, D_FAILED));
3245 } else if (peer_state.disk == D_NEGOTIATING) {
3246 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3247 peer_state.disk = D_DISKLESS;
3248 real_peer_disk = D_DISKLESS;
3250 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3252 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3253 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3259 spin_lock_irq(&mdev->req_lock);
3260 if (mdev->state.i != os.i)
3262 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3263 ns.peer = peer_state.role;
3264 ns.pdsk = real_peer_disk;
3265 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3266 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3267 ns.disk = mdev->new_state_tmp.disk;
3268 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3269 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3270 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3271 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
			   for temporary network outages! */
3273 spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "Aborting Connect, cannot thaw IO with an only Consistent peer\n");
3276 drbd_uuid_new_current(mdev);
3277 clear_bit(NEW_CUR_UUID, &mdev->flags);
3278 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3281 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3283 spin_unlock_irq(&mdev->req_lock);
3285 if (rv < SS_SUCCESS) {
3286 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3290 if (os.conn > C_WF_REPORT_PARAMS) {
3291 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
3293 /* we want resync, peer has not yet decided to sync... */
3294 /* Nowadays only used when forcing a node into primary role and
3295 setting its disk to UpToDate with that */
3296 drbd_send_uuids(mdev);
3297 drbd_send_current_state(mdev);
3301 mdev->net_conf->want_lose = 0;
3303 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3308 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3310 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3312 wait_event(mdev->misc_wait,
3313 mdev->state.conn == C_WF_SYNC_UUID ||
3314 mdev->state.conn == C_BEHIND ||
3315 mdev->state.conn < C_CONNECTED ||
3316 mdev->state.disk < D_NEGOTIATING);
3318 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3320 /* Here the _drbd_uuid_ functions are right, current should
3321 _not_ be rotated into the history */
3322 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3323 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3324 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3326 drbd_print_uuids(mdev, "updated sync uuid");
3327 drbd_start_resync(mdev, C_SYNC_TARGET);
3331 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3337 * receive_bitmap_plain
3339 * Return 0 when done, 1 when another iteration is needed, and a negative error
3340 * code upon failure.
3343 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3344 unsigned long *buffer, struct bm_xfer_ctx *c)
3346 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3347 unsigned want = num_words * sizeof(long);
3350 if (want != data_size) {
		dev_err(DEV, "%s: want (%u) != data_size (%u)\n", __func__, want, data_size);
3356 err = drbd_recv(mdev, buffer, want);
3363 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3365 c->word_offset += num_words;
3366 c->bit_offset = c->word_offset * BITS_PER_LONG;
3367 if (c->bit_offset > c->bm_bits)
3368 c->bit_offset = c->bm_bits;
3376 * Return 0 when done, 1 when another iteration is needed, and a negative error
3377 * code upon failure.
3380 recv_bm_rle_bits(struct drbd_conf *mdev,
3381 struct p_compressed_bm *p,
3382 struct bm_xfer_ctx *c)
3384 struct bitstream bs;
3388 unsigned long s = c->bit_offset;
3390 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3391 int toggle = DCBP_get_start(p);
3395 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3397 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3401 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3402 bits = vli_decode_bits(&rl, look_ahead);
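		/* runs alternate between clear and set bits: e.g. with
		 * toggle starting at 0 and decoded run lengths 4, 2, 3,
		 * bits s..s+3 stay clear, s+4..s+5 get set, and s+6..s+8
		 * stay clear again. */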
3408 if (e >= c->bm_bits) {
3409 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3412 _drbd_bm_set_bits(mdev, s, e);
3416 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3417 have, bits, look_ahead,
3418 (unsigned int)(bs.cur.b - p->code),
3419 (unsigned int)bs.buf_len);
3422 look_ahead >>= bits;
3425 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3428 look_ahead |= tmp << have;
3433 bm_xfer_ctx_bit_to_word_offset(c);
3435 return (s != c->bm_bits);
3441 * Return 0 when done, 1 when another iteration is needed, and a negative error
3442 * code upon failure.
3445 decode_bitmap_c(struct drbd_conf *mdev,
3446 struct p_compressed_bm *p,
3447 struct bm_xfer_ctx *c)
3449 if (DCBP_get_code(p) == RLE_VLI_Bits)
3450 return recv_bm_rle_bits(mdev, p, c);
3452 /* other variants had been implemented for evaluation,
3453 * but have been dropped as this one turned out to be "best"
3454 * during all our tests. */
	dev_err(DEV, "decode_bitmap_c: unknown encoding %u\n", p->encoding);
3457 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3461 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3462 const char *direction, struct bm_xfer_ctx *c)
3464 /* what would it take to transfer it "plaintext" */
3465 unsigned plain = sizeof(struct p_header80) *
3466 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3467 + c->bm_words * sizeof(long);
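	/* i.e. one p_header80 per packet of BM_PACKET_WORDS words (rounded
	 * up), plus one extra header (presumably for the terminating
	 * packet), plus the raw words themselves. */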
3468 unsigned total = c->bytes[0] + c->bytes[1];
	/* total cannot be zero, but just in case: */
3475 /* don't report if not compressed */
3479 /* total < plain. check for overflow, still */
3480 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3481 : (1000 * total / plain);
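	/* r is the size ratio in tenths of a percent: e.g. total = 250 and
	 * plain = 1000 gives r = 250, reported below as "compression: 25.0%". */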
3487 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3488 "total %u; compression: %u.%u%%\n",
3490 c->bytes[1], c->packets[1],
3491 c->bytes[0], c->packets[0],
3492 total, r/10, r % 10);
/* Since we process the bitfield from lower to higher addresses, it does
   not matter whether we do that in 32 bit or 64 bit chunks, as long as
   everything is little endian. (Understand it as a byte stream,
   beginning with the lowest byte...) If we used big endian, we would
   have to process it from the highest address to the lowest in order to
   be agnostic to the 32 vs 64 bit issue.
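
   For example, the eight bytes 01 00 00 00 00 00 00 80 read as the two
   little endian 32 bit words 0x00000001 and 0x80000000, or as the one
   64 bit word 0x8000000000000001; either way exactly bits 0 and 63 are
   set, so the chunk size does not matter.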
3502 returns 0 on failure, 1 if we successfully received it. */
3503 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3505 struct bm_xfer_ctx c;
3509 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3511 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3512 /* you are supposed to send additional out-of-sync information
3513 * if you actually set bits during this phase */
3515 /* maybe we should use some per thread scratch page,
3516 * and allocate that during initial device creation? */
3517 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3519 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3523 c = (struct bm_xfer_ctx) {
3524 .bm_bits = drbd_bm_bits(mdev),
3525 .bm_words = drbd_bm_words(mdev),
3529 if (cmd == P_BITMAP) {
3530 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
3531 } else if (cmd == P_COMPRESSED_BITMAP) {
3532 /* MAYBE: sanity check that we speak proto >= 90,
3533 * and the feature is enabled! */
3534 struct p_compressed_bm *p;
3536 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3537 dev_err(DEV, "ReportCBitmap packet too large\n");
		/* use the page buffer */
3542 memcpy(p, h, sizeof(*h));
3543 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3545 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3546 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3549 err = decode_bitmap_c(mdev, p, &c);
		dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", cmd);
3555 c.packets[cmd == P_BITMAP]++;
3556 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3563 if (!drbd_recv_header(mdev, &cmd, &data_size))
3567 INFO_bm_xfer_stats(mdev, "receive", &c);
3569 if (mdev->state.conn == C_WF_BITMAP_T) {
3570 enum drbd_state_rv rv;
3572 ok = !drbd_send_bitmap(mdev);
3575 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3576 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3577 D_ASSERT(rv == SS_SUCCESS);
3578 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3579 /* admin may have requested C_DISCONNECTING,
3580 * other threads may have noticed network errors */
3581 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3582 drbd_conn_str(mdev->state.conn));
3587 drbd_bm_unlock(mdev);
3588 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3589 drbd_start_resync(mdev, C_SYNC_SOURCE);
3590 free_page((unsigned long) buffer);
3594 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3596 /* TODO zero copy sink :) */
3597 static char sink[128];
3600 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3605 want = min_t(int, size, sizeof(sink));
3606 r = drbd_recv(mdev, sink, want);
3607 ERR_IF(r <= 0) break;
3613 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3615 /* Make sure we've acked all the TCP data associated
3616 * with the data requests being unplugged */
3617 drbd_tcp_quickack(mdev->data.socket);
3622 static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3624 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3626 switch (mdev->state.conn) {
3627 case C_WF_SYNC_UUID:
3632 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3633 drbd_conn_str(mdev->state.conn));
3636 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3641 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3646 drbd_cmd_handler_f function;
3649 static struct data_cmd drbd_cmd_handler[] = {
3650 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3651 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3652 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3653 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3654 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3655 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3656 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3657 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3658 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3659 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3660 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3661 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3662 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3663 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3664 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3665 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3666 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3667 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3668 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3669 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3670 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3671 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
3672 /* anything missing from this table is in
3673 * the asender_tbl, see get_asender_cmd */
3674 [P_MAX_CMD] = { 0, 0, NULL },
/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually the callback can find the usual p_header in
   mdev->data.rbuf.header.head, but it may not rely on that, since there
   is also p_header95!
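
   Example: receive_sizes() is registered with pkt_size set to
   sizeof(struct p_sizes); drbdd() below first reads the sub-header
   (pkt_size minus the common header size) into the receive buffer and
   then calls the handler with the number of payload bytes that are
   still left to receive.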
3684 static void drbdd(struct drbd_conf *mdev)
3686 union p_header *header = &mdev->data.rbuf.header;
3687 unsigned int packet_size;
3688 enum drbd_packets cmd;
3689 size_t shs; /* sub header size */
3692 while (get_t_state(&mdev->receiver) == Running) {
3693 drbd_thread_current_set_cpu(mdev);
3694 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3697 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3698 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3702 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3703 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3704 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3709 rv = drbd_recv(mdev, &header->h80.payload, shs);
3710 if (unlikely(rv != shs)) {
3711 if (!signal_pending(current))
3712 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
3717 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3719 if (unlikely(!rv)) {
3720 dev_err(DEV, "error receiving %s, l: %d!\n",
3721 cmdname(cmd), packet_size);
3728 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3730 /* If we leave here, we probably want to update at least the
3731 * "Connected" indicator on stable storage. Do so explicitly here. */
3735 void drbd_flush_workqueue(struct drbd_conf *mdev)
3737 struct drbd_wq_barrier barr;
3739 barr.w.cb = w_prev_work_done;
3740 init_completion(&barr.done);
3741 drbd_queue_work(&mdev->data.work, &barr.w);
3742 wait_for_completion(&barr.done);
3745 void drbd_free_tl_hash(struct drbd_conf *mdev)
3747 struct hlist_head *h;
3749 spin_lock_irq(&mdev->req_lock);
3751 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3752 spin_unlock_irq(&mdev->req_lock);
3756 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3758 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3759 (int)(h - mdev->ee_hash), h->first);
3760 kfree(mdev->ee_hash);
3761 mdev->ee_hash = NULL;
3762 mdev->ee_hash_s = 0;
3765 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3767 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3768 (int)(h - mdev->tl_hash), h->first);
3769 kfree(mdev->tl_hash);
3770 mdev->tl_hash = NULL;
3771 mdev->tl_hash_s = 0;
3772 spin_unlock_irq(&mdev->req_lock);
3775 static void drbd_disconnect(struct drbd_conf *mdev)
3777 enum drbd_fencing_p fp;
3778 union drbd_state os, ns;
3779 int rv = SS_UNKNOWN_ERROR;
3782 if (mdev->state.conn == C_STANDALONE)
3785 /* asender does not clean up anything. it must not interfere, either */
3786 drbd_thread_stop(&mdev->asender);
3787 drbd_free_sock(mdev);
3789 /* wait for current activity to cease. */
3790 spin_lock_irq(&mdev->req_lock);
3791 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3792 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3793 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3794 spin_unlock_irq(&mdev->req_lock);
3796 /* We do not have data structures that would allow us to
3797 * get the rs_pending_cnt down to 0 again.
3798 * * On C_SYNC_TARGET we do not have any data structures describing
3799 * the pending RSDataRequest's we have sent.
3800 * * On C_SYNC_SOURCE there is no data structure that tracks
3801 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3802 * And no, it is not the sum of the reference counts in the
3803 * resync_LRU. The resync_LRU tracks the whole operation including
3804 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3806 drbd_rs_cancel_all(mdev);
3808 mdev->rs_failed = 0;
3809 atomic_set(&mdev->rs_pending_cnt, 0);
3810 wake_up(&mdev->misc_wait);
3812 /* make sure syncer is stopped and w_resume_next_sg queued */
3813 del_timer_sync(&mdev->resync_timer);
3814 resync_timer_fn((unsigned long)mdev);
3816 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3817 * w_make_resync_request etc. which may still be on the worker queue
3818 * to be "canceled" */
3819 drbd_flush_workqueue(mdev);
3821 /* This also does reclaim_net_ee(). If we do this too early, we might
	 * miss some resync ee and pages. */
3823 drbd_process_done_ee(mdev);
3825 kfree(mdev->p_uuid);
3826 mdev->p_uuid = NULL;
3828 if (!is_susp(mdev->state))
3831 dev_info(DEV, "Connection closed\n");
3836 if (get_ldev(mdev)) {
3837 fp = mdev->ldev->dc.fencing;
3841 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3842 drbd_try_outdate_peer_async(mdev);
3844 spin_lock_irq(&mdev->req_lock);
3846 if (os.conn >= C_UNCONNECTED) {
3847 /* Do not restart in case we are C_DISCONNECTING */
3849 ns.conn = C_UNCONNECTED;
3850 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3852 spin_unlock_irq(&mdev->req_lock);
3854 if (os.conn == C_DISCONNECTING) {
3855 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3857 crypto_free_hash(mdev->cram_hmac_tfm);
3858 mdev->cram_hmac_tfm = NULL;
3860 kfree(mdev->net_conf);
3861 mdev->net_conf = NULL;
3862 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3865 /* serialize with bitmap writeout triggered by the state change,
3867 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3869 /* tcp_close and release of sendpage pages can be deferred. I don't
3870 * want to use SO_LINGER, because apparently it can be deferred for
3871 * more than 20 seconds (longest time I checked).
3873 * Actually we don't care for exactly when the network stack does its
3874 * put_page(), but release our reference on these pages right here.
3876 i = drbd_release_ee(mdev, &mdev->net_ee);
3878 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3879 i = atomic_read(&mdev->pp_in_use_by_net);
3881 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3882 i = atomic_read(&mdev->pp_in_use);
3884 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3886 D_ASSERT(list_empty(&mdev->read_ee));
3887 D_ASSERT(list_empty(&mdev->active_ee));
3888 D_ASSERT(list_empty(&mdev->sync_ee));
3889 D_ASSERT(list_empty(&mdev->done_ee));
3891 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3892 atomic_set(&mdev->current_epoch->epoch_size, 0);
3893 D_ASSERT(list_empty(&mdev->current_epoch->list));
3897 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3898 * we can agree on is stored in agreed_pro_version.
3900 * feature flags and the reserved array should be enough room for future
3901 * enhancements of the handshake protocol, and possible plugins...
3903 * for now, they are expected to be zero, but ignored.
3905 static int drbd_send_handshake(struct drbd_conf *mdev)
3907 /* ASSERT current == mdev->receiver ... */
3908 struct p_handshake *p = &mdev->data.sbuf.handshake;
3911 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3912 dev_err(DEV, "interrupted during initial handshake\n");
3913 return 0; /* interrupted. not ok. */
3916 if (mdev->data.socket == NULL) {
3917 mutex_unlock(&mdev->data.mutex);
3921 memset(p, 0, sizeof(*p));
3922 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3923 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
			    (struct p_header80 *)p, sizeof(*p), 0);
3926 mutex_unlock(&mdev->data.mutex);
3932 * 1 yes, we have a valid connection
3933 * 0 oops, did not work out, please try again
3934 * -1 peer talks different language,
3935 * no point in trying again, please go standalone.
3937 static int drbd_do_handshake(struct drbd_conf *mdev)
3939 /* ASSERT current == mdev->receiver ... */
3940 struct p_handshake *p = &mdev->data.rbuf.handshake;
3941 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3942 unsigned int length;
3943 enum drbd_packets cmd;
3946 rv = drbd_send_handshake(mdev);
3950 rv = drbd_recv_header(mdev, &cmd, &length);
3954 if (cmd != P_HAND_SHAKE) {
3955 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3960 if (length != expect) {
3961 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3966 rv = drbd_recv(mdev, &p->head.payload, expect);
3969 if (!signal_pending(current))
3970 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
3974 p->protocol_min = be32_to_cpu(p->protocol_min);
3975 p->protocol_max = be32_to_cpu(p->protocol_max);
3976 if (p->protocol_max == 0)
3977 p->protocol_max = p->protocol_min;
3979 if (PRO_VERSION_MAX < p->protocol_min ||
3980 PRO_VERSION_MIN > p->protocol_max)
3983 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
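	/* e.g. if we support versions 90..95 and the peer supports 93..100,
	 * the ranges overlap and we agree on min(95, 100) = 95 (numbers
	 * purely illustrative). */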
3985 dev_info(DEV, "Handshake successful: "
3986 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3991 dev_err(DEV, "incompatible DRBD dialects: "
3992 "I support %d-%d, peer supports %d-%d\n",
3993 PRO_VERSION_MIN, PRO_VERSION_MAX,
3994 p->protocol_min, p->protocol_max);
3998 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3999 static int drbd_do_auth(struct drbd_conf *mdev)
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4002 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4006 #define CHALLENGE_LEN 64
4010 0 - failed, try again (network error),
4011 -1 - auth failed, don't try again.
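
   The exchange, performed symmetrically by both peers:
     1. send P_AUTH_CHALLENGE carrying CHALLENGE_LEN random bytes
     2. receive the peer's challenge
     3. send P_AUTH_RESPONSE = HMAC(shared_secret, peer's challenge)
     4. receive the peer's response and compare it against
        HMAC(shared_secret, my_challenge)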
4014 static int drbd_do_auth(struct drbd_conf *mdev)
4016 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4017 struct scatterlist sg;
4018 char *response = NULL;
4019 char *right_response = NULL;
4020 char *peers_ch = NULL;
4021 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4022 unsigned int resp_size;
4023 struct hash_desc desc;
4024 enum drbd_packets cmd;
4025 unsigned int length;
4028 desc.tfm = mdev->cram_hmac_tfm;
4031 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4032 (u8 *)mdev->net_conf->shared_secret, key_len);
4034 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4039 get_random_bytes(my_challenge, CHALLENGE_LEN);
4041 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4045 rv = drbd_recv_header(mdev, &cmd, &length);
4049 if (cmd != P_AUTH_CHALLENGE) {
4050 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4056 if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "AuthChallenge payload too big.\n");
4062 peers_ch = kmalloc(length, GFP_NOIO);
4063 if (peers_ch == NULL) {
4064 dev_err(DEV, "kmalloc of peers_ch failed\n");
4069 rv = drbd_recv(mdev, peers_ch, length);
4072 if (!signal_pending(current))
4073 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
4078 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4079 response = kmalloc(resp_size, GFP_NOIO);
4080 if (response == NULL) {
4081 dev_err(DEV, "kmalloc of response failed\n");
4086 sg_init_table(&sg, 1);
4087 sg_set_buf(&sg, peers_ch, length);
4089 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4091 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4096 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4100 rv = drbd_recv_header(mdev, &cmd, &length);
4104 if (cmd != P_AUTH_RESPONSE) {
4105 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4111 if (length != resp_size) {
		dev_err(DEV, "AuthResponse payload has the wrong size\n");
	rv = drbd_recv(mdev, response, resp_size);
4119 if (rv != resp_size) {
4120 if (!signal_pending(current))
4121 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4126 right_response = kmalloc(resp_size, GFP_NOIO);
4127 if (right_response == NULL) {
4128 dev_err(DEV, "kmalloc of right_response failed\n");
4133 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4135 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4137 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4142 rv = !memcmp(response, right_response, resp_size);
4145 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4146 resp_size, mdev->net_conf->cram_hmac_alg);
4153 kfree(right_response);
4159 int drbdd_init(struct drbd_thread *thi)
4161 struct drbd_conf *mdev = thi->mdev;
4162 unsigned int minor = mdev_to_minor(mdev);
4165 sprintf(current->comm, "drbd%d_receiver", minor);
4167 dev_info(DEV, "receiver (re)started\n");
4170 h = drbd_connect(mdev);
4172 drbd_disconnect(mdev);
4173 schedule_timeout_interruptible(HZ);
4176 dev_warn(DEV, "Discarding network configuration.\n");
4177 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4182 if (get_net_conf(mdev)) {
4188 drbd_disconnect(mdev);
4190 dev_info(DEV, "receiver terminated\n");
4194 /* ********* acknowledge sender ******** */
4196 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4198 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4200 int retcode = be32_to_cpu(p->retcode);
4202 if (retcode >= SS_SUCCESS) {
4203 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4205 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4206 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4207 drbd_set_st_err_str(retcode), retcode);
4209 wake_up(&mdev->state_wait);
4214 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4216 return drbd_send_ping_ack(mdev);
4220 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4222 /* restore idle timeout */
4223 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4224 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4225 wake_up(&mdev->misc_wait);
4230 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4232 struct p_block_ack *p = (struct p_block_ack *)h;
4233 sector_t sector = be64_to_cpu(p->sector);
4234 int blksize = be32_to_cpu(p->blksize);
4236 D_ASSERT(mdev->agreed_pro_version >= 89);
4238 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4240 if (get_ldev(mdev)) {
4241 drbd_rs_complete_io(mdev, sector);
4242 drbd_set_in_sync(mdev, sector, blksize);
4243 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4244 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4247 dec_rs_pending(mdev);
4248 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4253 /* when we receive the ACK for a write request,
4254 * verify that we actually know about it */
4255 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4256 u64 id, sector_t sector)
4258 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4259 struct hlist_node *n;
4260 struct drbd_request *req;
4262 hlist_for_each_entry(req, n, slot, collision) {
4263 if ((unsigned long)req == (unsigned long)id) {
4264 if (req->sector != sector) {
4265 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4266 "wrong sector (%llus versus %llus)\n", req,
4267 (unsigned long long)req->sector,
4268 (unsigned long long)sector);
4277 typedef struct drbd_request *(req_validator_fn)
4278 (struct drbd_conf *mdev, u64 id, sector_t sector);
4280 static int validate_req_change_req_state(struct drbd_conf *mdev,
4281 u64 id, sector_t sector, req_validator_fn validator,
4282 const char *func, enum drbd_req_event what)
4284 struct drbd_request *req;
4285 struct bio_and_error m;
4287 spin_lock_irq(&mdev->req_lock);
4288 req = validator(mdev, id, sector);
4289 if (unlikely(!req)) {
4290 spin_unlock_irq(&mdev->req_lock);
4292 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
4293 (void *)(unsigned long)id, (unsigned long long)sector);
4296 __req_mod(req, what, &m);
4297 spin_unlock_irq(&mdev->req_lock);
4300 complete_master_bio(mdev, &m);
4304 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4306 struct p_block_ack *p = (struct p_block_ack *)h;
4307 sector_t sector = be64_to_cpu(p->sector);
4308 int blksize = be32_to_cpu(p->blksize);
4309 enum drbd_req_event what;
4311 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4313 if (is_syncer_block_id(p->block_id)) {
4314 drbd_set_in_sync(mdev, sector, blksize);
4315 dec_rs_pending(mdev);
4318 switch (be16_to_cpu(h->command)) {
4319 case P_RS_WRITE_ACK:
4320 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4321 what = write_acked_by_peer_and_sis;
4324 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4325 what = write_acked_by_peer;
4328 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4329 what = recv_acked_by_peer;
4332 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4333 what = conflict_discarded_by_peer;
4340 return validate_req_change_req_state(mdev, p->block_id, sector,
4341 _ack_id_to_req, __func__ , what);
4344 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4346 struct p_block_ack *p = (struct p_block_ack *)h;
4347 sector_t sector = be64_to_cpu(p->sector);
4348 int size = be32_to_cpu(p->blksize);
4349 struct drbd_request *req;
4350 struct bio_and_error m;
4352 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4354 if (is_syncer_block_id(p->block_id)) {
4355 dec_rs_pending(mdev);
4356 drbd_rs_failed_io(mdev, sector, size);
4360 spin_lock_irq(&mdev->req_lock);
4361 req = _ack_id_to_req(mdev, p->block_id, sector);
4363 spin_unlock_irq(&mdev->req_lock);
4364 if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
4365 mdev->net_conf->wire_protocol == DRBD_PROT_B) {
4366 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4367 The master bio might already be completed, therefore the
4368 request is no longer in the collision hash.
			   => Do not try to validate block_id as a request. */
4370 /* In Protocol B we might already have got a P_RECV_ACK
			   but then get a P_NEG_ACK afterwards. */
4372 drbd_set_out_of_sync(mdev, sector, size);
4375 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
4376 (void *)(unsigned long)p->block_id, (unsigned long long)sector);
4380 __req_mod(req, neg_acked, &m);
4381 spin_unlock_irq(&mdev->req_lock);
4384 complete_master_bio(mdev, &m);
4388 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4390 struct p_block_ack *p = (struct p_block_ack *)h;
4391 sector_t sector = be64_to_cpu(p->sector);
4393 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4394 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4395 (unsigned long long)sector, be32_to_cpu(p->blksize));
4397 return validate_req_change_req_state(mdev, p->block_id, sector,
4398 _ar_id_to_req, __func__ , neg_acked);
4401 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4405 struct p_block_ack *p = (struct p_block_ack *)h;
4407 sector = be64_to_cpu(p->sector);
4408 size = be32_to_cpu(p->blksize);
4410 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4412 dec_rs_pending(mdev);
4414 if (get_ldev_if_state(mdev, D_FAILED)) {
4415 drbd_rs_complete_io(mdev, sector);
4416 switch (be16_to_cpu(h->command)) {
4417 case P_NEG_RS_DREPLY:
4418 drbd_rs_failed_io(mdev, sector, size);
4432 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4434 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4436 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4438 if (mdev->state.conn == C_AHEAD &&
4439 atomic_read(&mdev->ap_in_flight) == 0 &&
4440 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4441 mdev->start_resync_timer.expires = jiffies + HZ;
4442 add_timer(&mdev->start_resync_timer);
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}
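
/*
 * A note on the completion path above: the allocation uses GFP_NOIO,
 * since recursing into write-out from the ack receiver could deadlock
 * behind our own device, and w_ov_finished is queued at the *front* of
 * the worker queue so the verify run is wrapped up promptly. Only if
 * even this tiny allocation fails do we fall back to finishing
 * synchronously in this context.
 */
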
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* Nothing to do: the packet only had to be received and
	 * consumed from the meta socket. */
	return true;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
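
/*
 * Dispatch example: when the header decodes to
 * be16_to_cpu(h->command) == P_PING_ACK, get_asender_cmd() returns the
 * { sizeof(struct p_header80), got_PingAck } entry, so the asender loop
 * below expects no payload beyond the header and hands the packet to
 * got_PingAck(). An unknown or out-of-range command yields NULL and
 * leads to a disconnect.
 */
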
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */
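
	/*
	 * The elevated scheduling class is arguably load-bearing, not
	 * cosmetic: all acks travel over the meta socket, and if this
	 * thread were starved by bulk data traffic the peer could miss
	 * its ping timeout (the mirror image of the "PingAck did not
	 * arrive in time" check below) and tear down an otherwise
	 * healthy connection.
	 */
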
	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);
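
		/*
		 * Net effect of the cork/uncork pair, as an example: with,
		 * say, a dozen block acks pending (unacked_cnt > 3), corking
		 * lets drbd_process_done_ee() queue them all into the socket
		 * buffer, and the uncork above pushes them out together
		 * rather than as a dozen small segments. Below the threshold
		 * of 3, corking would presumably only add latency.
		 */
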
		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
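		/* Example of the accumulation at work: if a block ack
		 * arrives split across two TCP segments, the first
		 * drbd_recv_short() returns only part of it; rv > 0, so
		 * received and buf advance, and the next iteration asks
		 * for just the remaining expect - received bytes before
		 * the packet is dispatched below. */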
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}