This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

drbd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

drbd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with drbd; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
#include <linux/module.h>

#include <asm/uaccess.h>

#include <linux/drbd.h>

#include <linux/file.h>

#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller. */
static struct page *page_chain_del(struct page **head, int n)
		tmp = page_chain_next(page);
			break; /* found sufficient pages */
	/* insufficient pages, don't use any of them. */

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
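/* Illustration only (not part of the driver): the chain convention above
 * stores the "next" pointer in page->private, with 0 as the end-of-list
 * marker set by page_chain_del(). Assuming page_chain_next(page) is
 * essentially (struct page *)page_private(page), counting the pages of a
 * chain could look like this: */
static inline int page_chain_count_sketch(struct page *page)
{
	int n = 0;
	for (; page; page = (struct page *)page_private(page))
		n++;
	return n;
}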
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */
	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
		list_move(le, to_be_freed);
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

	atomic_add(number, &mdev->pp_in_use);
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);

	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
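/* Usage sketch (illustration only, error paths elided): drbd_pp_alloc()
 * and drbd_pp_free() must stay paired, since pp_in_use is exactly what
 * drbd_pp_alloc() sleeps on once max_buffers is exhausted:
 *
 *	struct page *page = drbd_pp_alloc(mdev, nr_pages, true);
 *	if (!page)
 *		return -EINTR;		// interrupted while waiting
 *	... fill the chain from the socket ...
 *	drbd_pp_free(mdev, page, 0);	// 0: accounted in pp_in_use
 */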
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_process_done_ee()
 drbd_wait_ee_list_empty()
*/
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				       unsigned int data_size,
				       gfp_t gfp_mask) __must_hold(local)
	struct drbd_epoch_entry *e;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	INIT_HLIST_NODE(&e->collision);
	atomic_set(&e->pending_bios, 0);

	/* The block_id is opaque to the receiver. It is not endianness
	 * converted, and sent back to the sender unchanged. */

	mempool_free(e, drbd_ee_mempool);
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
	if (e->flags & EE_HAS_DIGEST)
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument. */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);

	wake_up(&mdev->ee_wait);
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
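/* Sketch of the locking rule stated above (illustration only): callers of
 * the underscore variant must already hold req_lock, the plain variant
 * takes and releases it itself:
 *
 *	spin_lock_irq(&mdev->req_lock);
 *	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);   // lock held
 *	spin_unlock_irq(&mdev->req_lock);
 *
 *	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);    // takes the lock
 */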
/* see also kernel_accept(), which is only present since 2.6.18.
 * We also want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		       struct socket *sock, struct socket **newsock)
	struct sock *sk = sock->sk;

	err = sock->ops->listen(sock, 5);

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,

	err = sock->ops->accept(sock, *newsock, 0);
		sock_release(*newsock);

	(*newsock)->ops = sock->ops;
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
			   void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};

	rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);

	/* ECONNRESET	other side closed the connection
	 * ERESTARTSYS	(on sock) we got a signal */
		if (rv == -ECONNRESET)
			dev_info(DEV, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		dev_info(DEV, "sock was shut down by peer\n");
		/* signal came in, or peer/link went down,
		 * after we read a partial message */
		/* D_ASSERT(signal_pending(current)); */

		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
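/* Illustration only: drbd_recv() follows plain recvmsg() semantics, so a
 * caller typically distinguishes three outcomes:
 *
 *	rv = drbd_recv(mdev, buf, size);
 *	if (rv == size)
 *		...;		// complete packet received
 *	else if (rv >= 0)
 *		...;		// short read, peer closed mid-packet
 *	else
 *		...;		// -ECONNRESET, -ERESTARTSYS, ...
 *
 * any outcome other than rv == size already forced C_BROKEN_PIPE above. */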
/* On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so. */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
	struct sockaddr_in6 src_in6;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so Linux selects
	 * a free one dynamically. */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;

		dev_err(DEV, "%s failed, err = %d\n", what, err);

	if (disconnect_on_error)
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
	struct socket *s_estab = NULL, *s_listen;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
				  (struct sockaddr *) mdev->net_conf->my_addr,
				  mdev->net_conf->my_addr_len);

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

		sock_release(s_listen);

	if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
		dev_err(DEV, "%s failed, err = %d\n", what, err);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static int drbd_send_fp(struct drbd_conf *mdev,
			struct socket *sock, enum drbd_packets cmd)
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
	struct socket *s, *sock, *msock;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

		/* 3 tries, this should take less than a second! */
		s = drbd_try_connect(mdev);
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);

				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;

			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;

		s = drbd_wait_for_connect(mdev);
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
				dev_warn(DEV, "initial packet S crossed\n");
				dev_warn(DEV, "initial packet M crossed\n");
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				dev_warn(DEV, "Error receiving initial packet\n");

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;

			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/*
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
			dev_err(DEV, "Authentication of peer failed\n");
			dev_err(DEV, "Authentication of peer failed, trying again.\n");

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);

	drbd_thread_start(&mdev->asender);

	if (drbd_send_protocol(mdev) == -1)
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
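/* Hedged summary of the connect dance above (illustration, not a spec):
 *
 *	for (;;) {
 *		s = drbd_try_connect(mdev);		// active attempt
 *		// on success, announce the first still-missing socket:
 *		// P_HAND_SHAKE_S for sock, P_HAND_SHAKE_M for msock
 *		s = drbd_wait_for_connect(mdev);	// passive attempt
 *		// drbd_recv_fp() tells us which slot the peer meant
 *		if (sock and msock both pass drbd_socket_okay())
 *			break;
 *	}
 *
 * Crossed announcements replace the older socket of that type, and only
 * after both sockets exist follow drbd_do_handshake(), the optional
 * drbd_do_auth(), and the initial state packets sent above. */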
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
	union p_header *h = &mdev->data.rbuf.header;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);

	if (likely(h->h80.magic == cpu_to_be32(DRBD_MAGIC))) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));

	mdev->last_received = jiffies;
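/* Sketch of the two on-wire header layouts decoded above (field names as
 * in p_header80/p_header95; all fields big endian):
 *
 *	h80:  u32 magic (DRBD_MAGIC)      | u16 command | u16 length
 *	h95:  u16 magic (DRBD_MAGIC_BIG)  | u16 command | u32 length
 *
 * The h95 variant trades half of the magic for a 32bit length field, which
 * is why *packet_size comes from be16 in one branch and be32 in the other. */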
static void drbd_flush(struct drbd_conf *mdev)
	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
			atomic_dec(&epoch->active);
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
		case EV_BECAME_LAST:

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				if (rv == FE_STILL_LIVE)
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
				wake_up(&mdev->ee_wait);

	spin_unlock(&mdev->epoch_lock);
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",

	pwo = mdev->write_ordering;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
/**
 * @mdev:	DRBD device.
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		   const unsigned rw, const int fault_type)
	struct bio *bios = NULL;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;
	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);

	D_ASSERT(page == NULL);

	atomic_set(&e->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");

		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		/* The current_epoch got recycled while we allocated this one... */

	spin_unlock(&mdev->epoch_lock);
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);

	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",

		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);

	mdev->recv_cnt += data_size>>9;
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it. */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
	page = drbd_pp_alloc(mdev, 1, 1);

	rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
	if (rr != min_t(int, data_size, PAGE_SIZE)) {
		if (!signal_pending(current))
			dev_warn(DEV,
				"short read receiving data: read %d expected %d\n",
				rr, min_t(int, data_size, PAGE_SIZE));

	drbd_pp_free(mdev, page, 0);
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",

	/* optimistically update recv_cnt. if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			       kmap(bvec->bv_page)+bvec->bv_offset,
		kunmap(bvec->bv_page);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",

		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
static struct drbd_request *
find_request(struct drbd_conf *mdev,
	     struct hlist_head *(*hash_slot)(struct drbd_conf *, sector_t),
	     u64 id, sector_t sector, const char *func)
	struct hlist_head *slot = hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req != (unsigned long)id)
		if (req->sector != sector) {
			dev_err(DEV, "%s: found request %lu but it has "
				"wrong sector (%llus versus %llus)\n",
				func, (unsigned long)req,
				(unsigned long long)req->sector,
				(unsigned long long)sector);

/* when we receive the answer for a read request,
 * verify that we actually know about it */
static struct drbd_request *ar_id_to_req(struct drbd_conf *mdev, u64 id,
	return find_request(mdev, ar_hash_slot, id, sector, __func__);
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_request *req;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */
static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);

	atomic_add(data_size >> 9, &mdev->rs_sect_in);
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->collision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->collision));
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
	spin_lock(&mdev->peer_seq_lock);
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
		if (signal_pending(current)) {
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
	spin_unlock(&mdev->peer_seq_lock);
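/* Sketch of the wrap-safe comparison the loop above relies on: with u32
 * sequence numbers, "a is not after b" is decided within half the number
 * space, which covers the 32bit wrap discussed in the comment above
 * (illustration only; the driver's own seq_le() is assumed equivalent): */
static inline int seq_le_sketch(u32 a, u32 b)
{
	return (s32)(a - b) <= 0;	/* a <= b, modulo 2^32 */
}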
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
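/* Hedged sketch of the inverse direction referenced above; the real
 * bio_flags_to_wire() lives in the sending path, this is just the
 * symmetric mapping for illustration: */
static inline u32 bio_flags_to_wire_sketch(unsigned long bi_rw)
{
	return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
		(bi_rw & REQ_FUA ? DP_FUA : 0) |
		(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
		(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}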
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee cannot
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	block the receiver, waiting on misc_wait
		 *	until no more conflicting requests are there,
		 *	or we get interrupted (disconnect).
		 *
		 *	we do not just write after local io completion of those
		 *	requests, but only after req is done completely, i.e.
		 *	we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, collision) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					dev_alert(DEV, "%s[%u] Concurrent local write detected!"
					      "	new: %llus +%u; pending: %llus +%u\n",
					      current->comm, current->pid,
					      (unsigned long long)sector, size,
					      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);

				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				finish_wait(&mdev->misc_wait, &wait);

			if (signal_pending(current)) {
				hlist_del_init(&e->collision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;

			spin_unlock_irq(&mdev->req_lock);
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);

			spin_lock_irq(&mdev->req_lock);

		finish_wait(&mdev->misc_wait, &wait);

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);

	drbd_free_ee(mdev, e);
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * activity (more than 64 sectors) that we cannot account for with our own
 * resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster. */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
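/* Worked example for the decision above (numbers invented): with the
 * usual 4 KiB per bitmap bit, rs_mark_left[i] - rs_left == 24576 bits
 * cleared over dt == 6 seconds gives dbdt = Bit2KB(24576/6) = 16384 KB/s;
 * resync is considered for throttling only while that exceeds the
 * configured c_min_rate. */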
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	unsigned int fault_type;
	struct p_block_req *p =	&mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, digest_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);

	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;

		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;

			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;

		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		fault_type = DRBD_FAULT_MAX;

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while. */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only. */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))

	atomic_add(size >> 9, &mdev->rs_sect_ev);

	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

	drbd_free_ee(mdev, e);
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
	case ASB_DISCONNECT:
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
		else if (ch_self > ch_peer)
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
	case ASB_DISCONNECT:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
		if (hg == 1  && mdev->state.role == R_PRIMARY)
		rv = drbd_asb_recover_0p(mdev);
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
				dev_warn(DEV, "Successfully gave up primary role.\n");
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		rv = drbd_asb_recover_0p(mdev);
	case ASB_DISCONNECT:
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
				dev_warn(DEV, "Successfully gave up primary role.\n");
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);

	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
		 text,
		 (unsigned long long)uuid[UI_CURRENT],
		 (unsigned long long)uuid[UI_BITMAP],
		 (unsigned long long)uuid[UI_HISTORY_START],
		 (unsigned long long)uuid[UI_HISTORY_END],
		 (unsigned long long)bits,
		 (unsigned long long)flags);
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
*/
2353 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2358 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2359 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2362 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2366 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2367 peer != UUID_JUST_CREATED)
2371 if (self != UUID_JUST_CREATED &&
2372 (peer == UUID_JUST_CREATED || peer == (u64)0))
2376 int rct, dc; /* roles at crash time */
2378 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2380 if (mdev->agreed_pro_version < 91)
2383 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2384 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2385 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2386 drbd_uuid_set_bm(mdev, 0UL);
2388 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2389 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2392 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2399 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2401 if (mdev->agreed_pro_version < 91)
2404 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2405 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2406 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2408 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2409 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2410 mdev->p_uuid[UI_BITMAP] = 0UL;
2412 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2415 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2422 /* Common power [off|failure] */
2423 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2424 (mdev->p_uuid[UI_FLAGS] & 2);
2425 /* lowest bit is set when we were primary,
2426 * next bit (weight 2) is set when peer was primary */
2430 case 0: /* !self_pri && !peer_pri */ return 0;
2431 case 1: /* self_pri && !peer_pri */ return 1;
2432 case 2: /* !self_pri && peer_pri */ return -1;
2433 case 3: /* self_pri && peer_pri */
2434 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2440 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2445 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2447 if (mdev->agreed_pro_version < 96 ?
2448 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2449 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2450 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2451 /* The last P_SYNC_UUID did not get through. Undo the modifications the
2452    peer made to its UUIDs when it last became sync source. */
2454 if (mdev->agreed_pro_version < 91)
2457 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2458 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2460 dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
2461 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2468 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2469 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2470 peer = mdev->p_uuid[i] & ~((u64)1);
2476 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2477 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2482 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2484 if (mdev->agreed_pro_version < 96 ?
2485 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2486 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2487 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2488 /* The last P_SYNC_UUID did not get through. Undo the modifications we
2489    made to our UUIDs when we last became sync source. */
2491 if (mdev->agreed_pro_version < 91)
2494 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2495 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2497 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2498 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2499 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2507 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2508 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2509 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2515 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2516 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2517 if (self == peer && self != ((u64)0))
2521 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2522 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2523 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2524 peer = mdev->p_uuid[j] & ~((u64)1);
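/*
 * Illustrative sketch, not part of the driver: one way to read the hg
 * values documented in the comment above drbd_uuid_compare().  Positive
 * means we become C_SYNC_SOURCE, negative means C_SYNC_TARGET; the
 * helper name is hypothetical.
 */
static const char *uuid_compare_verdict_example(int hg)
{
	if (hg <= -1000)
		return hg == -1000 ? "unrelated data"
				   : "requires a newer protocol version";
	if (hg == 100 || hg == -100)
		return "split brain";
	if (hg == 2 || hg == -2)
		return "full sync, set bitmap";
	if (hg == 1 || hg == -1)
		return "bitmap based sync";
	return "no sync necessary";	/* hg == 0 */
}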
2533 /* drbd_sync_handshake() returns the new conn state on success, or
2534    C_MASK (-1) on failure.
2536 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2537 enum drbd_disk_state peer_disk) __must_hold(local)
2540 enum drbd_conns rv = C_MASK;
2541 enum drbd_disk_state mydisk;
2543 mydisk = mdev->state.disk;
2544 if (mydisk == D_NEGOTIATING)
2545 mydisk = mdev->new_state_tmp.disk;
2547 dev_info(DEV, "drbd_sync_handshake:\n");
2548 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2549 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2550 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2552 hg = drbd_uuid_compare(mdev, &rule_nr);
2554 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2557 dev_alert(DEV, "Unrelated data, aborting!\n");
2561 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2565 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2566 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2567 int f = (hg == -100) || abs(hg) == 2;
2568 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2571 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2572 hg > 0 ? "source" : "target");
2576 drbd_khelper(mdev, "initial-split-brain");
2578 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2579 int pcount = (mdev->state.role == R_PRIMARY)
2580 + (peer_role == R_PRIMARY);
2581 int forced = (hg == -100);
2585 hg = drbd_asb_recover_0p(mdev);
2588 hg = drbd_asb_recover_1p(mdev);
2591 hg = drbd_asb_recover_2p(mdev);
2594 if (abs(hg) < 100) {
2595 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2596 "automatically solved. Sync from %s node\n",
2597 pcount, (hg < 0) ? "peer" : "this");
2599 dev_warn(DEV, "Doing a full sync, since"
2600 " UUIDs where ambiguous.\n");
2607 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2609 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2613 dev_warn(DEV, "Split-Brain detected, manually solved. "
2614 "Sync from %s node\n",
2615 (hg < 0) ? "peer" : "this");
2619 /* FIXME this log message is not correct if we end up here
2620 * after an attempted attach on a diskless node.
2621 * We just refuse to attach -- well, we drop the "connection"
2622 * to that disk, in a way... */
2623 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2624 drbd_khelper(mdev, "split-brain");
2628 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2629 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2633 if (hg < 0 && /* by intention we do not use mydisk here. */
2634 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2635 switch (mdev->net_conf->rr_conflict) {
2636 case ASB_CALL_HELPER:
2637 drbd_khelper(mdev, "pri-lost");
2639 case ASB_DISCONNECT:
2640 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2643 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2648 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2650 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2652 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2653 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2654 abs(hg) >= 2 ? "full" : "bit-map based");
2659 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2660 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2661 BM_LOCKED_SET_ALLOWED))
2665 if (hg > 0) { /* become sync source. */
2667 } else if (hg < 0) { /* become sync target */
2671 if (drbd_bm_total_weight(mdev)) {
2672 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2673 drbd_bm_total_weight(mdev));
2680 /* returns 1 if invalid */
2681 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2683 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2684 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2685 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2688 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2689 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2690 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2693 /* everything else is valid if they are equal on both sides. */
2697 /* everything else is invalid. */
2701 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2703 struct p_protocol *p = &mdev->data.rbuf.protocol;
2704 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2705 int p_want_lose, p_two_primaries, cf;
2706 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2708 p_proto = be32_to_cpu(p->protocol);
2709 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2710 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2711 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2712 p_two_primaries = be32_to_cpu(p->two_primaries);
2713 cf = be32_to_cpu(p->conn_flags);
2714 p_want_lose = cf & CF_WANT_LOSE;
2716 clear_bit(CONN_DRY_RUN, &mdev->flags);
2718 if (cf & CF_DRY_RUN)
2719 set_bit(CONN_DRY_RUN, &mdev->flags);
2721 if (p_proto != mdev->net_conf->wire_protocol) {
2722 dev_err(DEV, "incompatible communication protocols\n");
2726 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2727 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2731 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2732 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2736 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2737 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2741 if (p_want_lose && mdev->net_conf->want_lose) {
2742 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2746 if (p_two_primaries != mdev->net_conf->two_primaries) {
2747 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2751 if (mdev->agreed_pro_version >= 87) {
2752 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2754 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2757 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2758 if (strcmp(p_integrity_alg, my_alg)) {
2759 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2762 dev_info(DEV, "data-integrity-alg: %s\n",
2763 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2769 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2774 * input: alg name, feature name
2775 * return: NULL (alg name was "")
2776 * ERR_PTR(error) if something goes wrong
2777 * or the crypto hash ptr, if it worked out ok. */
2778 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2779 const char *alg, const char *name)
2781 struct crypto_hash *tfm;
2786 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2788 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2789 alg, name, PTR_ERR(tfm));
2792 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2793 crypto_free_hash(tfm);
2794 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2795 return ERR_PTR(-EINVAL);
2800 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2803 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2804 unsigned int header_size, data_size, exp_max_sz;
2805 struct crypto_hash *verify_tfm = NULL;
2806 struct crypto_hash *csums_tfm = NULL;
2807 const int apv = mdev->agreed_pro_version;
2808 int *rs_plan_s = NULL;
2811 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2812 : apv == 88 ? sizeof(struct p_rs_param)
2814 : apv <= 94 ? sizeof(struct p_rs_param_89)
2815 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2817 if (packet_size > exp_max_sz) {
2818 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2819 packet_size, exp_max_sz);
2824 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2825 data_size = packet_size - header_size;
2826 } else if (apv <= 94) {
2827 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2828 data_size = packet_size - header_size;
2829 D_ASSERT(data_size == 0);
2831 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2832 data_size = packet_size - header_size;
2833 D_ASSERT(data_size == 0);
2836 /* initialize verify_alg and csums_alg */
2837 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2839 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2842 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2846 if (data_size > SHARED_SECRET_MAX) {
2847 dev_err(DEV, "verify-alg too long, "
2848 "peer wants %u, accepting only %u byte\n",
2849 data_size, SHARED_SECRET_MAX);
2853 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2856 /* we expect NUL terminated string */
2857 /* but just in case someone tries to be evil */
2858 D_ASSERT(p->verify_alg[data_size-1] == 0);
2859 p->verify_alg[data_size-1] = 0;
2861 } else /* apv >= 89 */ {
2862 /* we still expect NUL terminated strings */
2863 /* but just in case someone tries to be evil */
2864 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2865 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2866 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2867 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2870 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2871 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2872 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2873 mdev->sync_conf.verify_alg, p->verify_alg);
2876 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2877 p->verify_alg, "verify-alg");
2878 if (IS_ERR(verify_tfm)) {
2884 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2885 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2886 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2887 mdev->sync_conf.csums_alg, p->csums_alg);
2890 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2891 p->csums_alg, "csums-alg");
2892 if (IS_ERR(csums_tfm)) {
2899 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2900 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2901 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2902 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2903 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2905 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2906 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2907 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2909 dev_err(DEV, "kmalloc of fifo_buffer failed");
2915 spin_lock(&mdev->peer_seq_lock);
2916 /* lock against drbd_nl_syncer_conf() */
2918 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2919 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2920 crypto_free_hash(mdev->verify_tfm);
2921 mdev->verify_tfm = verify_tfm;
2922 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2925 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2926 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2927 crypto_free_hash(mdev->csums_tfm);
2928 mdev->csums_tfm = csums_tfm;
2929 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2931 if (fifo_size != mdev->rs_plan_s.size) {
2932 kfree(mdev->rs_plan_s.values);
2933 mdev->rs_plan_s.values = rs_plan_s;
2934 mdev->rs_plan_s.size = fifo_size;
2935 mdev->rs_planed = 0;
2937 spin_unlock(&mdev->peer_seq_lock);
2942 /* just for completeness: actually not needed,
2943 * as this is not reached if csums_tfm was ok. */
2944 crypto_free_hash(csums_tfm);
2945 /* but free the verify_tfm again, if csums_tfm did not work out */
2946 crypto_free_hash(verify_tfm);
2947 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2951 /* warn if the arguments differ by more than 12.5% */
2952 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2953 const char *s, sector_t a, sector_t b)
2956 if (a == 0 || b == 0)
2958 d = (a > b) ? (a - b) : (b - a);
2959 if (d > (a>>3) || d > (b>>3))
2960 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2961 (unsigned long long)a, (unsigned long long)b);
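/*
 * Illustrative sketch, not part of the driver: the 12.5% check above,
 * spelled out.  a >> 3 is a/8, i.e. 12.5% of a; e.g. for a = 1000 and
 * b = 880, d = 120 exceeds b >> 3 = 110, so a warning is printed.
 */
static int differ_considerably_example(unsigned long long a, unsigned long long b)
{
	unsigned long long d = (a > b) ? (a - b) : (b - a);

	return d > (a >> 3) || d > (b >> 3);
}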
2964 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2966 struct p_sizes *p = &mdev->data.rbuf.sizes;
2967 enum determine_dev_size dd = unchanged;
2968 sector_t p_size, p_usize, my_usize;
2969 int ldsc = 0; /* local disk size changed */
2970 enum dds_flags ddsf;
2972 p_size = be64_to_cpu(p->d_size);
2973 p_usize = be64_to_cpu(p->u_size);
2975 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2976 dev_err(DEV, "some backing storage is needed\n");
2977 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2981 /* just store the peer's disk size for now.
2982 * we still need to figure out whether we accept that. */
2983 mdev->p_size = p_size;
2985 if (get_ldev(mdev)) {
2986 warn_if_differ_considerably(mdev, "lower level device sizes",
2987 p_size, drbd_get_max_capacity(mdev->ldev));
2988 warn_if_differ_considerably(mdev, "user requested size",
2989 p_usize, mdev->ldev->dc.disk_size);
2991 /* if this is the first connect, or an otherwise expected
2992 * param exchange, choose the minimum */
2993 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2994 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2997 my_usize = mdev->ldev->dc.disk_size;
2999 if (mdev->ldev->dc.disk_size != p_usize) {
3000 mdev->ldev->dc.disk_size = p_usize;
3001 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3002 (unsigned long)mdev->ldev->dc.disk_size);
3005 /* Never shrink a device with usable data during connect.
3006 But allow online shrinking if we are connected. */
3007 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
3008 drbd_get_capacity(mdev->this_bdev) &&
3009 mdev->state.disk >= D_OUTDATED &&
3010 mdev->state.conn < C_CONNECTED) {
3011 dev_err(DEV, "The peer's disk size is too small!\n");
3012 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3013 mdev->ldev->dc.disk_size = my_usize;
3020 ddsf = be16_to_cpu(p->dds_flags);
3021 if (get_ldev(mdev)) {
3022 dd = drbd_determine_dev_size(mdev, ddsf);
3024 if (dd == dev_size_error)
3028 /* I am diskless, need to accept the peer's size. */
3029 drbd_set_my_capacity(mdev, p_size);
3032 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3033 drbd_reconsider_max_bio_size(mdev);
3035 if (get_ldev(mdev)) {
3036 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3037 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3044 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3045 if (be64_to_cpu(p->c_size) !=
3046 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3047 /* we have different sizes, probably peer
3048 * needs to know my new size... */
3049 drbd_send_sizes(mdev, 0, ddsf);
3051 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3052 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3053 if (mdev->state.pdsk >= D_INCONSISTENT &&
3054 mdev->state.disk >= D_INCONSISTENT) {
3055 if (ddsf & DDSF_NO_RESYNC)
3056 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3058 resync_after_online_grow(mdev);
3060 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3067 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3069 struct p_uuids *p = &mdev->data.rbuf.uuids;
3071 int i, updated_uuids = 0;
3073 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3075 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3076 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3078 kfree(mdev->p_uuid);
3079 mdev->p_uuid = p_uuid;
3081 if (mdev->state.conn < C_CONNECTED &&
3082 mdev->state.disk < D_INCONSISTENT &&
3083 mdev->state.role == R_PRIMARY &&
3084 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3085 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3086 (unsigned long long)mdev->ed_uuid);
3087 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3091 if (get_ldev(mdev)) {
3092 int skip_initial_sync =
3093 mdev->state.conn == C_CONNECTED &&
3094 mdev->agreed_pro_version >= 90 &&
3095 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3096 (p_uuid[UI_FLAGS] & 8);
3097 if (skip_initial_sync) {
3098 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3099 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3100 "clear_n_write from receive_uuids",
3101 BM_LOCKED_TEST_ALLOWED);
3102 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3103 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3104 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3110 } else if (mdev->state.disk < D_INCONSISTENT &&
3111 mdev->state.role == R_PRIMARY) {
3112 /* I am a diskless primary, the peer just created a new current UUID
3114 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3117 /* Before we test the disk state, we should wait until a possibly
3118    ongoing cluster wide state change has finished. That is important if
3119    we are primary and are detaching from our disk. We need to see the
3120    new disk state... */
3121 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3122 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3123 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3126 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3132 * convert_state() - Converts the peer's view of the cluster state to our point of view
3133 * @ps: The state as seen by the peer.
3135 static union drbd_state convert_state(union drbd_state ps)
3137 union drbd_state ms;
3139 static enum drbd_conns c_tab[] = {
3140 [C_CONNECTED] = C_CONNECTED,
3142 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3143 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3144 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3145 [C_VERIFY_S] = C_VERIFY_T,
3151 ms.conn = c_tab[ps.conn];
3156 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3161 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3163 struct p_req_state *p = &mdev->data.rbuf.req_state;
3164 union drbd_state mask, val;
3165 enum drbd_state_rv rv;
3167 mask.i = be32_to_cpu(p->mask);
3168 val.i = be32_to_cpu(p->val);
3170 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3171 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3172 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3176 mask = convert_state(mask);
3177 val = convert_state(val);
3179 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3181 drbd_send_sr_reply(mdev, rv);
3187 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3189 struct p_state *p = &mdev->data.rbuf.state;
3190 union drbd_state os, ns, peer_state;
3191 enum drbd_disk_state real_peer_disk;
3192 enum chg_state_flags cs_flags;
3195 peer_state.i = be32_to_cpu(p->state);
3197 real_peer_disk = peer_state.disk;
3198 if (peer_state.disk == D_NEGOTIATING) {
3199 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3200 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3203 spin_lock_irq(&mdev->req_lock);
3205 os = ns = mdev->state;
3206 spin_unlock_irq(&mdev->req_lock);
3208 /* peer says his disk is uptodate, while we think it is inconsistent,
3209 * and this happens while we think we have a sync going on. */
3210 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3211 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3212 /* If we are (becoming) SyncSource, but peer is still in sync
3213 * preparation, ignore its uptodate-ness to avoid flapping, it
3214 * will change to inconsistent once the peer reaches active
3216 * It may have changed syncer-paused flags, however, so we
3217 * cannot ignore this completely. */
3218 if (peer_state.conn > C_CONNECTED &&
3219 peer_state.conn < C_SYNC_SOURCE)
3220 real_peer_disk = D_INCONSISTENT;
3222 /* if peer_state changes to connected at the same time,
3223 * it explicitly notifies us that it finished resync.
3224 * Maybe we should finish it up, too? */
3225 else if (os.conn >= C_SYNC_SOURCE &&
3226 peer_state.conn == C_CONNECTED) {
3227 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3228 drbd_resync_finished(mdev);
3233 /* peer says his disk is inconsistent, while we think it is uptodate,
3234 * and this happens while the peer still thinks we have a sync going on,
3235 * but we think we are already done with the sync.
3236 * We ignore this to avoid flapping pdsk.
3237 * This should not happen, if the peer is a recent version of drbd. */
3238 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3239 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3240 real_peer_disk = D_UP_TO_DATE;
3242 if (ns.conn == C_WF_REPORT_PARAMS)
3243 ns.conn = C_CONNECTED;
3245 if (peer_state.conn == C_AHEAD)
3248 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3249 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3250 int cr; /* consider resync */
3252 /* if we established a new connection */
3253 cr = (os.conn < C_CONNECTED);
3254 /* if we had an established connection
3255 * and one of the nodes newly attaches a disk */
3256 cr |= (os.conn == C_CONNECTED &&
3257 (peer_state.disk == D_NEGOTIATING ||
3258 os.disk == D_NEGOTIATING));
3259 /* if we have both been inconsistent, and the peer has been
3260 * forced to be UpToDate with --overwrite-data */
3261 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3262 /* if we had been plain connected, and the admin requested to
3263 * start a sync by "invalidate" or "invalidate-remote" */
3264 cr |= (os.conn == C_CONNECTED &&
3265 (peer_state.conn >= C_STARTING_SYNC_S &&
3266 peer_state.conn <= C_WF_BITMAP_T));
3269 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3272 if (ns.conn == C_MASK) {
3273 ns.conn = C_CONNECTED;
3274 if (mdev->state.disk == D_NEGOTIATING) {
3275 drbd_force_state(mdev, NS(disk, D_FAILED));
3276 } else if (peer_state.disk == D_NEGOTIATING) {
3277 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3278 peer_state.disk = D_DISKLESS;
3279 real_peer_disk = D_DISKLESS;
3281 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3283 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3284 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3290 spin_lock_irq(&mdev->req_lock);
3291 if (mdev->state.i != os.i)
3293 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3294 ns.peer = peer_state.role;
3295 ns.pdsk = real_peer_disk;
3296 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3297 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3298 ns.disk = mdev->new_state_tmp.disk;
3299 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3300 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3301 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3302 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3303    for temporary network outages! */
3304 spin_unlock_irq(&mdev->req_lock);
3305 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3307 drbd_uuid_new_current(mdev);
3308 clear_bit(NEW_CUR_UUID, &mdev->flags);
3309 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3312 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3314 spin_unlock_irq(&mdev->req_lock);
3316 if (rv < SS_SUCCESS) {
3317 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3321 if (os.conn > C_WF_REPORT_PARAMS) {
3322 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3323 peer_state.disk != D_NEGOTIATING ) {
3324 /* we want resync, peer has not yet decided to sync... */
3325 /* Nowadays only used when forcing a node into primary role and
3326 setting its disk to UpToDate with that */
3327 drbd_send_uuids(mdev);
3328 drbd_send_state(mdev);
3332 mdev->net_conf->want_lose = 0;
3334 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3339 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3341 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3343 wait_event(mdev->misc_wait,
3344 mdev->state.conn == C_WF_SYNC_UUID ||
3345 mdev->state.conn == C_BEHIND ||
3346 mdev->state.conn < C_CONNECTED ||
3347 mdev->state.disk < D_NEGOTIATING);
3349 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3351 /* Here the _drbd_uuid_ functions are right, current should
3352 _not_ be rotated into the history */
3353 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3354 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3355 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3357 drbd_print_uuids(mdev, "updated sync uuid");
3358 drbd_start_resync(mdev, C_SYNC_TARGET);
3362 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3368 * receive_bitmap_plain
3370 * Return 0 when done, 1 when another iteration is needed, and a negative error
3371 * code upon failure.
3374 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3375 unsigned long *buffer, struct bm_xfer_ctx *c)
3377 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3378 unsigned want = num_words * sizeof(long);
3381 if (want != data_size) {
3382 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3387 err = drbd_recv(mdev, buffer, want);
3394 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3396 c->word_offset += num_words;
3397 c->bit_offset = c->word_offset * BITS_PER_LONG;
3398 if (c->bit_offset > c->bm_bits)
3399 c->bit_offset = c->bm_bits;
3407 * Return 0 when done, 1 when another iteration is needed, and a negative error
3408 * code upon failure.
3411 recv_bm_rle_bits(struct drbd_conf *mdev,
3412 struct p_compressed_bm *p,
3413 struct bm_xfer_ctx *c)
3415 struct bitstream bs;
3419 unsigned long s = c->bit_offset;
3421 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3422 int toggle = DCBP_get_start(p);
3426 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3428 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3432 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3433 bits = vli_decode_bits(&rl, look_ahead);
3439 if (e >= c->bm_bits) {
3440 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3443 _drbd_bm_set_bits(mdev, s, e);
3447 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3448 have, bits, look_ahead,
3449 (unsigned int)(bs.cur.b - p->code),
3450 (unsigned int)bs.buf_len);
3453 look_ahead >>= bits;
3456 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3459 look_ahead |= tmp << have;
3464 bm_xfer_ctx_bit_to_word_offset(c);
3466 return (s != c->bm_bits);
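/*
 * Illustrative sketch, not part of the driver: the essence of the
 * toggle/run-length scheme decoded above.  Runs of clear and set bits
 * strictly alternate, so the packet only carries the run lengths plus
 * the polarity of the first run; set_bits() stands in for
 * _drbd_bm_set_bits(), and the helper name is hypothetical.
 */
static unsigned long rle_apply_runs_example(unsigned long s, int toggle,
					    const unsigned long *runs, int n,
					    void (*set_bits)(unsigned long s,
							     unsigned long e))
{
	int i;

	for (i = 0; i < n; i++) {
		if (toggle)	/* only "set" runs touch the bitmap */
			set_bits(s, s + runs[i] - 1);
		s += runs[i];
		toggle = !toggle;
	}
	return s;	/* new bit offset; should equal bm_bits at the end */
}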
3472 * Return 0 when done, 1 when another iteration is needed, and a negative error
3473 * code upon failure.
3476 decode_bitmap_c(struct drbd_conf *mdev,
3477 struct p_compressed_bm *p,
3478 struct bm_xfer_ctx *c)
3480 if (DCBP_get_code(p) == RLE_VLI_Bits)
3481 return recv_bm_rle_bits(mdev, p, c);
3483 /* other variants had been implemented for evaluation,
3484 * but have been dropped as this one turned out to be "best"
3485 * during all our tests. */
3487 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3488 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3492 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3493 const char *direction, struct bm_xfer_ctx *c)
3495 /* what would it take to transfer it "plaintext" */
3496 unsigned plain = sizeof(struct p_header80) *
3497 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3498 + c->bm_words * sizeof(long);
3499 unsigned total = c->bytes[0] + c->bytes[1];
3502 /* total cannot be zero, but just in case: */
3506 /* don't report if not compressed */
3510 /* total < plain. check for overflow, still */
3511 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3512 : (1000 * total / plain);
3518 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3519 "total %u; compression: %u.%u%%\n",
3521 c->bytes[1], c->packets[1],
3522 c->bytes[0], c->packets[0],
3523 total, r/10, r % 10);
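/*
 * Illustrative sketch, not part of the driver: the permille computation
 * above.  1000 * total would overflow 32 bit arithmetic for large
 * bitmaps, so in that case the larger operand is divided first
 * (UINT_MAX comes from <linux/kernel.h> in kernel context).
 */
static unsigned int compression_permille_example(unsigned int total,
						 unsigned int plain)
{
	return (total > UINT_MAX/1000) ? (total / (plain/1000))
				       : (1000 * total / plain);
}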
3526 /* Since we are processing the bitfield from lower addresses to higher,
3527    it does not matter whether we process it in 32 bit or 64 bit
3528    chunks, as long as it is little endian. (Understand it as a byte stream,
3529    beginning with the lowest byte...) If we used big endian,
3530    we would need to process it from the highest address to the lowest
3531    in order to be agnostic to the 32 vs 64 bit issue.
3533    returns 0 on failure, 1 if we successfully received it. */
3534 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3536 struct bm_xfer_ctx c;
3540 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3542 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3543 /* you are supposed to send additional out-of-sync information
3544 * if you actually set bits during this phase */
3546 /* maybe we should use some per thread scratch page,
3547 * and allocate that during initial device creation? */
3548 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3550 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3554 c = (struct bm_xfer_ctx) {
3555 .bm_bits = drbd_bm_bits(mdev),
3556 .bm_words = drbd_bm_words(mdev),
3560 if (cmd == P_BITMAP) {
3561 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
3562 } else if (cmd == P_COMPRESSED_BITMAP) {
3563 /* MAYBE: sanity check that we speak proto >= 90,
3564 * and the feature is enabled! */
3565 struct p_compressed_bm *p;
3567 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3568 dev_err(DEV, "ReportCBitmap packet too large\n");
3571 /* use the page buffer */
3573 memcpy(p, h, sizeof(*h));
3574 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3576 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3577 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3580 err = decode_bitmap_c(mdev, p, &c);
3582 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3586 c.packets[cmd == P_BITMAP]++;
3587 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3594 if (!drbd_recv_header(mdev, &cmd, &data_size))
3598 INFO_bm_xfer_stats(mdev, "receive", &c);
3600 if (mdev->state.conn == C_WF_BITMAP_T) {
3601 enum drbd_state_rv rv;
3603 ok = !drbd_send_bitmap(mdev);
3606 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3607 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3608 D_ASSERT(rv == SS_SUCCESS);
3609 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3610 /* admin may have requested C_DISCONNECTING,
3611 * other threads may have noticed network errors */
3612 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3613 drbd_conn_str(mdev->state.conn));
3618 drbd_bm_unlock(mdev);
3619 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3620 drbd_start_resync(mdev, C_SYNC_SOURCE);
3621 free_page((unsigned long) buffer);
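/*
 * Illustrative sketch, not part of the driver, of the endianness
 * argument in the comment above receive_bitmap(): in a little endian
 * byte stream, bit k of the bitmap always lives in byte k/8, whether
 * the stream is merged in 32 bit or 64 bit words.
 */
static int bitmap_test_bit_bytewise_example(const unsigned char *stream,
					    unsigned long bit)
{
	return (stream[bit / 8] >> (bit % 8)) & 1;
}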
3625 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3627 /* TODO zero copy sink :) */
3628 static char sink[128];
3631 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3636 want = min_t(int, size, sizeof(sink));
3637 r = drbd_recv(mdev, sink, want);
3638 ERR_IF(r <= 0) break;
3644 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3646 /* Make sure we've acked all the TCP data associated
3647 * with the data requests being unplugged */
3648 drbd_tcp_quickack(mdev->data.socket);
3653 static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3655 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3657 switch (mdev->state.conn) {
3658 case C_WF_SYNC_UUID:
3663 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3664 drbd_conn_str(mdev->state.conn));
3667 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3672 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3677 drbd_cmd_handler_f function;
3680 static struct data_cmd drbd_cmd_handler[] = {
3681 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3682 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3683 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply },
3684 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier },
3685 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
3686 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
3687 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3688 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3689 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3690 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3691 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3692 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3693 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3694 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3695 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3696 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3697 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3698 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3699 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3700 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3701 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3702 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
3703 /* anything missing from this table is in
3704 * the asender_tbl, see get_asender_cmd */
3705 [P_MAX_CMD] = { 0, 0, NULL },
3708 /* All handler functions that expect a sub-header get that sub-header in
3709    mdev->data.rbuf.header.head.payload.
3711    Usually the callback can find the usual p_header in
3712    mdev->data.rbuf.header.head, but it must not rely on that: there is also p_header95!
3715 static void drbdd(struct drbd_conf *mdev)
3717 union p_header *header = &mdev->data.rbuf.header;
3718 unsigned int packet_size;
3719 enum drbd_packets cmd;
3720 size_t shs; /* sub header size */
3723 while (get_t_state(&mdev->receiver) == Running) {
3724 drbd_thread_current_set_cpu(mdev);
3725 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3728 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3729 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3733 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3734 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3735 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3740 rv = drbd_recv(mdev, &header->h80.payload, shs);
3741 if (unlikely(rv != shs)) {
3742 if (!signal_pending(current))
3743 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
3748 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3750 if (unlikely(!rv)) {
3751 dev_err(DEV, "error receiving %s, l: %d!\n",
3752 cmdname(cmd), packet_size);
3759 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3761 /* If we leave here, we probably want to update at least the
3762 * "Connected" indicator on stable storage. Do so explicitly here. */
3766 void drbd_flush_workqueue(struct drbd_conf *mdev)
3768 struct drbd_wq_barrier barr;
3770 barr.w.cb = w_prev_work_done;
3771 init_completion(&barr.done);
3772 drbd_queue_work(&mdev->data.work, &barr.w);
3773 wait_for_completion(&barr.done);
3776 static void drbd_disconnect(struct drbd_conf *mdev)
3778 enum drbd_fencing_p fp;
3779 union drbd_state os, ns;
3780 int rv = SS_UNKNOWN_ERROR;
3783 if (mdev->state.conn == C_STANDALONE)
3786 /* asender does not clean up anything. it must not interfere, either */
3787 drbd_thread_stop(&mdev->asender);
3788 drbd_free_sock(mdev);
3790 /* wait for current activity to cease. */
3791 spin_lock_irq(&mdev->req_lock);
3792 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3793 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3794 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3795 spin_unlock_irq(&mdev->req_lock);
3797 /* We do not have data structures that would allow us to
3798 * get the rs_pending_cnt down to 0 again.
3799 * * On C_SYNC_TARGET we do not have any data structures describing
3800 * the pending RSDataRequest's we have sent.
3801 * * On C_SYNC_SOURCE there is no data structure that tracks
3802 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3803 * And no, it is not the sum of the reference counts in the
3804 * resync_LRU. The resync_LRU tracks the whole operation including
3805 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3807 drbd_rs_cancel_all(mdev);
3809 mdev->rs_failed = 0;
3810 atomic_set(&mdev->rs_pending_cnt, 0);
3811 wake_up(&mdev->misc_wait);
3813 del_timer(&mdev->request_timer);
3815 /* make sure syncer is stopped and w_resume_next_sg queued */
3816 del_timer_sync(&mdev->resync_timer);
3817 resync_timer_fn((unsigned long)mdev);
3819 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3820 * w_make_resync_request etc. which may still be on the worker queue
3821 * to be "canceled" */
3822 drbd_flush_workqueue(mdev);
3824 /* This also does reclaim_net_ee(). If we do this too early, we might
3825 * miss some resync ee and pages.*/
3826 drbd_process_done_ee(mdev);
3828 kfree(mdev->p_uuid);
3829 mdev->p_uuid = NULL;
3831 if (!is_susp(mdev->state))
3834 dev_info(DEV, "Connection closed\n");
3839 if (get_ldev(mdev)) {
3840 fp = mdev->ldev->dc.fencing;
3844 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3845 drbd_try_outdate_peer_async(mdev);
3847 spin_lock_irq(&mdev->req_lock);
3849 if (os.conn >= C_UNCONNECTED) {
3850 /* Do not restart in case we are C_DISCONNECTING */
3852 ns.conn = C_UNCONNECTED;
3853 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3855 spin_unlock_irq(&mdev->req_lock);
3857 if (os.conn == C_DISCONNECTING) {
3858 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3860 crypto_free_hash(mdev->cram_hmac_tfm);
3861 mdev->cram_hmac_tfm = NULL;
3863 kfree(mdev->net_conf);
3864 mdev->net_conf = NULL;
3865 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3868 /* serialize with bitmap writeout triggered by the state change,
3870 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3872 /* tcp_close and release of sendpage pages can be deferred. I don't
3873 * want to use SO_LINGER, because apparently it can be deferred for
3874 * more than 20 seconds (longest time I checked).
3876 * Actually we don't care for exactly when the network stack does its
3877 * put_page(), but release our reference on these pages right here.
3879 i = drbd_release_ee(mdev, &mdev->net_ee);
3881 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3882 i = atomic_read(&mdev->pp_in_use_by_net);
3884 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3885 i = atomic_read(&mdev->pp_in_use);
3887 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3889 D_ASSERT(list_empty(&mdev->read_ee));
3890 D_ASSERT(list_empty(&mdev->active_ee));
3891 D_ASSERT(list_empty(&mdev->sync_ee));
3892 D_ASSERT(list_empty(&mdev->done_ee));
3894 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3895 atomic_set(&mdev->current_epoch->epoch_size, 0);
3896 D_ASSERT(list_empty(&mdev->current_epoch->list));
3900 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3901 * we can agree on is stored in agreed_pro_version.
3903 * feature flags and the reserved array should be enough room for future
3904 * enhancements of the handshake protocol, and possible plugins...
3906 * for now, they are expected to be zero, but ignored.
3908 static int drbd_send_handshake(struct drbd_conf *mdev)
3910 /* ASSERT current == mdev->receiver ... */
3911 struct p_handshake *p = &mdev->data.sbuf.handshake;
3914 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3915 dev_err(DEV, "interrupted during initial handshake\n");
3916 return 0; /* interrupted. not ok. */
3919 if (mdev->data.socket == NULL) {
3920 mutex_unlock(&mdev->data.mutex);
3924 memset(p, 0, sizeof(*p));
3925 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3926 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3927 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3928 (struct p_header80 *)p, sizeof(*p), 0 );
3929 mutex_unlock(&mdev->data.mutex);
3935 * 1 yes, we have a valid connection
3936 * 0 oops, did not work out, please try again
3937 * -1 peer talks different language,
3938 * no point in trying again, please go standalone.
3940 static int drbd_do_handshake(struct drbd_conf *mdev)
3942 /* ASSERT current == mdev->receiver ... */
3943 struct p_handshake *p = &mdev->data.rbuf.handshake;
3944 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3945 unsigned int length;
3946 enum drbd_packets cmd;
3949 rv = drbd_send_handshake(mdev);
3953 rv = drbd_recv_header(mdev, &cmd, &length);
3957 if (cmd != P_HAND_SHAKE) {
3958 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3963 if (length != expect) {
3964 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3969 rv = drbd_recv(mdev, &p->head.payload, expect);
3972 if (!signal_pending(current))
3973 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
3977 p->protocol_min = be32_to_cpu(p->protocol_min);
3978 p->protocol_max = be32_to_cpu(p->protocol_max);
3979 if (p->protocol_max == 0)
3980 p->protocol_max = p->protocol_min;
3982 if (PRO_VERSION_MAX < p->protocol_min ||
3983 PRO_VERSION_MIN > p->protocol_max)
3986 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3988 dev_info(DEV, "Handshake successful: "
3989 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3994 dev_err(DEV, "incompatible DRBD dialects: "
3995 "I support %d-%d, peer supports %d-%d\n",
3996 PRO_VERSION_MIN, PRO_VERSION_MAX,
3997 p->protocol_min, p->protocol_max);
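/*
 * Illustrative sketch, not part of the driver: the version agreement
 * rule implemented above.  Returns the agreed protocol version, or -1
 * when the ranges [my_min, my_max] and [peer_min, peer_max] do not
 * overlap; the helper name is hypothetical.
 */
static int agree_pro_version_example(int my_min, int my_max,
				     int peer_min, int peer_max)
{
	if (peer_max == 0)	/* peers predating the max field send 0 */
		peer_max = peer_min;
	if (my_max < peer_min || my_min > peer_max)
		return -1;	/* incompatible dialects */
	return my_max < peer_max ? my_max : peer_max;
}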
4001 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4002 static int drbd_do_auth(struct drbd_conf *mdev)
4004 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4005 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4009 #define CHALLENGE_LEN 64
4013 0 - failed, try again (network error),
4014 -1 - auth failed, don't try again.
4017 static int drbd_do_auth(struct drbd_conf *mdev)
4019 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4020 struct scatterlist sg;
4021 char *response = NULL;
4022 char *right_response = NULL;
4023 char *peers_ch = NULL;
4024 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4025 unsigned int resp_size;
4026 struct hash_desc desc;
4027 enum drbd_packets cmd;
4028 unsigned int length;
4031 desc.tfm = mdev->cram_hmac_tfm;
4034 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4035 (u8 *)mdev->net_conf->shared_secret, key_len);
4037 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4042 get_random_bytes(my_challenge, CHALLENGE_LEN);
4044 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4048 rv = drbd_recv_header(mdev, &cmd, &length);
4052 if (cmd != P_AUTH_CHALLENGE) {
4053 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4059 if (length > CHALLENGE_LEN * 2) {
4060 dev_err(DEV, "expected AuthChallenge payload too big.\n");
4065 peers_ch = kmalloc(length, GFP_NOIO);
4066 if (peers_ch == NULL) {
4067 dev_err(DEV, "kmalloc of peers_ch failed\n");
4072 rv = drbd_recv(mdev, peers_ch, length);
4075 if (!signal_pending(current))
4076 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
4081 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4082 response = kmalloc(resp_size, GFP_NOIO);
4083 if (response == NULL) {
4084 dev_err(DEV, "kmalloc of response failed\n");
4089 sg_init_table(&sg, 1);
4090 sg_set_buf(&sg, peers_ch, length);
4092 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4094 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4099 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4103 rv = drbd_recv_header(mdev, &cmd, &length);
4107 if (cmd != P_AUTH_RESPONSE) {
4108 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4114 if (length != resp_size) {
4115 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4120 rv = drbd_recv(mdev, response , resp_size);
4122 if (rv != resp_size) {
4123 if (!signal_pending(current))
4124 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4129 right_response = kmalloc(resp_size, GFP_NOIO);
4130 if (right_response == NULL) {
4131 dev_err(DEV, "kmalloc of right_response failed\n");
4136 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4138 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4140 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4145 rv = !memcmp(response, right_response, resp_size);
4148 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4149 resp_size, mdev->net_conf->cram_hmac_alg);
4156 kfree(right_response);
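/*
 * Illustrative sketch, not part of the driver: the CRAM-HMAC exchange
 * above, as seen from one node (the peer runs the mirror image):
 *
 *   1. send P_AUTH_CHALLENGE carrying my_challenge (random bytes)
 *   2. recv P_AUTH_CHALLENGE carrying peers_ch
 *   3. send P_AUTH_RESPONSE  carrying HMAC(secret, peers_ch)
 *   4. recv P_AUTH_RESPONSE  and compare it to HMAC(secret, my_challenge)
 *
 * The final check is a plain memcmp(), as above; a constant-time
 * comparison would be the more defensive choice.
 */
static int auth_response_ok_example(const void *response,
				    const void *right_response,
				    unsigned int resp_size)
{
	return memcmp(response, right_response, resp_size) == 0;
}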
4162 int drbdd_init(struct drbd_thread *thi)
4164 struct drbd_conf *mdev = thi->mdev;
4165 unsigned int minor = mdev_to_minor(mdev);
4168 sprintf(current->comm, "drbd%d_receiver", minor);
4170 dev_info(DEV, "receiver (re)started\n");
4173 h = drbd_connect(mdev);
4175 drbd_disconnect(mdev);
4176 schedule_timeout_interruptible(HZ);
4179 dev_warn(DEV, "Discarding network configuration.\n");
4180 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4185 if (get_net_conf(mdev)) {
4191 drbd_disconnect(mdev);
4193 dev_info(DEV, "receiver terminated\n");
4197 /* ********* acknowledge sender ******** */
4199 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4201 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4203 int retcode = be32_to_cpu(p->retcode);
4205 if (retcode >= SS_SUCCESS) {
4206 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4208 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4209 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4210 drbd_set_st_err_str(retcode), retcode);
4212 wake_up(&mdev->state_wait);
4217 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4219 return drbd_send_ping_ack(mdev);
4223 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4225 /* restore idle timeout */
4226 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4227 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4228 wake_up(&mdev->misc_wait);
4233 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4235 struct p_block_ack *p = (struct p_block_ack *)h;
4236 sector_t sector = be64_to_cpu(p->sector);
4237 int blksize = be32_to_cpu(p->blksize);
4239 D_ASSERT(mdev->agreed_pro_version >= 89);
4241 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4243 if (get_ldev(mdev)) {
4244 drbd_rs_complete_io(mdev, sector);
4245 drbd_set_in_sync(mdev, sector, blksize);
4246 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4247 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4250 dec_rs_pending(mdev);
4251 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4256 /* when we receive the ACK for a write request,
4257 * verify that we actually know about it */
4258 static struct drbd_request *ack_id_to_req(struct drbd_conf *mdev, u64 id,
4261 return find_request(mdev, tl_hash_slot, id, sector, __func__);
4264 static int validate_req_change_req_state(struct drbd_conf *mdev,
4265 u64 id, sector_t sector,
4266 struct drbd_request *(*validator)(struct drbd_conf *, u64, sector_t),
4267 const char *func, enum drbd_req_event what)
4269 struct drbd_request *req;
4270 struct bio_and_error m;
4272 spin_lock_irq(&mdev->req_lock);
4273 req = validator(mdev, id, sector);
4274 if (unlikely(!req)) {
4275 spin_unlock_irq(&mdev->req_lock);
4277 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
4278 (void *)(unsigned long)id, (unsigned long long)sector);
4281 __req_mod(req, what, &m);
4282 spin_unlock_irq(&mdev->req_lock);
4285 complete_master_bio(mdev, &m);
4289 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4291 struct p_block_ack *p = (struct p_block_ack *)h;
4292 sector_t sector = be64_to_cpu(p->sector);
4293 int blksize = be32_to_cpu(p->blksize);
4294 enum drbd_req_event what;
4296 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4298 if (p->block_id == ID_SYNCER) {
4299 drbd_set_in_sync(mdev, sector, blksize);
4300 dec_rs_pending(mdev);
4303 switch (be16_to_cpu(h->command)) {
4304 case P_RS_WRITE_ACK:
4305 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4306 what = write_acked_by_peer_and_sis;
4309 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4310 what = write_acked_by_peer;
4313 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4314 what = recv_acked_by_peer;
4317 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4318 what = conflict_discarded_by_peer;
4325 return validate_req_change_req_state(mdev, p->block_id, sector,
4326 ack_id_to_req, __func__, what);
4329 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4331 struct p_block_ack *p = (struct p_block_ack *)h;
4332 sector_t sector = be64_to_cpu(p->sector);
4333 int size = be32_to_cpu(p->blksize);
4334 struct drbd_request *req;
4335 struct bio_and_error m;
4337 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4339 if (p->block_id == ID_SYNCER) {
4340 dec_rs_pending(mdev);
4341 drbd_rs_failed_io(mdev, sector, size);
4345 spin_lock_irq(&mdev->req_lock);
4346 req = ack_id_to_req(mdev, p->block_id, sector);
4348 spin_unlock_irq(&mdev->req_lock);
4349 if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
4350 mdev->net_conf->wire_protocol == DRBD_PROT_B) {
4351 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4352 The master bio might already be completed, therefore the
4353 request is no longer in the collision hash.
4354 => Do not try to validate block_id as request. */
4355 /* In Protocol B we might already have got a P_RECV_ACK
4356    but then get a P_NEG_ACK afterwards. */
4357 drbd_set_out_of_sync(mdev, sector, size);
4360 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
4361 (void *)(unsigned long)p->block_id, (unsigned long long)sector);
4365 __req_mod(req, neg_acked, &m);
4366 spin_unlock_irq(&mdev->req_lock);
4369 complete_master_bio(mdev, &m);
4373 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4375 struct p_block_ack *p = (struct p_block_ack *)h;
4376 sector_t sector = be64_to_cpu(p->sector);
4378 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4379 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4380 (unsigned long long)sector, be32_to_cpu(p->blksize));
4382 return validate_req_change_req_state(mdev, p->block_id, sector,
4383 ar_id_to_req, __func__ , neg_acked);
4386 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4390 struct p_block_ack *p = (struct p_block_ack *)h;
4392 sector = be64_to_cpu(p->sector);
4393 size = be32_to_cpu(p->blksize);
4395 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4397 dec_rs_pending(mdev);
4399 if (get_ldev_if_state(mdev, D_FAILED)) {
4400 drbd_rs_complete_io(mdev, sector);
4401 switch (be16_to_cpu(h->command)) {
4402 case P_NEG_RS_DREPLY:
4403 drbd_rs_failed_io(mdev, sector, size);
4417 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4419 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4421 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4423 if (mdev->state.conn == C_AHEAD &&
4424 atomic_read(&mdev->ap_in_flight) == 0 &&
4425 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4426 mdev->start_resync_timer.expires = jiffies + HZ;
4427 add_timer(&mdev->start_resync_timer);
4433 static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
4435 struct p_block_ack *p = (struct p_block_ack *)h;
4436 struct drbd_work *w;
4440 sector = be64_to_cpu(p->sector);
4441 size = be32_to_cpu(p->blksize);
4443 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4445 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4446 drbd_ov_oos_found(mdev, sector, size);
4450 if (!get_ldev(mdev))
4453 drbd_rs_complete_io(mdev, sector);
4454 dec_rs_pending(mdev);
4458 /* let's advance progress step marks only for every other megabyte */
4459 if ((mdev->ov_left & 0x200) == 0x200)
4460 drbd_advance_rs_marks(mdev, mdev->ov_left);
4462 if (mdev->ov_left == 0) {
4463 w = kmalloc(sizeof(*w), GFP_NOIO);
4465 w->cb = w_ov_finished;
4466 drbd_queue_work_front(&mdev->data.work, w);
4468 dev_err(DEV, "kmalloc(w) failed.");
4470 drbd_resync_finished(mdev);
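/* Progress marks only need coarse granularity, hence the cheap bitmask
 * test on ov_left above.  When the verify run completes, the final
 * accounting is handed to the worker via w_ov_finished (queued at the
 * front so it runs promptly); only if even that small allocation fails
 * is drbd_resync_finished() called synchronously from the asender. */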
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}
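/* got_skip consumes a packet without acting on it.  It is hooked up to
 * P_DELAY_PROBE in the table below, presumably retained only for wire
 * compatibility with peers that still send delay probes. */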
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};
static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
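/* The asender therefore only ever deals with fixed-size packets: the
 * table maps each command to its exact on-wire size.  drbd_asender()
 * below first reads sizeof(struct p_header80) bytes, looks the command
 * up here, and then raises its read target ("expect") to cmd->pkt_size.
 * A command that is out of range or has no handler yields NULL and is
 * treated as a protocol error. */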
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */
	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
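		/* Corking (TCP_CORK) batches the many small ACK packets into
		 * fewer TCP segments, which only pays off once several
		 * requests are waiting to be acknowledged; the socket is
		 * uncorked again unconditionally below. */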
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);
		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != cpu_to_be32(DRBD_MAGIC))) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
					be32_to_cpu(h->magic),
					be16_to_cpu(h->command),
					be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
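		/* Two-phase receive: the code above first gathers the
		 * fixed-size p_header80, validates magic and command, then
		 * raises "expect" to the full packet size from the table,
		 * so subsequent iterations read the remaining payload into
		 * the same buffer. */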
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}
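	/* The "if (0)" blocks below are reachable only via goto: "reconnect"
	 * degrades the connection to C_NETWORK_FAILURE so that the receiver
	 * can try to re-establish it, while "disconnect" gives up and moves
	 * to C_DISCONNECTING. */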
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}