/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
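/* Note: GFP_TRY lacks __GFP_WAIT, so allocations with it may fail
 * immediately, but will never sleep or trigger reclaim/write-out
 * themselves; see the comment in drbd_pp_first_pages_or_try_alloc()
 * below for why that matters. */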
/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
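/* The chain accessors used below (page_chain_next and the
 * page_chain_for_each* iterators) are defined elsewhere; presumably they
 * are thin wrappers around page_private(), roughly:
 *
 *	#define page_chain_next(page)	((struct page *)page_private(page))
 *	#define page_chain_for_each(page) \
 *		for (; page; page = page_chain_next(page))
 */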
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
		tmp = page_chain_next(page);
			break; /* found sufficient pages */
			/* insufficient pages, don't use any of them. */

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
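	/* E.g. node A replicates a device to node B, while B replicates
	 * another device back to A; allocation pressure on one side could
	 * then wait on write-out that itself waits on the peer's
	 * allocation at this very spot. */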
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
		list_move(le, to_be_freed);
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

		atomic_add(number, &mdev->pp_in_use);
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
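/* Note the threshold below: the global pool caches at most
 * (DRBD_MAX_BIO_SIZE/PAGE_SIZE) pages per configured minor; anything
 * beyond that is handed back to the kernel via page_chain_free(). */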
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
/*
 * You need to hold the req_lock:
 *  _drbd_wait_ee_list_empty()
 *
 * You must not have the req_lock:
 *  drbd_process_done_ee()
 *  drbd_wait_ee_list_empty()
 */
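/* The split exists because _drbd_wait_ee_list_empty() temporarily drops
 * req_lock around schedule() (see below), so it must be entered with the
 * lock already held, while plain drbd_wait_ee_list_empty() takes and
 * releases the lock itself. */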
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				       unsigned int data_size,
				       gfp_t gfp_mask) __must_hold(local)
	struct drbd_epoch_entry *e;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	INIT_HLIST_NODE(&e->collision);
	drbd_clear_interval(&e->i);
	atomic_set(&e->pending_bios, 0);
	e->i.size = data_size;
	e->i.sector = sector;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */

	mempool_free(e, drbd_ee_mempool);
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
	if (e->flags & EE_HAS_DIGEST)
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	D_ASSERT(drbd_interval_empty(&e->i));
	mempool_free(e, drbd_ee_mempool);
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	wake_up(&mdev->ee_wait);
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
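/* In essence an open-coded kernel_accept(): listen() on the given socket,
 * create a bare socket with sock_create_lite(), let the protocol's
 * ->accept() fill it in, and inherit the ops from the listening socket.
 * *what is updated along the way so the caller can log exactly which
 * step failed. */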
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		       struct socket *sock, struct socket **newsock)
	struct sock *sk = sock->sk;

	err = sock->ops->listen(sock, 5);

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,

	err = sock->ops->accept(sock, *newsock, 0);
		sock_release(*newsock);

	(*newsock)->ops = sock->ops;
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
			   void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL

		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);

		/*
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */

		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
/*
 * On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;

		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
	struct sockaddr_in6 src_in6;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;

		dev_err(DEV, "%s failed, err = %d\n", what, err);

	if (disconnect_on_error)
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
	struct socket *s_estab = NULL, *s_listen;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
				  (struct sockaddr *) mdev->net_conf->my_addr,
				  mdev->net_conf->my_addr_len);

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

		sock_release(s_listen);
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static int drbd_send_fp(struct drbd_conf *mdev,
			struct socket *sock, enum drbd_packets cmd)
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
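/* DRBD uses two TCP connections per resource: a "data" socket (sock) for
 * the bulk data stream and a "meta" socket (msock) for ACKs and pings.
 * Both nodes connect and listen simultaneously; the initial packet type
 * (P_HAND_SHAKE_S vs P_HAND_SHAKE_M) received on a freshly accepted
 * socket decides which role that socket takes, and crossed attempts are
 * detected and resolved by dropping one side's socket. */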
static int drbd_connect(struct drbd_conf *mdev)
	struct socket *s, *sock, *msock;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

		/* 3 tries, this should take less than a second! */
		s = drbd_try_connect(mdev);
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);

				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;

			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;

		s = drbd_wait_for_connect(mdev);
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
				dev_warn(DEV, "initial packet S crossed\n");
				dev_warn(DEV, "initial packet M crossed\n");
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				dev_warn(DEV, "Error receiving initial packet\n");

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;

			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
			dev_err(DEV, "Authentication of peer failed\n");
			dev_err(DEV, "Authentication of peer failed, trying again.\n");

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);

	drbd_thread_start(&mdev->asender);

	if (drbd_send_protocol(mdev) == -1)
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
	union p_header *h = &mdev->data.rbuf.header;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);

	if (likely(h->h80.magic == cpu_to_be32(DRBD_MAGIC))) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));

	mdev->last_received = jiffies;
static void drbd_flush(struct drbd_conf *mdev)
	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
			atomic_dec(&epoch->active);
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
		case EV_BECAME_LAST:

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);

				if (rv == FE_STILL_LIVE)

				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
				wake_up(&mdev->ee_wait);

	spin_unlock(&mdev->epoch_lock);
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",

	pwo = mdev->write_ordering;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
/**
 * @mdev:	DRBD device.
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		   const unsigned rw, const int fault_type)
	struct bio *bios = NULL;
	struct page *page = e->pages;
	sector_t sector = e->i.sector;
	unsigned ds = e->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > e->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);

	D_ASSERT(page == NULL);

	atomic_set(&e->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * actually written to disk. */
	switch (mdev->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");

		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		/* The current_epoch got recycled while we allocated this one... */
	spin_unlock(&mdev->epoch_lock);
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",

	ERR_IF(data_size == 0) return NULL;
	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);

	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",

		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);

	mdev->recv_cnt += data_size>>9;
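/* Note: recv_cnt above is accounted in 512-byte sectors, hence the
 * data_size >> 9. */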
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
	page = drbd_pp_alloc(mdev, 1, 1);

		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));

	drbd_pp_free(mdev, page, 0);
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",

	/* optimistically update recv_cnt. if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			       kmap(bvec->bv_page)+bvec->bv_offset,
			       expect);
		kunmap(bvec->bv_page);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",

		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->i.sector;

	D_ASSERT(hlist_unhashed(&e->collision));
	D_ASSERT(drbd_interval_empty(&e->i));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->i.size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->i.size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */
	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i))

		dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_request *req;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->req_lock);

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */
static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);

	atomic_add(data_size >> 9, &mdev->rs_sect_in);
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->i.sector;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->i.size);
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		D_ASSERT(!drbd_interval_empty(&e->i));
		drbd_remove_interval(&mdev->epoch_entries, &e->i);
		drbd_clear_interval(&e->i);
		spin_unlock_irq(&mdev->req_lock);
		D_ASSERT(hlist_unhashed(&e->collision));
		D_ASSERT(drbd_interval_empty(&e->i));

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->collision));
	hlist_del_init(&e->collision);
	D_ASSERT(!drbd_interval_empty(&e->i));
	drbd_remove_interval(&mdev->epoch_entries, &e->i);
	drbd_clear_interval(&e->i);
	spin_unlock_irq(&mdev->req_lock);
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
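/* seq_le() used below is assumed to be a wrap-safe sequence number
 * comparison in the usual serial-number-arithmetic style, roughly:
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 */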
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
	spin_lock(&mdev->peer_seq_lock);
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
		if (signal_pending(current)) {
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
	spin_unlock(&mdev->peer_seq_lock);
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
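/* The sending side's bio_flags_to_wire() is presumably the inverse
 * mapping, along the lines of (modulo any protocol version checks):
 *
 *	static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
 *	{
 *		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
 *			(bi_rw & REQ_FUA ? DP_FUA : 0) |
 *			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 *			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 *	}
 */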
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->i.size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);
		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *    queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *    block the receiver, waiting on misc_wait
		 *    until no more conflicting requests are there,
		 *    or we get interrupted (disconnect).
		 *
		 *    we do not just write after local io completion of those
		 *    requests, but only after req is done completely, i.e.
		 *    we wait for the P_DISCARD_ACK to arrive!
		 *
		 *    then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
		drbd_insert_interval(&mdev->epoch_entries, &e->i);

			struct drbd_interval *i;
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);

			i = drbd_find_overlap(&mdev->write_requests, sector, size);
				struct drbd_request *req2 =
					container_of(i, struct drbd_request, i);

				/* only ALERT on first iteration,
				 * we may be woken up early... */
					dev_alert(DEV, "%s[%u] Concurrent local write detected!"
					      " new: %llus +%u; pending: %llus +%u\n",
					      current->comm, current->pid,
					      (unsigned long long)sector, size,
					      (unsigned long long)req2->i.sector, req2->i.size);
				if (req2->rq_state & RQ_NET_PENDING)

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				finish_wait(&mdev->misc_wait, &wait);

			if (signal_pending(current)) {
				hlist_del_init(&e->collision);
				drbd_remove_interval(&mdev->epoch_entries, &e->i);
				drbd_clear_interval(&e->i);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;

			spin_unlock_irq(&mdev->req_lock);
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);

			spin_lock_irq(&mdev->req_lock);

		finish_wait(&mdev->misc_wait, &wait);

		list_add(&e->w.list, &mdev->active_ee);
		spin_unlock_irq(&mdev->req_lock);
	switch (mdev->net_conf->wire_protocol) {
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */

		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->i.sector, e->i.size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->i.sector);

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->collision);
	drbd_remove_interval(&mdev->epoch_entries, &e->i);
	drbd_clear_interval(&e->i);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->i.sector);

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
	drbd_free_ee(mdev, e);
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our
 * own resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, ... */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);
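		/* Units (assuming the usual DRBD constants): rs_mark_left is in
		 * bitmap bits, each covering one 4 KiB block, so Bit2KB(db/dt)
		 * yields KiB/s, the same unit as the configured c_min_rate it
		 * is compared against below. */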
		if (dbdt > mdev->sync_conf.c_min_rate)
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, digest_size);
	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);

	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;

		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;

			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;

		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		fault_type = DRBD_FAULT_MAX;
	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

	drbd_free_ee(mdev, e);
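/* Convention for the after-split-brain recovery helpers below (matching
 * the drbd_uuid_compare() legend further down): a positive return value
 * means this node keeps its data and becomes sync source, a negative one
 * means this node's changes are discarded and it becomes sync target,
 * and -100 means no automatic decision could be reached. */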
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
	case ASB_DISCONNECT:
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
			 "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
		else if (ch_self > ch_peer)
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
	case ASB_DISCONNECT:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
		if (hg == 1  && mdev->state.role == R_PRIMARY)
		rv = drbd_asb_recover_0p(mdev);
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
				dev_warn(DEV, "Successfully gave up primary role.\n");
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		rv = drbd_asb_recover_0p(mdev);
	case ASB_DISCONNECT:
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
				dev_warn(DEV, "Successfully gave up primary role.\n");
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);

	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
		 text,
		 (unsigned long long)uuid[UI_CURRENT],
		 (unsigned long long)uuid[UI_BITMAP],
		 (unsigned long long)uuid[UI_HISTORY_START],
		 (unsigned long long)uuid[UI_HISTORY_END],
		 (unsigned long long)bits,
		 (unsigned long long)flags);
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)

	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)

	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))

		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2417 /* Common power [off|failure] */
2418 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2419 (mdev->p_uuid[UI_FLAGS] & 2);
2420 /* lowest bit is set when we were primary,
2421 * next bit (weight 2) is set when peer was primary */
2425 case 0: /* !self_pri && !peer_pri */ return 0;
2426 case 1: /* self_pri && !peer_pri */ return 1;
2427 case 2: /* !self_pri && peer_pri */ return -1;
2428 case 3: /* self_pri && peer_pri */
2429 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2435 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2440 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2442 if (mdev->agreed_pro_version < 96 ?
2443 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2444 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2445 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2446 /* The last P_SYNC_UUID did not get through. Undo the modifications
2447 the last start of resync as sync source made to the peer's UUIDs. */
2449 if (mdev->agreed_pro_version < 91)
2452 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2453 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2455 dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
2456 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2463 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2464 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2465 peer = mdev->p_uuid[i] & ~((u64)1);
2471 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2472 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2477 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2479 if (mdev->agreed_pro_version < 96 ?
2480 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2481 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2482 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2483 /* The last P_SYNC_UUID did not get through. Undo the modifications
2484 the last start of resync as sync source made to our UUIDs. */
2486 if (mdev->agreed_pro_version < 91)
2489 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2490 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2492 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2493 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2494 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2502 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2503 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2504 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2510 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2511 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2512 if (self == peer && self != ((u64)0))
2516 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2517 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2518 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2519 peer = mdev->p_uuid[j] & ~((u64)1);
2528 /* drbd_sync_handshake() returns the new conn state on success, or
2529 C_MASK (-1) on failure.
2531 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2532 enum drbd_disk_state peer_disk) __must_hold(local)
2535 enum drbd_conns rv = C_MASK;
2536 enum drbd_disk_state mydisk;
2538 mydisk = mdev->state.disk;
2539 if (mydisk == D_NEGOTIATING)
2540 mydisk = mdev->new_state_tmp.disk;
2542 dev_info(DEV, "drbd_sync_handshake:\n");
2543 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2544 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2545 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2547 hg = drbd_uuid_compare(mdev, &rule_nr);
2549 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2552 dev_alert(DEV, "Unrelated data, aborting!\n");
2556 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2560 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2561 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2562 int f = (hg == -100) || abs(hg) == 2;
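/* f remembers whether a full sync was already indicated; |hg| == 2
 * means "set bitmap", i.e. full sync (see the rule table above). */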
2563 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2566 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2567 hg > 0 ? "source" : "target");
2571 drbd_khelper(mdev, "initial-split-brain");
2573 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2574 int pcount = (mdev->state.role == R_PRIMARY)
2575 + (peer_role == R_PRIMARY);
2576 int forced = (hg == -100);
2580 hg = drbd_asb_recover_0p(mdev);
2583 hg = drbd_asb_recover_1p(mdev);
2586 hg = drbd_asb_recover_2p(mdev);
2589 if (abs(hg) < 100) {
2590 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2591 "automatically solved. Sync from %s node\n",
2592 pcount, (hg < 0) ? "peer" : "this");
2594 dev_warn(DEV, "Doing a full sync, since"
2595 " UUIDs were ambiguous.\n");
2602 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2604 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2608 dev_warn(DEV, "Split-Brain detected, manually solved. "
2609 "Sync from %s node\n",
2610 (hg < 0) ? "peer" : "this");
2614 /* FIXME this log message is not correct if we end up here
2615 * after an attempted attach on a diskless node.
2616 * We just refuse to attach -- well, we drop the "connection"
2617 * to that disk, in a way... */
2618 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2619 drbd_khelper(mdev, "split-brain");
2623 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2624 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2628 if (hg < 0 && /* by intention we do not use mydisk here. */
2629 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2630 switch (mdev->net_conf->rr_conflict) {
2631 case ASB_CALL_HELPER:
2632 drbd_khelper(mdev, "pri-lost");
2634 case ASB_DISCONNECT:
2635 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2638 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2643 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2645 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2647 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2648 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2649 abs(hg) >= 2 ? "full" : "bit-map based");
2654 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2655 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2656 BM_LOCKED_SET_ALLOWED))
2660 if (hg > 0) { /* become sync source. */
2662 } else if (hg < 0) { /* become sync target */
2666 if (drbd_bm_total_weight(mdev)) {
2667 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2668 drbd_bm_total_weight(mdev));
2675 /* returns 1 if invalid */
2676 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2678 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2679 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2680 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2683 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2684 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2685 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2688 /* everything else is valid if they are equal on both sides. */
2692 /* everything else is invalid. */
2696 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2698 struct p_protocol *p = &mdev->data.rbuf.protocol;
2699 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2700 int p_want_lose, p_two_primaries, cf;
2701 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2703 p_proto = be32_to_cpu(p->protocol);
2704 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2705 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2706 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2707 p_two_primaries = be32_to_cpu(p->two_primaries);
2708 cf = be32_to_cpu(p->conn_flags);
2709 p_want_lose = cf & CF_WANT_LOSE;
2711 clear_bit(CONN_DRY_RUN, &mdev->flags);
2713 if (cf & CF_DRY_RUN)
2714 set_bit(CONN_DRY_RUN, &mdev->flags);
2716 if (p_proto != mdev->net_conf->wire_protocol) {
2717 dev_err(DEV, "incompatible communication protocols\n");
2721 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2722 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2726 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2727 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2731 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2732 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2736 if (p_want_lose && mdev->net_conf->want_lose) {
2737 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2741 if (p_two_primaries != mdev->net_conf->two_primaries) {
2742 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2746 if (mdev->agreed_pro_version >= 87) {
2747 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2749 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2752 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
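/* force NUL termination, in case the peer sent an unterminated string */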
2753 if (strcmp(p_integrity_alg, my_alg)) {
2754 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2757 dev_info(DEV, "data-integrity-alg: %s\n",
2758 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2764 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2769 * input: alg name, feature name
2770 * return: NULL (alg name was "")
2771 * ERR_PTR(error) if something goes wrong
2772 * or the crypto hash ptr, if it worked out ok. */
2773 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2774 const char *alg, const char *name)
2776 struct crypto_hash *tfm;
2781 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2783 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2784 alg, name, PTR_ERR(tfm));
2787 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2788 crypto_free_hash(tfm);
2789 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2790 return ERR_PTR(-EINVAL);
2795 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2798 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2799 unsigned int header_size, data_size, exp_max_sz;
2800 struct crypto_hash *verify_tfm = NULL;
2801 struct crypto_hash *csums_tfm = NULL;
2802 const int apv = mdev->agreed_pro_version;
2803 int *rs_plan_s = NULL;
2806 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2807 : apv == 88 ? sizeof(struct p_rs_param)
2809 : apv <= 94 ? sizeof(struct p_rs_param_89)
2810 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2812 if (packet_size > exp_max_sz) {
2813 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2814 packet_size, exp_max_sz);
2819 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2820 data_size = packet_size - header_size;
2821 } else if (apv <= 94) {
2822 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2823 data_size = packet_size - header_size;
2824 D_ASSERT(data_size == 0);
2826 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2827 data_size = packet_size - header_size;
2828 D_ASSERT(data_size == 0);
2831 /* initialize verify_alg and csums_alg */
2832 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2834 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2837 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2841 if (data_size > SHARED_SECRET_MAX) {
2842 dev_err(DEV, "verify-alg too long, "
2843 "peer wants %u, accepting only %u byte\n",
2844 data_size, SHARED_SECRET_MAX);
2848 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2851 /* we expect a NUL terminated string */
2852 /* but just in case someone tries to be evil */
2853 D_ASSERT(p->verify_alg[data_size-1] == 0);
2854 p->verify_alg[data_size-1] = 0;
2856 } else /* apv >= 89 */ {
2857 /* we still expect NUL terminated strings */
2858 /* but just in case someone tries to be evil */
2859 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2860 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2861 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2862 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2865 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2866 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2867 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2868 mdev->sync_conf.verify_alg, p->verify_alg);
2871 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2872 p->verify_alg, "verify-alg");
2873 if (IS_ERR(verify_tfm)) {
2879 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2880 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2881 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2882 mdev->sync_conf.csums_alg, p->csums_alg);
2885 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2886 p->csums_alg, "csums-alg");
2887 if (IS_ERR(csums_tfm)) {
2894 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2895 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2896 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2897 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2898 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
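/* c_plan_ahead is configured in tenths of a second; fifo_size is the
 * number of SLEEP_TIME steps that cover the plan-ahead window. */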
2900 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2901 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2902 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2904 dev_err(DEV, "kmalloc of fifo_buffer failed");
2910 spin_lock(&mdev->peer_seq_lock);
2911 /* lock against drbd_nl_syncer_conf() */
2913 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2914 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2915 crypto_free_hash(mdev->verify_tfm);
2916 mdev->verify_tfm = verify_tfm;
2917 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2920 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2921 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2922 crypto_free_hash(mdev->csums_tfm);
2923 mdev->csums_tfm = csums_tfm;
2924 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2926 if (fifo_size != mdev->rs_plan_s.size) {
2927 kfree(mdev->rs_plan_s.values);
2928 mdev->rs_plan_s.values = rs_plan_s;
2929 mdev->rs_plan_s.size = fifo_size;
2930 mdev->rs_planed = 0;
2932 spin_unlock(&mdev->peer_seq_lock);
2937 /* just for completeness: actually not needed,
2938 * as this is not reached if csums_tfm was ok. */
2939 crypto_free_hash(csums_tfm);
2940 /* but free the verify_tfm again, if csums_tfm did not work out */
2941 crypto_free_hash(verify_tfm);
2942 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2946 /* warn if the arguments differ by more than 12.5% */
2947 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2948 const char *s, sector_t a, sector_t b)
2951 if (a == 0 || b == 0)
2953 d = (a > b) ? (a - b) : (b - a);
2954 if (d > (a>>3) || d > (b>>3))
2955 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2956 (unsigned long long)a, (unsigned long long)b);
2959 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2961 struct p_sizes *p = &mdev->data.rbuf.sizes;
2962 enum determine_dev_size dd = unchanged;
2963 sector_t p_size, p_usize, my_usize;
2964 int ldsc = 0; /* local disk size changed */
2965 enum dds_flags ddsf;
2967 p_size = be64_to_cpu(p->d_size);
2968 p_usize = be64_to_cpu(p->u_size);
2970 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2971 dev_err(DEV, "some backing storage is needed\n");
2972 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2976 /* just store the peer's disk size for now.
2977 * we still need to figure out whether we accept that. */
2978 mdev->p_size = p_size;
2980 if (get_ldev(mdev)) {
2981 warn_if_differ_considerably(mdev, "lower level device sizes",
2982 p_size, drbd_get_max_capacity(mdev->ldev));
2983 warn_if_differ_considerably(mdev, "user requested size",
2984 p_usize, mdev->ldev->dc.disk_size);
2986 /* if this is the first connect, or an otherwise expected
2987 * param exchange, choose the minimum */
2988 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2989 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2992 my_usize = mdev->ldev->dc.disk_size;
2994 if (mdev->ldev->dc.disk_size != p_usize) {
2995 mdev->ldev->dc.disk_size = p_usize;
2996 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2997 (unsigned long)mdev->ldev->dc.disk_size);
3000 /* Never shrink a device with usable data during connect.
3001 But allow online shrinking if we are connected. */
3002 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
3003 drbd_get_capacity(mdev->this_bdev) &&
3004 mdev->state.disk >= D_OUTDATED &&
3005 mdev->state.conn < C_CONNECTED) {
3006 dev_err(DEV, "The peer's disk size is too small!\n");
3007 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3008 mdev->ldev->dc.disk_size = my_usize;
3015 ddsf = be16_to_cpu(p->dds_flags);
3016 if (get_ldev(mdev)) {
3017 dd = drbd_determine_dev_size(mdev, ddsf);
3019 if (dd == dev_size_error)
3023 /* I am diskless, need to accept the peer's size. */
3024 drbd_set_my_capacity(mdev, p_size);
3027 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3028 drbd_reconsider_max_bio_size(mdev);
3030 if (get_ldev(mdev)) {
3031 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3032 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3039 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3040 if (be64_to_cpu(p->c_size) !=
3041 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3042 /* we have different sizes, probably peer
3043 * needs to know my new size... */
3044 drbd_send_sizes(mdev, 0, ddsf);
3046 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3047 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3048 if (mdev->state.pdsk >= D_INCONSISTENT &&
3049 mdev->state.disk >= D_INCONSISTENT) {
3050 if (ddsf & DDSF_NO_RESYNC)
3051 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3053 resync_after_online_grow(mdev);
3055 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3062 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3064 struct p_uuids *p = &mdev->data.rbuf.uuids;
3066 int i, updated_uuids = 0;
3068 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3070 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3071 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3073 kfree(mdev->p_uuid);
3074 mdev->p_uuid = p_uuid;
3076 if (mdev->state.conn < C_CONNECTED &&
3077 mdev->state.disk < D_INCONSISTENT &&
3078 mdev->state.role == R_PRIMARY &&
3079 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3080 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3081 (unsigned long long)mdev->ed_uuid);
3082 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3086 if (get_ldev(mdev)) {
3087 int skip_initial_sync =
3088 mdev->state.conn == C_CONNECTED &&
3089 mdev->agreed_pro_version >= 90 &&
3090 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3091 (p_uuid[UI_FLAGS] & 8);
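/* flag bit 8 signals that the peer skipped its initial sync
 * (new current UUID with cleared bitmap) */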
3092 if (skip_initial_sync) {
3093 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3094 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3095 "clear_n_write from receive_uuids",
3096 BM_LOCKED_TEST_ALLOWED);
3097 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3098 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3099 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3105 } else if (mdev->state.disk < D_INCONSISTENT &&
3106 mdev->state.role == R_PRIMARY) {
3107 /* I am a diskless primary, the peer just created a new current UUID
3109 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3112 /* Before we test for the disk state, we should wait until a possibly
3113 ongoing cluster wide state change is finished. That is important if
3114 we are primary and are detaching from our disk. We need to see the
3115 new disk state... */
3116 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3117 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3118 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3121 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3127 * convert_state() - Converts the peer's view of the cluster state to our point of view
3128 * @ps: The state as seen by the peer.
3130 static union drbd_state convert_state(union drbd_state ps)
3132 union drbd_state ms;
3134 static enum drbd_conns c_tab[] = {
3135 [C_CONNECTED] = C_CONNECTED,
3137 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3138 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3139 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3140 [C_VERIFY_S] = C_VERIFY_T,
3146 ms.conn = c_tab[ps.conn];
3151 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3156 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3158 struct p_req_state *p = &mdev->data.rbuf.req_state;
3159 union drbd_state mask, val;
3160 enum drbd_state_rv rv;
3162 mask.i = be32_to_cpu(p->mask);
3163 val.i = be32_to_cpu(p->val);
3165 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3166 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3167 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3171 mask = convert_state(mask);
3172 val = convert_state(val);
3174 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3176 drbd_send_sr_reply(mdev, rv);
3182 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3184 struct p_state *p = &mdev->data.rbuf.state;
3185 union drbd_state os, ns, peer_state;
3186 enum drbd_disk_state real_peer_disk;
3187 enum chg_state_flags cs_flags;
3190 peer_state.i = be32_to_cpu(p->state);
3192 real_peer_disk = peer_state.disk;
3193 if (peer_state.disk == D_NEGOTIATING) {
3194 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3195 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3198 spin_lock_irq(&mdev->req_lock);
3200 os = ns = mdev->state;
3201 spin_unlock_irq(&mdev->req_lock);
3203 /* peer says his disk is uptodate, while we think it is inconsistent,
3204 * and this happens while we think we have a sync going on. */
3205 if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
3206 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3207 /* If we are (becoming) SyncSource, but peer is still in sync
3208 * preparation, ignore its uptodate-ness to avoid flapping, it
3209 * will change to inconsistent once the peer reaches active
3211 * It may have changed syncer-paused flags, however, so we
3212 * cannot ignore this completely. */
3213 if (peer_state.conn > C_CONNECTED &&
3214 peer_state.conn < C_SYNC_SOURCE)
3215 real_peer_disk = D_INCONSISTENT;
3217 /* if peer_state changes to connected at the same time,
3218 * it explicitly notifies us that it finished resync.
3219 * Maybe we should finish it up, too? */
3220 else if (os.conn >= C_SYNC_SOURCE &&
3221 peer_state.conn == C_CONNECTED) {
3222 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3223 drbd_resync_finished(mdev);
3228 /* peer says his disk is inconsistent, while we think it is uptodate,
3229 * and this happens while the peer still thinks we have a sync going on,
3230 * but we think we are already done with the sync.
3231 * We ignore this to avoid flapping pdsk.
3232 * This should not happen, if the peer is a recent version of drbd. */
3233 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3234 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3235 real_peer_disk = D_UP_TO_DATE;
3237 if (ns.conn == C_WF_REPORT_PARAMS)
3238 ns.conn = C_CONNECTED;
3240 if (peer_state.conn == C_AHEAD)
3243 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3244 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3245 int cr; /* consider resync */
3247 /* if we established a new connection */
3248 cr = (os.conn < C_CONNECTED);
3249 /* if we had an established connection
3250 * and one of the nodes newly attaches a disk */
3251 cr |= (os.conn == C_CONNECTED &&
3252 (peer_state.disk == D_NEGOTIATING ||
3253 os.disk == D_NEGOTIATING));
3254 /* if we have both been inconsistent, and the peer has been
3255 * forced to be UpToDate with --overwrite-data */
3256 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3257 /* if we had been plain connected, and the admin requested to
3258 * start a sync by "invalidate" or "invalidate-remote" */
3259 cr |= (os.conn == C_CONNECTED &&
3260 (peer_state.conn >= C_STARTING_SYNC_S &&
3261 peer_state.conn <= C_WF_BITMAP_T));
3264 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3267 if (ns.conn == C_MASK) {
3268 ns.conn = C_CONNECTED;
3269 if (mdev->state.disk == D_NEGOTIATING) {
3270 drbd_force_state(mdev, NS(disk, D_FAILED));
3271 } else if (peer_state.disk == D_NEGOTIATING) {
3272 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3273 peer_state.disk = D_DISKLESS;
3274 real_peer_disk = D_DISKLESS;
3276 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3278 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3279 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3285 spin_lock_irq(&mdev->req_lock);
3286 if (mdev->state.i != os.i)
3288 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3289 ns.peer = peer_state.role;
3290 ns.pdsk = real_peer_disk;
3291 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3292 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3293 ns.disk = mdev->new_state_tmp.disk;
3294 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
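/* only the transition into a connected state may be refused;
 * every other change here is applied forcibly (CS_HARD) */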
3295 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3296 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3297 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3298 for temporary network outages! */
3299 spin_unlock_irq(&mdev->req_lock);
3300 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3302 drbd_uuid_new_current(mdev);
3303 clear_bit(NEW_CUR_UUID, &mdev->flags);
3304 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3307 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3309 spin_unlock_irq(&mdev->req_lock);
3311 if (rv < SS_SUCCESS) {
3312 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3316 if (os.conn > C_WF_REPORT_PARAMS) {
3317 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3318 peer_state.disk != D_NEGOTIATING ) {
3319 /* we want resync, peer has not yet decided to sync... */
3320 /* Nowadays only used when forcing a node into primary role and
3321 setting its disk to UpToDate with that */
3322 drbd_send_uuids(mdev);
3323 drbd_send_state(mdev);
3327 mdev->net_conf->want_lose = 0;
3329 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3334 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3336 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3338 wait_event(mdev->misc_wait,
3339 mdev->state.conn == C_WF_SYNC_UUID ||
3340 mdev->state.conn == C_BEHIND ||
3341 mdev->state.conn < C_CONNECTED ||
3342 mdev->state.disk < D_NEGOTIATING);
3344 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3346 /* Here the _drbd_uuid_ functions are right, current should
3347 _not_ be rotated into the history */
3348 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3349 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3350 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3352 drbd_print_uuids(mdev, "updated sync uuid");
3353 drbd_start_resync(mdev, C_SYNC_TARGET);
3357 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3363 * receive_bitmap_plain
3365 * Return 0 when done, 1 when another iteration is needed, and a negative error
3366 * code upon failure.
3369 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3370 unsigned long *buffer, struct bm_xfer_ctx *c)
3372 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3373 unsigned want = num_words * sizeof(long);
3376 if (want != data_size) {
3377 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3382 err = drbd_recv(mdev, buffer, want);
3389 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3391 c->word_offset += num_words;
3392 c->bit_offset = c->word_offset * BITS_PER_LONG;
3393 if (c->bit_offset > c->bm_bits)
3394 c->bit_offset = c->bm_bits;
3402 * Return 0 when done, 1 when another iteration is needed, and a negative error
3403 * code upon failure.
3406 recv_bm_rle_bits(struct drbd_conf *mdev,
3407 struct p_compressed_bm *p,
3408 struct bm_xfer_ctx *c)
3410 struct bitstream bs;
3414 unsigned long s = c->bit_offset;
3416 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3417 int toggle = DCBP_get_start(p);
3421 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3423 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3427 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3428 bits = vli_decode_bits(&rl, look_ahead);
3434 if (e >= c->bm_bits) {
3435 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3438 _drbd_bm_set_bits(mdev, s, e);
3442 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3443 have, bits, look_ahead,
3444 (unsigned int)(bs.cur.b - p->code),
3445 (unsigned int)bs.buf_len);
3448 look_ahead >>= bits;
3451 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3454 look_ahead |= tmp << have;
3459 bm_xfer_ctx_bit_to_word_offset(c);
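/* return 1 as long as bits are outstanding, 0 once the whole bitmap is covered */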
3461 return (s != c->bm_bits);
3467 * Return 0 when done, 1 when another iteration is needed, and a negative error
3468 * code upon failure.
3471 decode_bitmap_c(struct drbd_conf *mdev,
3472 struct p_compressed_bm *p,
3473 struct bm_xfer_ctx *c)
3475 if (DCBP_get_code(p) == RLE_VLI_Bits)
3476 return recv_bm_rle_bits(mdev, p, c);
3478 /* other variants had been implemented for evaluation,
3479 * but have been dropped as this one turned out to be "best"
3480 * during all our tests. */
3482 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3483 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3487 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3488 const char *direction, struct bm_xfer_ctx *c)
3490 /* what would it take to transfer it "plaintext" */
3491 unsigned plain = sizeof(struct p_header80) *
3492 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3493 + c->bm_words * sizeof(long);
3494 unsigned total = c->bytes[0] + c->bytes[1];
3497 /* total cannot be zero, but just in case: */
3501 /* don't report if not compressed */
3505 /* total < plain. check for overflow, still */
3506 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3507 : (1000 * total / plain);
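/* r is a per-mille value, so plain integer arithmetic still yields
 * one decimal place in the message below */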
3513 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3514 "total %u; compression: %u.%u%%\n",
3516 c->bytes[1], c->packets[1],
3517 c->bytes[0], c->packets[0],
3518 total, r/10, r % 10);
3521 /* Since we are processing the bitfield from lower addresses to higher,
3522 it does not matter whether we process it in 32 bit or 64 bit chunks,
3523 as long as it is little endian. (Understand it as a byte stream,
3524 beginning with the lowest byte...) If we used big endian
3525 we would need to process it from the highest address to the lowest,
3526 in order to be agnostic to the 32 vs 64 bits issue.
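For example, bit 37 lives in byte 4 (37 / 8) of the stream; reading the
buffer as two 32 bit words or as one 64 bit word yields the same bytes
on little endian, so the chunk size is irrelevant there.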
3528 returns 0 on failure, 1 if we successfully received it. */
3529 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3531 struct bm_xfer_ctx c;
3535 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3537 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3538 /* you are supposed to send additional out-of-sync information
3539 * if you actually set bits during this phase */
3541 /* maybe we should use some per thread scratch page,
3542 * and allocate that during initial device creation? */
3543 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3545 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3549 c = (struct bm_xfer_ctx) {
3550 .bm_bits = drbd_bm_bits(mdev),
3551 .bm_words = drbd_bm_words(mdev),
3555 if (cmd == P_BITMAP) {
3556 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
3557 } else if (cmd == P_COMPRESSED_BITMAP) {
3558 /* MAYBE: sanity check that we speak proto >= 90,
3559 * and the feature is enabled! */
3560 struct p_compressed_bm *p;
3562 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3563 dev_err(DEV, "ReportCBitmap packet too large\n");
3566 /* use the page buffer */
3568 memcpy(p, h, sizeof(*h));
3569 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3571 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3572 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3575 err = decode_bitmap_c(mdev, p, &c);
3577 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3581 c.packets[cmd == P_BITMAP]++;
3582 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
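/* index 1 accumulates plain P_BITMAP traffic, index 0 the compressed variant */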
3589 if (!drbd_recv_header(mdev, &cmd, &data_size))
3593 INFO_bm_xfer_stats(mdev, "receive", &c);
3595 if (mdev->state.conn == C_WF_BITMAP_T) {
3596 enum drbd_state_rv rv;
3598 ok = !drbd_send_bitmap(mdev);
3601 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3602 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3603 D_ASSERT(rv == SS_SUCCESS);
3604 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3605 /* admin may have requested C_DISCONNECTING,
3606 * other threads may have noticed network errors */
3607 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3608 drbd_conn_str(mdev->state.conn));
3613 drbd_bm_unlock(mdev);
3614 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3615 drbd_start_resync(mdev, C_SYNC_SOURCE);
3616 free_page((unsigned long) buffer);
3620 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3622 /* TODO zero copy sink :) */
3623 static char sink[128];
3626 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3631 want = min_t(int, size, sizeof(sink));
3632 r = drbd_recv(mdev, sink, want);
3633 ERR_IF(r <= 0) break;
3639 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3641 /* Make sure we've acked all the TCP data associated
3642 * with the data requests being unplugged */
3643 drbd_tcp_quickack(mdev->data.socket);
3648 static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3650 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3652 switch (mdev->state.conn) {
3653 case C_WF_SYNC_UUID:
3658 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3659 drbd_conn_str(mdev->state.conn));
3662 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3667 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3672 drbd_cmd_handler_f function;
3675 static struct data_cmd drbd_cmd_handler[] = {
3676 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3677 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3678 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3679 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3680 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3681 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3682 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3683 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3684 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3685 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3686 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3687 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3688 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3689 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3690 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3691 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3692 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3693 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3694 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3695 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3696 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3697 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
3698 /* anything missing from this table is in
3699 * the asender_tbl, see get_asender_cmd */
3700 [P_MAX_CMD] = { 0, 0, NULL },
3703 /* All handler functions that expect a sub-header get that sub-header in
3704 mdev->data.rbuf.header.head.payload.
3706 Usually in mdev->data.rbuf.header.head the callback can find the usual
3707 p_header, but it may not rely on that, since there is also p_header95!
3710 static void drbdd(struct drbd_conf *mdev)
3712 union p_header *header = &mdev->data.rbuf.header;
3713 unsigned int packet_size;
3714 enum drbd_packets cmd;
3715 size_t shs; /* sub header size */
3718 while (get_t_state(&mdev->receiver) == Running) {
3719 drbd_thread_current_set_cpu(mdev);
3720 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3723 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3724 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3728 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3729 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3730 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3735 rv = drbd_recv(mdev, &header->h80.payload, shs);
3736 if (unlikely(rv != shs)) {
3737 if (!signal_pending(current))
3738 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
3743 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3745 if (unlikely(!rv)) {
3746 dev_err(DEV, "error receiving %s, l: %d!\n",
3747 cmdname(cmd), packet_size);
3754 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3756 /* If we leave here, we probably want to update at least the
3757 * "Connected" indicator on stable storage. Do so explicitly here. */
3761 void drbd_flush_workqueue(struct drbd_conf *mdev)
3763 struct drbd_wq_barrier barr;
3765 barr.w.cb = w_prev_work_done;
3766 init_completion(&barr.done);
3767 drbd_queue_work(&mdev->data.work, &barr.w);
3768 wait_for_completion(&barr.done);
3771 static void drbd_disconnect(struct drbd_conf *mdev)
3773 enum drbd_fencing_p fp;
3774 union drbd_state os, ns;
3775 int rv = SS_UNKNOWN_ERROR;
3778 if (mdev->state.conn == C_STANDALONE)
3781 /* asender does not clean up anything. it must not interfere, either */
3782 drbd_thread_stop(&mdev->asender);
3783 drbd_free_sock(mdev);
3785 /* wait for current activity to cease. */
3786 spin_lock_irq(&mdev->req_lock);
3787 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3788 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3789 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3790 spin_unlock_irq(&mdev->req_lock);
3792 /* We do not have data structures that would allow us to
3793 * get the rs_pending_cnt down to 0 again.
3794 * * On C_SYNC_TARGET we do not have any data structures describing
3795 * the pending RSDataRequest's we have sent.
3796 * * On C_SYNC_SOURCE there is no data structure that tracks
3797 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3798 * And no, it is not the sum of the reference counts in the
3799 * resync_LRU. The resync_LRU tracks the whole operation including
3800 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3802 drbd_rs_cancel_all(mdev);
3804 mdev->rs_failed = 0;
3805 atomic_set(&mdev->rs_pending_cnt, 0);
3806 wake_up(&mdev->misc_wait);
3808 del_timer(&mdev->request_timer);
3810 /* make sure syncer is stopped and w_resume_next_sg queued */
3811 del_timer_sync(&mdev->resync_timer);
3812 resync_timer_fn((unsigned long)mdev);
3814 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3815 * w_make_resync_request etc. which may still be on the worker queue
3816 * to be "canceled" */
3817 drbd_flush_workqueue(mdev);
3819 /* This also does reclaim_net_ee(). If we do this too early, we might
3820 * miss some resync ee and pages.*/
3821 drbd_process_done_ee(mdev);
3823 kfree(mdev->p_uuid);
3824 mdev->p_uuid = NULL;
3826 if (!is_susp(mdev->state))
3829 dev_info(DEV, "Connection closed\n");
3834 if (get_ldev(mdev)) {
3835 fp = mdev->ldev->dc.fencing;
3839 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3840 drbd_try_outdate_peer_async(mdev);
3842 spin_lock_irq(&mdev->req_lock);
3844 if (os.conn >= C_UNCONNECTED) {
3845 /* Do not restart in case we are C_DISCONNECTING */
3847 ns.conn = C_UNCONNECTED;
3848 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3850 spin_unlock_irq(&mdev->req_lock);
3852 if (os.conn == C_DISCONNECTING) {
3853 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3855 crypto_free_hash(mdev->cram_hmac_tfm);
3856 mdev->cram_hmac_tfm = NULL;
3858 kfree(mdev->net_conf);
3859 mdev->net_conf = NULL;
3860 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3863 /* serialize with bitmap writeout triggered by the state change,
3865 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3867 /* tcp_close and release of sendpage pages can be deferred. I don't
3868 * want to use SO_LINGER, because apparently it can be deferred for
3869 * more than 20 seconds (longest time I checked).
3871 * Actually we don't care for exactly when the network stack does its
3872 * put_page(), but release our reference on these pages right here.
3874 i = drbd_release_ee(mdev, &mdev->net_ee);
3876 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3877 i = atomic_read(&mdev->pp_in_use_by_net);
3879 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3880 i = atomic_read(&mdev->pp_in_use);
3882 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3884 D_ASSERT(list_empty(&mdev->read_ee));
3885 D_ASSERT(list_empty(&mdev->active_ee));
3886 D_ASSERT(list_empty(&mdev->sync_ee));
3887 D_ASSERT(list_empty(&mdev->done_ee));
3889 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3890 atomic_set(&mdev->current_epoch->epoch_size, 0);
3891 D_ASSERT(list_empty(&mdev->current_epoch->list));
3895 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3896 * we can agree on is stored in agreed_pro_version.
3898 feature flags and the reserved array should provide enough room for future
3899 * enhancements of the handshake protocol, and possible plugins...
3901 * for now, they are expected to be zero, but ignored.
3903 static int drbd_send_handshake(struct drbd_conf *mdev)
3905 /* ASSERT current == mdev->receiver ... */
3906 struct p_handshake *p = &mdev->data.sbuf.handshake;
3909 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3910 dev_err(DEV, "interrupted during initial handshake\n");
3911 return 0; /* interrupted. not ok. */
3914 if (mdev->data.socket == NULL) {
3915 mutex_unlock(&mdev->data.mutex);
3919 memset(p, 0, sizeof(*p));
3920 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3921 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3922 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3923 (struct p_header80 *)p, sizeof(*p), 0 );
3924 mutex_unlock(&mdev->data.mutex);
3930 * 1 yes, we have a valid connection
3931 * 0 oops, did not work out, please try again
3932 * -1 peer talks different language,
3933 * no point in trying again, please go standalone.
3935 static int drbd_do_handshake(struct drbd_conf *mdev)
3937 /* ASSERT current == mdev->receiver ... */
3938 struct p_handshake *p = &mdev->data.rbuf.handshake;
3939 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3940 unsigned int length;
3941 enum drbd_packets cmd;
3944 rv = drbd_send_handshake(mdev);
3948 rv = drbd_recv_header(mdev, &cmd, &length);
3952 if (cmd != P_HAND_SHAKE) {
3953 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3958 if (length != expect) {
3959 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3964 rv = drbd_recv(mdev, &p->head.payload, expect);
3967 if (!signal_pending(current))
3968 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
3972 p->protocol_min = be32_to_cpu(p->protocol_min);
3973 p->protocol_max = be32_to_cpu(p->protocol_max);
3974 if (p->protocol_max == 0)
3975 p->protocol_max = p->protocol_min;
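/* peers that predate the protocol_max field send 0 there */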
3977 if (PRO_VERSION_MAX < p->protocol_min ||
3978 PRO_VERSION_MIN > p->protocol_max)
3981 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3983 dev_info(DEV, "Handshake successful: "
3984 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3989 dev_err(DEV, "incompatible DRBD dialects: "
3990 "I support %d-%d, peer supports %d-%d\n",
3991 PRO_VERSION_MIN, PRO_VERSION_MAX,
3992 p->protocol_min, p->protocol_max);
3996 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3997 static int drbd_do_auth(struct drbd_conf *mdev)
3999 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4000 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4004 #define CHALLENGE_LEN 64
4008 0 - failed, try again (network error),
4009 -1 - auth failed, don't try again.
4012 static int drbd_do_auth(struct drbd_conf *mdev)
4014 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4015 struct scatterlist sg;
4016 char *response = NULL;
4017 char *right_response = NULL;
4018 char *peers_ch = NULL;
4019 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4020 unsigned int resp_size;
4021 struct hash_desc desc;
4022 enum drbd_packets cmd;
4023 unsigned int length;
4026 desc.tfm = mdev->cram_hmac_tfm;
4029 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4030 (u8 *)mdev->net_conf->shared_secret, key_len);
4032 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4037 get_random_bytes(my_challenge, CHALLENGE_LEN);
4039 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4043 rv = drbd_recv_header(mdev, &cmd, &length);
4047 if (cmd != P_AUTH_CHALLENGE) {
4048 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4054 if (length > CHALLENGE_LEN * 2) {
4055 dev_err(DEV, "expected AuthChallenge payload too big.\n");
4060 peers_ch = kmalloc(length, GFP_NOIO);
4061 if (peers_ch == NULL) {
4062 dev_err(DEV, "kmalloc of peers_ch failed\n");
4067 rv = drbd_recv(mdev, peers_ch, length);
4070 if (!signal_pending(current))
4071 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
4076 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4077 response = kmalloc(resp_size, GFP_NOIO);
4078 if (response == NULL) {
4079 dev_err(DEV, "kmalloc of response failed\n");
4084 sg_init_table(&sg, 1);
4085 sg_set_buf(&sg, peers_ch, length);
4087 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4089 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4094 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4098 rv = drbd_recv_header(mdev, &cmd, &length);
4102 if (cmd != P_AUTH_RESPONSE) {
4103 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4109 if (length != resp_size) {
4110 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
4115 rv = drbd_recv(mdev, response , resp_size);
4117 if (rv != resp_size) {
4118 if (!signal_pending(current))
4119 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4124 right_response = kmalloc(resp_size, GFP_NOIO);
4125 if (right_response == NULL) {
4126 dev_err(DEV, "kmalloc of right_response failed\n");
4131 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4133 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4135 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4140 rv = !memcmp(response, right_response, resp_size);
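/* rv is 1 (authenticated) iff the peer's response matches our own computation */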
4143 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4144 resp_size, mdev->net_conf->cram_hmac_alg);
4151 kfree(right_response);
4157 int drbdd_init(struct drbd_thread *thi)
4159 struct drbd_conf *mdev = thi->mdev;
4160 unsigned int minor = mdev_to_minor(mdev);
4163 sprintf(current->comm, "drbd%d_receiver", minor);
4165 dev_info(DEV, "receiver (re)started\n");
4168 h = drbd_connect(mdev);
4170 drbd_disconnect(mdev);
4171 schedule_timeout_interruptible(HZ);
4174 dev_warn(DEV, "Discarding network configuration.\n");
4175 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4180 if (get_net_conf(mdev)) {
4186 drbd_disconnect(mdev);
4188 dev_info(DEV, "receiver terminated\n");
4192 /* ********* acknowledge sender ******** */
4194 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4196 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4198 int retcode = be32_to_cpu(p->retcode);
4200 if (retcode >= SS_SUCCESS) {
4201 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4203 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4204 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4205 drbd_set_st_err_str(retcode), retcode);
4207 wake_up(&mdev->state_wait);
4212 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4214 return drbd_send_ping_ack(mdev);
4218 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4220 /* restore idle timeout */
4221 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4222 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4223 wake_up(&mdev->misc_wait);
4228 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4230 struct p_block_ack *p = (struct p_block_ack *)h;
4231 sector_t sector = be64_to_cpu(p->sector);
4232 int blksize = be32_to_cpu(p->blksize);
4234 D_ASSERT(mdev->agreed_pro_version >= 89);
4236 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4238 if (get_ldev(mdev)) {
4239 drbd_rs_complete_io(mdev, sector);
4240 drbd_set_in_sync(mdev, sector, blksize);
4241 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4242 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4245 dec_rs_pending(mdev);
4246 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4252 validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4253 struct rb_root *root, const char *func,
4254 enum drbd_req_event what, bool missing_ok)
4256 struct drbd_request *req;
4257 struct bio_and_error m;
4259 spin_lock_irq(&mdev->req_lock);
4260 req = find_request(mdev, root, id, sector, missing_ok, func);
4261 if (unlikely(!req)) {
4262 spin_unlock_irq(&mdev->req_lock);
4265 __req_mod(req, what, &m);
4266 spin_unlock_irq(&mdev->req_lock);
4269 complete_master_bio(mdev, &m);
4273 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4275 struct p_block_ack *p = (struct p_block_ack *)h;
4276 sector_t sector = be64_to_cpu(p->sector);
4277 int blksize = be32_to_cpu(p->blksize);
4278 enum drbd_req_event what;
4280 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4282 if (p->block_id == ID_SYNCER) {
4283 drbd_set_in_sync(mdev, sector, blksize);
4284 dec_rs_pending(mdev);
4287 switch (be16_to_cpu(h->command)) {
4288 case P_RS_WRITE_ACK:
4289 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4290 what = write_acked_by_peer_and_sis;
4293 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4294 what = write_acked_by_peer;
4297 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4298 what = recv_acked_by_peer;
4301 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4302 what = conflict_discarded_by_peer;
4309 return validate_req_change_req_state(mdev, p->block_id, sector,
4310 &mdev->write_requests, __func__,
4314 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4316 struct p_block_ack *p = (struct p_block_ack *)h;
4317 sector_t sector = be64_to_cpu(p->sector);
4318 int size = be32_to_cpu(p->blksize);
4319 bool missing_ok = mdev->net_conf->wire_protocol == DRBD_PROT_A ||
4320 mdev->net_conf->wire_protocol == DRBD_PROT_B;
4323 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4325 if (p->block_id == ID_SYNCER) {
4326 dec_rs_pending(mdev);
4327 drbd_rs_failed_io(mdev, sector, size);
4331 found = validate_req_change_req_state(mdev, p->block_id, sector,
4332 &mdev->write_requests, __func__,
4333 neg_acked, missing_ok);
4335 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
4336 The master bio might already be completed, therefore the
4337 request is no longer in the collision hash. */
4338 /* In Protocol B we might already have got a P_RECV_ACK
4339 but then get a P_NEG_ACK afterwards. */
4342 drbd_set_out_of_sync(mdev, sector, size);
4347 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4349 struct p_block_ack *p = (struct p_block_ack *)h;
4350 sector_t sector = be64_to_cpu(p->sector);
4352 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4353 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4354 (unsigned long long)sector, be32_to_cpu(p->blksize));
4356 return validate_req_change_req_state(mdev, p->block_id, sector,
4357 &mdev->read_requests, __func__,
4361 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4365 struct p_block_ack *p = (struct p_block_ack *)h;
4367 sector = be64_to_cpu(p->sector);
4368 size = be32_to_cpu(p->blksize);
4370 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4372 dec_rs_pending(mdev);
4374 if (get_ldev_if_state(mdev, D_FAILED)) {
4375 drbd_rs_complete_io(mdev, sector);
4376 switch (be16_to_cpu(h->command)) {
4377 case P_NEG_RS_DREPLY:
4378 drbd_rs_failed_io(mdev, sector, size);
4392 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4394 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4396 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
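/* If we are Ahead of the peer and no application writes are in flight
 * anymore, schedule the switch back towards resync in one second. */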
4398 if (mdev->state.conn == C_AHEAD &&
4399 atomic_read(&mdev->ap_in_flight) == 0 &&
4400 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4401 mdev->start_resync_timer.expires = jiffies + HZ;
4402 add_timer(&mdev->start_resync_timer);
4408 static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
4410 struct p_block_ack *p = (struct p_block_ack *)h;
4411 struct drbd_work *w;
4415 sector = be64_to_cpu(p->sector);
4416 size = be32_to_cpu(p->blksize);
4418 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4420 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4421 drbd_ov_oos_found(mdev, sector, size);
4425 if (!get_ldev(mdev))
4428 drbd_rs_complete_io(mdev, sector);
4429 dec_rs_pending(mdev);
4433 /* let's advance progress step marks only for every other megabyte */
4434 if ((mdev->ov_left & 0x200) == 0x200)
4435 drbd_advance_rs_marks(mdev, mdev->ov_left);
4437 if (mdev->ov_left == 0) {
4438 w = kmalloc(sizeof(*w), GFP_NOIO);
4440 w->cb = w_ov_finished;
4441 drbd_queue_work_front(&mdev->data.work, w);
4443 dev_err(DEV, "kmalloc(w) failed.");
4445 drbd_resync_finished(mdev);
4452 static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
4457 struct asender_cmd {
4459 int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply},
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply},
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
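/* The asender ("ack sender") thread owns the meta socket.  Each loop
 * iteration sends a ping when asked to, acks completed epoch entries
 * via drbd_process_done_ee(), then receives and dispatches at most one
 * packet.  Every packet on this socket starts with a p_header80: a
 * 32 bit magic, a 16 bit command and a 16 bit payload length, all big
 * endian.  Partial reads are fine; we accumulate into the rbuf until a
 * full header has arrived, then until cmd->pkt_size bytes, and only
 * then dispatch. */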
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */
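	/* Realtime priority, because delayed acks stall the peer's
	 * application writes (protocol C) and, presumably worse, may let
	 * its ping timeout expire even though we are merely busy. */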
	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);
		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != cpu_to_be32(DRBD_MAGIC))) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}
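	/* Error exits.  The "if (0) label:" construct keeps these paths out
	 * of the straight-line loop above while still being reachable by
	 * goto: "reconnect" forces C_NETWORK_FAILURE and lets the receiver
	 * try again, "disconnect" forces C_DISCONNECTING and gives up. */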
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}