4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 #include <linux/drbd.h>
33 #include <linux/file.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
45 #include <linux/string.h>
46 #include <linux/scatterlist.h>
54 struct drbd_epoch *epoch;
63 static int drbd_do_handshake(struct drbd_conf *mdev);
64 static int drbd_do_auth(struct drbd_conf *mdev);
66 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
67 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
69 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
71 struct drbd_epoch *prev;
72 spin_lock(&mdev->epoch_lock);
73 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
74 if (prev == epoch || prev == mdev->current_epoch)
76 spin_unlock(&mdev->epoch_lock);
80 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
83 * some helper functions to deal with singly linked page lists,
84 * page->private being our "next" pointer.
87 /* If at least n pages are linked at head, get n pages off.
88 * Otherwise, don't modify head, and return NULL.
89 * Locking is the responsibility of the caller.
91 static struct page *page_chain_del(struct page **head, int n)
105 tmp = page_chain_next(page);
107 break; /* found sufficient pages */
109 /* insufficient pages, don't use any of them. */
114 /* add end of list marker for the returned list */
115 set_page_private(page, 0);
116 /* actual return value, and adjustment of head */
122 /* may be used outside of locks to find the tail of a (usually short)
123 * "private" page chain, before adding it back to a global chain head
124 * with page_chain_add() under a spinlock. */
125 static struct page *page_chain_tail(struct page *page, int *len)
129 while ((tmp = page_chain_next(page)))
136 static int page_chain_free(struct page *page)
140 page_chain_for_each_safe(page, tmp) {
147 static void page_chain_add(struct page **head,
148 struct page *chain_first, struct page *chain_last)
152 tmp = page_chain_tail(chain_first, NULL);
153 BUG_ON(tmp != chain_last);
156 /* add chain to head */
157 set_page_private(chain_last, (unsigned long)*head);
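/* Illustrative sketch, not part of this file: the helpers above keep pages on a
 * singly linked chain by storing the "next" pointer in page->private, so no
 * separate list structure has to be allocated.  A standalone userspace analogue
 * of the del-from-head / add-to-head operations (all names made up):
 */
#include <stdio.h>

struct fake_page {
	unsigned long private;			/* plays the role of page->private */
};

static struct fake_page *chain_next(struct fake_page *p)
{
	return (struct fake_page *)p->private;
}

/* push one page onto the chain head, like page_chain_add() with a one-page chain */
static void chain_push(struct fake_page **head, struct fake_page *p)
{
	p->private = (unsigned long)*head;
	*head = p;
}

/* take n pages off the head, like page_chain_del(); NULL if fewer are linked */
static struct fake_page *chain_del(struct fake_page **head, int n)
{
	struct fake_page *page = *head, *tmp = page;
	int i;

	if (!page)
		return NULL;
	for (i = 1; i < n; i++) {
		tmp = chain_next(tmp);
		if (!tmp)
			return NULL;		/* insufficient pages, *head untouched */
	}
	*head = chain_next(tmp);
	tmp->private = 0;			/* end-of-list marker for the returned chain */
	return page;
}

int main(void)
{
	struct fake_page pages[4];
	struct fake_page *pool = NULL, *got;
	int i;

	for (i = 0; i < 4; i++)
		chain_push(&pool, &pages[i]);
	got = chain_del(&pool, 2);
	printf("took a chain of 2 starting at %p, pool head is now %p\n",
	       (void *)got, (void *)pool);
	return 0;
}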
161 static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
163 struct page *page = NULL;
164 struct page *tmp = NULL;
167 /* Yes, testing drbd_pp_vacant outside the lock is racy.
168 * So what. It saves a spin_lock. */
169 if (drbd_pp_vacant >= number) {
170 spin_lock(&drbd_pp_lock);
171 page = page_chain_del(&drbd_pp_pool, number);
173 drbd_pp_vacant -= number;
174 spin_unlock(&drbd_pp_lock);
179 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
180 * "criss-cross" setup, that might cause write-out on some other DRBD,
181 * which in turn might block on the other node at this very place. */
182 for (i = 0; i < number; i++) {
183 tmp = alloc_page(GFP_TRY);
186 set_page_private(tmp, (unsigned long)page);
193 /* Not enough pages immediately available this time.
194 * No need to jump around here, drbd_pp_alloc will retry this
195 * function "soon". */
197 tmp = page_chain_tail(page, NULL);
198 spin_lock(&drbd_pp_lock);
199 page_chain_add(&drbd_pp_pool, page, tmp);
201 spin_unlock(&drbd_pp_lock);
206 /* kick lower level device, if we have more than (arbitrary number)
207 * reference counts on it, which typically are locally submitted io
208 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
209 static void maybe_kick_lo(struct drbd_conf *mdev)
211 if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
215 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
217 struct drbd_epoch_entry *e;
218 struct list_head *le, *tle;
220 /* The EEs are always appended to the end of the list. Since
221 they are sent in order over the wire, they have to finish
222 in order. As soon as we see the first unfinished one we can
223 stop examining the list... */
225 list_for_each_safe(le, tle, &mdev->net_ee) {
226 e = list_entry(le, struct drbd_epoch_entry, w.list);
227 if (drbd_ee_has_active_page(e))
229 list_move(le, to_be_freed);
233 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
235 LIST_HEAD(reclaimed);
236 struct drbd_epoch_entry *e, *t;
239 spin_lock_irq(&mdev->req_lock);
240 reclaim_net_ee(mdev, &reclaimed);
241 spin_unlock_irq(&mdev->req_lock);
243 list_for_each_entry_safe(e, t, &reclaimed, w.list)
244 drbd_free_net_ee(mdev, e);
248 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
249 * @mdev: DRBD device.
250 * @number: number of pages requested
251 * @retry: whether to retry, if not enough pages are available right now
253 * Tries to allocate number pages, first from our own page pool, then from
254 * the kernel, unless this allocation would exceed the max_buffers setting.
255 * Possibly retry until DRBD frees sufficient pages somewhere else.
257 * Returns a page chain linked via page->private.
259 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
261 struct page *page = NULL;
264 /* Yes, we may run up to @number over max_buffers. If we
265 * follow it strictly, the admin will get it wrong anyways. */
266 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
267 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
269 while (page == NULL) {
270 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
272 drbd_kick_lo_and_reclaim_net(mdev);
274 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
275 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
283 if (signal_pending(current)) {
284 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
290 finish_wait(&drbd_pp_wait, &wait);
293 atomic_add(number, &mdev->pp_in_use);
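/* A sketch, not part of this file: drbd_pp_alloc() above sleeps on drbd_pp_wait
 * and re-checks the pool until pages have been freed somewhere else (or a signal
 * ends the wait).  The kernel side uses prepare_to_wait()/schedule()/finish_wait();
 * the same check-sleep-recheck shape as a standalone pthread analogue, with
 * made-up names:
 */
#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_cond = PTHREAD_COND_INITIALIZER;	/* like drbd_pp_wait */
static int pool_vacant;						/* like drbd_pp_vacant */

/* take @number items; if @retry, sleep until the freeing side wakes us up */
static int pool_take(int number, int retry)
{
	int got = 0;

	pthread_mutex_lock(&pool_lock);
	while (pool_vacant < number) {
		if (!retry)
			goto out;
		/* drops the lock while sleeping and re-acquires it before the
		 * re-check, much as schedule() runs between prepare_to_wait()
		 * and finish_wait() in the kernel loop above */
		pthread_cond_wait(&pool_cond, &pool_lock);
	}
	pool_vacant -= number;
	got = number;
out:
	pthread_mutex_unlock(&pool_lock);
	return got;
}

/* the freeing side, like drbd_pp_free(): return items and wake up waiters */
static void pool_put(int number)
{
	pthread_mutex_lock(&pool_lock);
	pool_vacant += number;
	pthread_cond_broadcast(&pool_cond);
	pthread_mutex_unlock(&pool_lock);
}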
297 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
298 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
299 * Either links the page chain back to the global pool,
300 * or returns all pages to the system. */
301 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
303 atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
306 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
307 i = page_chain_free(page);
310 tmp = page_chain_tail(page, &i);
311 spin_lock(&drbd_pp_lock);
312 page_chain_add(&drbd_pp_pool, page, tmp);
314 spin_unlock(&drbd_pp_lock);
316 i = atomic_sub_return(i, a);
318 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
319 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
320 wake_up(&drbd_pp_wait);
324 You need to hold the req_lock:
325 _drbd_wait_ee_list_empty()
327 You must not have the req_lock:
333 drbd_process_done_ee()
335 drbd_wait_ee_list_empty()
338 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
341 unsigned int data_size,
342 gfp_t gfp_mask) __must_hold(local)
344 struct drbd_epoch_entry *e;
346 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
348 if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
351 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
353 if (!(gfp_mask & __GFP_NOWARN))
354 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
358 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
362 INIT_HLIST_NODE(&e->colision);
366 atomic_set(&e->pending_bios, 0);
375 mempool_free(e, drbd_ee_mempool);
379 void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
381 if (e->flags & EE_HAS_DIGEST)
383 drbd_pp_free(mdev, e->pages, is_net);
384 D_ASSERT(atomic_read(&e->pending_bios) == 0);
385 D_ASSERT(hlist_unhashed(&e->colision));
386 mempool_free(e, drbd_ee_mempool);
389 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
391 LIST_HEAD(work_list);
392 struct drbd_epoch_entry *e, *t;
394 int is_net = list == &mdev->net_ee;
396 spin_lock_irq(&mdev->req_lock);
397 list_splice_init(list, &work_list);
398 spin_unlock_irq(&mdev->req_lock);
400 list_for_each_entry_safe(e, t, &work_list, w.list) {
401 drbd_free_some_ee(mdev, e, is_net);
409 * This function is called from _asender only_
410 * but see also comments in _req_mod(,barrier_acked)
411 * and receive_Barrier.
413 * Move entries from net_ee to done_ee, if ready.
414 * Grab done_ee, call all callbacks, free the entries.
415 * The callbacks typically send out ACKs.
417 static int drbd_process_done_ee(struct drbd_conf *mdev)
419 LIST_HEAD(work_list);
420 LIST_HEAD(reclaimed);
421 struct drbd_epoch_entry *e, *t;
422 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
424 spin_lock_irq(&mdev->req_lock);
425 reclaim_net_ee(mdev, &reclaimed);
426 list_splice_init(&mdev->done_ee, &work_list);
427 spin_unlock_irq(&mdev->req_lock);
429 list_for_each_entry_safe(e, t, &reclaimed, w.list)
430 drbd_free_net_ee(mdev, e);
432 /* possible callbacks here:
433 * e_end_block, e_end_resync_block, and e_send_discard_ack.
434 * all ignore the last argument.
436 list_for_each_entry_safe(e, t, &work_list, w.list) {
437 /* list_del not necessary, next/prev members not touched */
438 ok = e->w.cb(mdev, &e->w, !ok) && ok;
439 drbd_free_ee(mdev, e);
441 wake_up(&mdev->ee_wait);
446 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
450 /* avoids spin_lock/unlock
451 * and calling prepare_to_wait in the fast path */
452 while (!list_empty(head)) {
453 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
454 spin_unlock_irq(&mdev->req_lock);
457 finish_wait(&mdev->ee_wait, &wait);
458 spin_lock_irq(&mdev->req_lock);
462 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
464 spin_lock_irq(&mdev->req_lock);
465 _drbd_wait_ee_list_empty(mdev, head);
466 spin_unlock_irq(&mdev->req_lock);
469 /* see also kernel_accept, which is only present since 2.6.18.
470 * we also want to log exactly which part of it failed */
471 static int drbd_accept(struct drbd_conf *mdev, const char **what,
472 struct socket *sock, struct socket **newsock)
474 struct sock *sk = sock->sk;
478 err = sock->ops->listen(sock, 5);
482 *what = "sock_create_lite";
483 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
489 err = sock->ops->accept(sock, *newsock, 0);
491 sock_release(*newsock);
495 (*newsock)->ops = sock->ops;
501 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
502 void *buf, size_t size, int flags)
509 struct msghdr msg = {
511 .msg_iov = (struct iovec *)&iov,
512 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
518 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
524 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
531 struct msghdr msg = {
533 .msg_iov = (struct iovec *)&iov,
534 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
542 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
547 * ECONNRESET other side closed the connection
548 * ERESTARTSYS (on sock) we got a signal
552 if (rv == -ECONNRESET)
553 dev_info(DEV, "sock was reset by peer\n");
554 else if (rv != -ERESTARTSYS)
555 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
557 } else if (rv == 0) {
558 dev_info(DEV, "sock was shut down by peer\n");
561 /* signal came in, or peer/link went down,
562 * after we read a partial message
564 /* D_ASSERT(signal_pending(current)); */
572 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
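/* A sketch, not part of this file: drbd_recv() above distinguishes three outcomes
 * of sock_recvmsg() with MSG_WAITALL: a negative error (e.g. connection reset),
 * zero (orderly shutdown by the peer), and a short read after a signal or link
 * failure.  A standalone userspace analogue; note recv(2) reports errors via
 * errno rather than a negative return value (names made up):
 */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

/* returns 1 only if exactly @size bytes arrived */
static int recv_exact(int fd, void *buf, size_t size)
{
	ssize_t rv = recv(fd, buf, size, MSG_WAITALL);

	if (rv < 0) {
		if (errno == ECONNRESET)
			fprintf(stderr, "sock was reset by peer\n");
		else if (errno != EINTR)
			fprintf(stderr, "recv failed: errno=%d\n", errno);
		return 0;
	}
	if (rv == 0) {
		fprintf(stderr, "sock was shut down by peer\n");
		return 0;
	}
	if ((size_t)rv != size) {
		/* signal came in, or peer/link went down, after a partial message */
		fprintf(stderr, "short read: %zd of %zu bytes\n", rv, size);
		return 0;
	}
	return 1;
}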
578 * On individual connections, the socket buffer size must be set prior to the
579 * listen(2) or connect(2) calls in order to have it take effect.
580 * This is our wrapper to do so.
582 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
585 /* open coded SO_SNDBUF, SO_RCVBUF */
587 sock->sk->sk_sndbuf = snd;
588 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
591 sock->sk->sk_rcvbuf = rcv;
592 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
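/* A sketch, not part of this file: drbd_setbufsize() open-codes SO_SNDBUF /
 * SO_RCVBUF on the in-kernel socket.  The userspace equivalent is setsockopt(2),
 * which likewise has to run before connect(2) or listen(2) to take effect
 * (function name made up):
 */
#include <sys/socket.h>

/* snd/rcv of 0 means "keep the kernel's default (autotuned) size" */
static void set_bufsizes(int fd, int snd, int rcv)
{
	if (snd)
		setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
	if (rcv)
		setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
}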
596 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
600 struct sockaddr_in6 src_in6;
602 int disconnect_on_error = 1;
604 if (!get_net_conf(mdev))
607 what = "sock_create_kern";
608 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
609 SOCK_STREAM, IPPROTO_TCP, &sock);
615 sock->sk->sk_rcvtimeo =
616 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
617 drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
618 mdev->net_conf->rcvbuf_size);
620 /* explicitly bind to the configured IP as source IP
621 * for the outgoing connections.
622 * This is needed for multihomed hosts and to be
623 * able to use lo: interfaces for drbd.
624 * Make sure to use 0 as port number, so linux selects
625 * a free one dynamically.
627 memcpy(&src_in6, mdev->net_conf->my_addr,
628 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
629 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
630 src_in6.sin6_port = 0;
632 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
634 what = "bind before connect";
635 err = sock->ops->bind(sock,
636 (struct sockaddr *) &src_in6,
637 mdev->net_conf->my_addr_len);
641 /* connect may fail, peer not yet available.
642 * stay C_WF_CONNECTION, don't go Disconnecting! */
643 disconnect_on_error = 0;
645 err = sock->ops->connect(sock,
646 (struct sockaddr *)mdev->net_conf->peer_addr,
647 mdev->net_conf->peer_addr_len, 0);
656 /* timeout, busy, signal pending */
657 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
658 case EINTR: case ERESTARTSYS:
659 /* peer not (yet) available, network problem */
660 case ECONNREFUSED: case ENETUNREACH:
661 case EHOSTDOWN: case EHOSTUNREACH:
662 disconnect_on_error = 0;
665 dev_err(DEV, "%s failed, err = %d\n", what, err);
667 if (disconnect_on_error)
668 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
674 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
677 struct socket *s_estab = NULL, *s_listen;
680 if (!get_net_conf(mdev))
683 what = "sock_create_kern";
684 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
685 SOCK_STREAM, IPPROTO_TCP, &s_listen);
691 timeo = mdev->net_conf->try_connect_int * HZ;
692 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
694 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
695 s_listen->sk->sk_rcvtimeo = timeo;
696 s_listen->sk->sk_sndtimeo = timeo;
697 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
698 mdev->net_conf->rcvbuf_size);
700 what = "bind before listen";
701 err = s_listen->ops->bind(s_listen,
702 (struct sockaddr *) mdev->net_conf->my_addr,
703 mdev->net_conf->my_addr_len);
707 err = drbd_accept(mdev, &what, s_listen, &s_estab);
711 sock_release(s_listen);
713 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
714 dev_err(DEV, "%s failed, err = %d\n", what, err);
715 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
723 static int drbd_send_fp(struct drbd_conf *mdev,
724 struct socket *sock, enum drbd_packets cmd)
726 struct p_header80 *h = &mdev->data.sbuf.header.h80;
728 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
731 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
733 struct p_header80 *h = &mdev->data.rbuf.header.h80;
736 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
738 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
739 return be16_to_cpu(h->command);
745 * drbd_socket_okay() - Free the socket if its connection is not okay
746 * @mdev: DRBD device.
747 * @sock: pointer to the pointer to the socket.
749 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
757 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
759 if (rr > 0 || rr == -EAGAIN) {
770 * 1 yes, we have a valid connection
771 * 0 oops, did not work out, please try again
772 * -1 peer talks different language,
773 * no point in trying again, please go standalone.
774 * -2 We do not have a network config...
776 static int drbd_connect(struct drbd_conf *mdev)
778 struct socket *s, *sock, *msock;
781 D_ASSERT(!mdev->data.socket);
783 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
786 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
793 /* 3 tries, this should take less than a second! */
794 s = drbd_try_connect(mdev);
797 /* give the other side time to call bind() & listen() */
798 __set_current_state(TASK_INTERRUPTIBLE);
799 schedule_timeout(HZ / 10);
804 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
808 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
812 dev_err(DEV, "Logic error in drbd_connect()\n");
813 goto out_release_sockets;
818 __set_current_state(TASK_INTERRUPTIBLE);
819 schedule_timeout(HZ / 10);
820 ok = drbd_socket_okay(mdev, &sock);
821 ok = drbd_socket_okay(mdev, &msock) && ok;
827 s = drbd_wait_for_connect(mdev);
829 try = drbd_recv_fp(mdev, s);
830 drbd_socket_okay(mdev, &sock);
831 drbd_socket_okay(mdev, &msock);
835 dev_warn(DEV, "initial packet S crossed\n");
842 dev_warn(DEV, "initial packet M crossed\n");
846 set_bit(DISCARD_CONCURRENT, &mdev->flags);
849 dev_warn(DEV, "Error receiving initial packet\n");
856 if (mdev->state.conn <= C_DISCONNECTING)
857 goto out_release_sockets;
858 if (signal_pending(current)) {
859 flush_signals(current);
861 if (get_t_state(&mdev->receiver) == Exiting)
862 goto out_release_sockets;
866 ok = drbd_socket_okay(mdev, &sock);
867 ok = drbd_socket_okay(mdev, &msock) && ok;
873 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
874 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
876 sock->sk->sk_allocation = GFP_NOIO;
877 msock->sk->sk_allocation = GFP_NOIO;
879 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
880 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
883 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
884 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
885 * first set it to the P_HAND_SHAKE timeout,
886 * which we set to 4x the configured ping_timeout. */
887 sock->sk->sk_sndtimeo =
888 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
890 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
891 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
893 /* we don't want delays.
894 * we use TCP_CORK where appropriate, though */
895 drbd_tcp_nodelay(sock);
896 drbd_tcp_nodelay(msock);
898 mdev->data.socket = sock;
899 mdev->meta.socket = msock;
900 mdev->last_received = jiffies;
902 D_ASSERT(mdev->asender.task == NULL);
904 h = drbd_do_handshake(mdev);
908 if (mdev->cram_hmac_tfm) {
909 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
910 switch (drbd_do_auth(mdev)) {
912 dev_err(DEV, "Authentication of peer failed\n");
915 dev_err(DEV, "Authentication of peer failed, trying again.\n");
920 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
923 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
924 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
926 atomic_set(&mdev->packet_seq, 0);
929 drbd_thread_start(&mdev->asender);
931 if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
932 drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
936 if (!drbd_send_protocol(mdev))
938 drbd_send_sync_param(mdev, &mdev->sync_conf);
939 drbd_send_sizes(mdev, 0, 0);
940 drbd_send_uuids(mdev);
941 drbd_send_state(mdev);
942 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
943 clear_bit(RESIZE_PENDING, &mdev->flags);
955 static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
957 union p_header *h = &mdev->data.rbuf.header;
960 r = drbd_recv(mdev, h, sizeof(*h));
961 if (unlikely(r != sizeof(*h))) {
962 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
966 if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
967 *cmd = be16_to_cpu(h->h80.command);
968 *packet_size = be16_to_cpu(h->h80.length);
969 } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
970 *cmd = be16_to_cpu(h->h95.command);
971 *packet_size = be32_to_cpu(h->h95.length);
973 dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
974 be32_to_cpu(h->h80.magic),
975 be16_to_cpu(h->h80.command),
976 be16_to_cpu(h->h80.length));
979 mdev->last_received = jiffies;
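/* A sketch, not part of this file: drbd_recv_header() above accepts two on-wire
 * header layouts, the old h80 one with a 16 bit length and the h95 one with a
 * 32 bit length, told apart by their magic.  A simplified standalone parser;
 * the magics and struct layouts below are made up and are NOT DRBD's wire
 * format (assumes a suitably aligned buffer):
 */
#include <arpa/inet.h>
#include <stdint.h>

#define OLD_MAGIC  0x11223344u		/* made-up stand-in for BE_DRBD_MAGIC */
#define BIG_MAGIC  0x5566u		/* made-up stand-in for BE_DRBD_MAGIC_BIG */

struct hdr80 { uint32_t magic; uint16_t command; uint16_t length; };
struct hdr95 { uint16_t magic; uint16_t command; uint32_t length; };

/* returns 0 and fills *cmd / *len, or -1 on an unknown magic */
static int parse_header(const void *buf, unsigned int *cmd, unsigned int *len)
{
	const struct hdr80 *h80 = buf;
	const struct hdr95 *h95 = buf;

	if (ntohl(h80->magic) == OLD_MAGIC) {
		*cmd = ntohs(h80->command);
		*len = ntohs(h80->length);	/* 16 bit length limits the packet size */
	} else if (ntohs(h95->magic) == BIG_MAGIC) {
		*cmd = ntohs(h95->command);
		*len = ntohl(h95->length);	/* 32 bit length for bigger payloads */
	} else {
		return -1;
	}
	return 0;
}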
984 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
988 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
989 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
990 NULL, BLKDEV_IFL_WAIT);
992 dev_err(DEV, "local disk flush failed with status %d\n", rv);
993 /* would rather check on EOPNOTSUPP, but that is not reliable.
994 * don't try again for ANY return value != 0
995 * if (rv == -EOPNOTSUPP) */
996 drbd_bump_write_ordering(mdev, WO_drain_io);
1001 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1004 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1006 struct flush_work *fw = (struct flush_work *)w;
1007 struct drbd_epoch *epoch = fw->epoch;
1011 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
1012 drbd_flush_after_epoch(mdev, epoch);
1014 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
1015 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
1021 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
1022 * @mdev: DRBD device.
1023 * @epoch: Epoch object.
1026 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1027 struct drbd_epoch *epoch,
1028 enum epoch_event ev)
1030 int finish, epoch_size;
1031 struct drbd_epoch *next_epoch;
1032 int schedule_flush = 0;
1033 enum finish_epoch rv = FE_STILL_LIVE;
1035 spin_lock(&mdev->epoch_lock);
1040 epoch_size = atomic_read(&epoch->epoch_size);
1042 switch (ev & ~EV_CLEANUP) {
1044 atomic_dec(&epoch->active);
1046 case EV_GOT_BARRIER_NR:
1047 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1049 /* Special case: If we just switched from WO_bio_barrier to
1050 WO_bdev_flush we should not finish the current epoch */
1051 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1052 mdev->write_ordering != WO_bio_barrier &&
1053 epoch == mdev->current_epoch)
1054 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1056 case EV_BARRIER_DONE:
1057 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1059 case EV_BECAME_LAST:
1064 if (epoch_size != 0 &&
1065 atomic_read(&epoch->active) == 0 &&
1066 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1067 epoch->list.prev == &mdev->current_epoch->list &&
1068 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1069 /* Nearly all conditions are met to finish that epoch... */
1070 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1071 mdev->write_ordering == WO_none ||
1072 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1075 set_bit(DE_IS_FINISHING, &epoch->flags);
1076 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1077 mdev->write_ordering == WO_bio_barrier) {
1078 atomic_inc(&epoch->active);
1083 if (!(ev & EV_CLEANUP)) {
1084 spin_unlock(&mdev->epoch_lock);
1085 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1086 spin_lock(&mdev->epoch_lock);
1090 if (mdev->current_epoch != epoch) {
1091 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1092 list_del(&epoch->list);
1093 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1097 if (rv == FE_STILL_LIVE)
1101 atomic_set(&epoch->epoch_size, 0);
1102 /* atomic_set(&epoch->active, 0); is already zero */
1103 if (rv == FE_STILL_LIVE)
1114 spin_unlock(&mdev->epoch_lock);
1116 if (schedule_flush) {
1117 struct flush_work *fw;
1118 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1122 drbd_queue_work(&mdev->data.work, &fw->w);
1124 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1125 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1126 /* That is not a recursion, only one level */
1127 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1128 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1136 * drbd_bump_write_ordering() - Fall back to another write ordering method
1137 * @mdev: DRBD device.
1138 * @wo: Write ordering method to try.
1140 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1142 enum write_ordering_e pwo;
1143 static char *write_ordering_str[] = {
1145 [WO_drain_io] = "drain",
1146 [WO_bdev_flush] = "flush",
1147 [WO_bio_barrier] = "barrier",
1150 pwo = mdev->write_ordering;
1152 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1154 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1156 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1158 mdev->write_ordering = wo;
1159 if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1160 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
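/* A sketch, not part of this file: drbd_bump_write_ordering() above steps down
 * barrier -> flush -> drain -> none, skipping whatever the disk configuration
 * (dc.no_disk_barrier / no_disk_flush / no_disk_drain) has disabled.  The
 * selection logic in isolation, with made-up names:
 */
enum wo_sketch { WOS_none, WOS_drain, WOS_flush, WOS_barrier };

struct wo_caps {		/* stand-ins for the no_disk_* settings */
	int no_barrier, no_flush, no_drain;
};

static enum wo_sketch pick_write_ordering(enum wo_sketch wo, const struct wo_caps *c)
{
	if (wo == WOS_barrier && c->no_barrier)
		wo = WOS_flush;
	if (wo == WOS_flush && c->no_flush)
		wo = WOS_drain;
	if (wo == WOS_drain && c->no_drain)
		wo = WOS_none;
	return wo;
}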
1165 * @mdev: DRBD device.
1167 * @rw: flag field, see bio->bi_rw
1169 /* TODO allocate from our own bio_set. */
1170 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1171 const unsigned rw, const int fault_type)
1173 struct bio *bios = NULL;
1175 struct page *page = e->pages;
1176 sector_t sector = e->sector;
1177 unsigned ds = e->size;
1178 unsigned n_bios = 0;
1179 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1181 /* In most cases, we will only need one bio. But in case the lower
1182 * level restrictions happen to be different at this offset on this
1183 * side than those of the sending peer, we may need to submit the
1184 * request in more than one bio. */
1186 bio = bio_alloc(GFP_NOIO, nr_pages);
1188 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1191 /* > e->sector, unless this is the first bio */
1192 bio->bi_sector = sector;
1193 bio->bi_bdev = mdev->ldev->backing_bdev;
1194 /* we special case some flags in the multi-bio case, see below
1195 * (REQ_UNPLUG, REQ_HARDBARRIER) */
1197 bio->bi_private = e;
1198 bio->bi_end_io = drbd_endio_sec;
1200 bio->bi_next = bios;
1204 page_chain_for_each(page) {
1205 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1206 if (!bio_add_page(bio, page, len, 0)) {
1207 /* a single page must always be possible! */
1208 BUG_ON(bio->bi_vcnt == 0);
1215 D_ASSERT(page == NULL);
1218 atomic_set(&e->pending_bios, n_bios);
1221 bios = bios->bi_next;
1222 bio->bi_next = NULL;
1224 /* strip off REQ_UNPLUG unless it is the last bio */
1226 bio->bi_rw &= ~REQ_UNPLUG;
1228 drbd_generic_make_request(mdev, fault_type, bio);
1230 /* strip off REQ_HARDBARRIER,
1231 * unless it is the first or last bio */
1232 if (bios && bios->bi_next)
1233 bios->bi_rw &= ~REQ_HARDBARRIER;
1235 maybe_kick_lo(mdev);
1241 bios = bios->bi_next;
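/* A sketch, not part of this file: the submit loop above keeps bio_add_page()ing
 * pages of the chain and only opens another bio when the current one refuses a
 * page (the local queue limits may differ from the peer's).  The counting logic
 * in isolation, with a made-up per-bio segment limit:
 */
#include <stdio.h>

#define MAX_SEGS_SKETCH 4		/* made-up per-bio segment limit */

struct fake_bio { int vcnt; };

/* like bio_add_page(): accept the page unless this bio is already full */
static int add_page_sketch(struct fake_bio *bio)
{
	if (bio->vcnt >= MAX_SEGS_SKETCH)
		return 0;
	bio->vcnt++;
	return 1;
}

int main(void)
{
	const int total_pages = 10;
	struct fake_bio bio = { 0 };
	int i, n_bios = 0;

	for (i = 0; i < total_pages; i++) {
		if (!add_page_sketch(&bio)) {
			/* current bio is full: account for it and start a new one */
			n_bios++;
			bio.vcnt = 0;
			add_page_sketch(&bio);	/* a single page must always fit */
		}
	}
	if (bio.vcnt)
		n_bios++;
	printf("%d pages need %d bios\n", total_pages, n_bios);
	return 0;
}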
1248 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
1249 * @mdev: DRBD device.
1251 * @cancel: The connection will be closed anyways (unused in this callback)
1253 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1255 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1256 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1257 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1258 so that we can finish that epoch in drbd_may_finish_epoch().
1259 That is necessary if we already have a long chain of Epochs, before
1260 we realize that REQ_HARDBARRIER is actually not supported */
1262 /* As long as the -ENOTSUPP on the barrier is reported immediately
1263 that will never trigger. If it is reported late, we will just
1264 print that warning and continue correctly for all future requests
1265 with WO_bdev_flush */
1266 if (previous_epoch(mdev, e->epoch))
1267 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1269 /* we still have a local reference,
1270 * get_ldev was done in receive_Data. */
1272 e->w.cb = e_end_block;
1273 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
1274 /* drbd_submit_ee fails for one reason only:
1275 * it was not able to allocate sufficient bios.
1276 * requeue, try again later. */
1277 e->w.cb = w_e_reissue;
1278 drbd_queue_work(&mdev->data.work, &e->w);
1283 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1285 int rv, issue_flush;
1286 struct p_barrier *p = &mdev->data.rbuf.barrier;
1287 struct drbd_epoch *epoch;
1291 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1294 mdev->current_epoch->barrier_nr = p->barrier;
1295 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1297 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1298 * the activity log, which means it would not be resynced in case the
1299 * R_PRIMARY crashes now.
1300 * Therefore we must send the barrier_ack after the barrier request was
1302 switch (mdev->write_ordering) {
1303 case WO_bio_barrier:
1305 if (rv == FE_RECYCLED)
1311 if (rv == FE_STILL_LIVE) {
1312 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1313 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1314 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1316 if (rv == FE_RECYCLED)
1319 /* The asender will send all the ACKs and barrier ACKs out, since
1320 all EEs moved from the active_ee to the done_ee. We need to
1321 provide a new epoch object for the EEs that come in soon */
1325 /* receiver context, in the writeout path of the other node.
1326 * avoid potential distributed deadlock */
1327 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1329 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1330 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1331 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1333 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1334 if (rv == FE_RECYCLED)
1338 drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1344 atomic_set(&epoch->epoch_size, 0);
1345 atomic_set(&epoch->active, 0);
1347 spin_lock(&mdev->epoch_lock);
1348 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1349 list_add(&epoch->list, &mdev->current_epoch->list);
1350 mdev->current_epoch = epoch;
1353 /* The current_epoch got recycled while we allocated this one... */
1356 spin_unlock(&mdev->epoch_lock);
1361 /* used from receive_RSDataReply (recv_resync_read)
1362 * and from receive_Data */
1363 static struct drbd_epoch_entry *
1364 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1366 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1367 struct drbd_epoch_entry *e;
1370 void *dig_in = mdev->int_dig_in;
1371 void *dig_vv = mdev->int_dig_vv;
1372 unsigned long *data;
1374 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1375 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1378 rr = drbd_recv(mdev, dig_in, dgs);
1380 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1388 ERR_IF(data_size & 0x1ff) return NULL;
1389 ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
1391 /* even though we trust our peer,
1392 * we sometimes have to double check. */
1393 if (sector + (data_size>>9) > capacity) {
1394 dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
1395 (unsigned long long)capacity,
1396 (unsigned long long)sector, data_size);
1400 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1401 * "criss-cross" setup, that might cause write-out on some other DRBD,
1402 * which in turn might block on the other node at this very place. */
1403 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1409 page_chain_for_each(page) {
1410 unsigned len = min_t(int, ds, PAGE_SIZE);
1412 rr = drbd_recv(mdev, data, len);
1413 if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
1414 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1415 data[0] = data[0] ^ (unsigned long)-1;
1419 drbd_free_ee(mdev, e);
1420 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1428 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1429 if (memcmp(dig_in, dig_vv, dgs)) {
1430 dev_err(DEV, "Digest integrity check FAILED.\n");
1431 drbd_bcast_ee(mdev, "digest failed",
1432 dgs, dig_in, dig_vv, e);
1433 drbd_free_ee(mdev, e);
1437 mdev->recv_cnt += data_size>>9;
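/* A sketch, not part of this file: when an integrity transform is configured,
 * read_in_block() above receives a digest ahead of the payload, recomputes it
 * over the received pages and throws the block away on a mismatch.  The shape
 * of that check, standalone, with a toy checksum standing in for the negotiated
 * hash (DRBD uses the crypto API via integrity_r_tfm, not this):
 */
#include <stdint.h>
#include <string.h>

static uint32_t toy_digest(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum = sum * 31u + buf[i];
	return sum;
}

/* dgs == 0 means "no integrity checking configured" */
static int payload_ok(const unsigned char *payload, size_t len,
		      const void *dig_in, size_t dgs)
{
	uint32_t dig_vv;

	if (dgs == 0)
		return 1;
	if (dgs != sizeof(dig_vv))
		return 0;
	dig_vv = toy_digest(payload, len);
	return memcmp(dig_in, &dig_vv, dgs) == 0;
}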
1441 /* drbd_drain_block() just takes a data block
1442 * out of the socket input buffer, and discards it.
1444 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1453 page = drbd_pp_alloc(mdev, 1, 1);
1457 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1458 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1460 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1461 rr, min_t(int, data_size, PAGE_SIZE));
1467 drbd_pp_free(mdev, page, 0);
1471 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1472 sector_t sector, int data_size)
1474 struct bio_vec *bvec;
1476 int dgs, rr, i, expect;
1477 void *dig_in = mdev->int_dig_in;
1478 void *dig_vv = mdev->int_dig_vv;
1480 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1481 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1484 rr = drbd_recv(mdev, dig_in, dgs);
1486 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1494 /* optimistically update recv_cnt. if receiving fails below,
1495 * we disconnect anyways, and counters will be reset. */
1496 mdev->recv_cnt += data_size>>9;
1498 bio = req->master_bio;
1499 D_ASSERT(sector == bio->bi_sector);
1501 bio_for_each_segment(bvec, bio, i) {
1502 expect = min_t(int, data_size, bvec->bv_len);
1503 rr = drbd_recv(mdev,
1504 kmap(bvec->bv_page)+bvec->bv_offset,
1506 kunmap(bvec->bv_page);
1508 dev_warn(DEV, "short read receiving data reply: "
1509 "read %d expected %d\n",
1517 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1518 if (memcmp(dig_in, dig_vv, dgs)) {
1519 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1524 D_ASSERT(data_size == 0);
1528 /* e_end_resync_block() is called via
1529 * drbd_process_done_ee() by asender only */
1530 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1532 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1533 sector_t sector = e->sector;
1536 D_ASSERT(hlist_unhashed(&e->colision));
1538 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1539 drbd_set_in_sync(mdev, sector, e->size);
1540 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1542 /* Record failure to sync */
1543 drbd_rs_failed_io(mdev, sector, e->size);
1545 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1552 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1554 struct drbd_epoch_entry *e;
1556 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1560 dec_rs_pending(mdev);
1563 /* corresponding dec_unacked() in e_end_resync_block()
1564 * or in _drbd_clear_done_ee, respectively */
1566 e->w.cb = e_end_resync_block;
1568 spin_lock_irq(&mdev->req_lock);
1569 list_add(&e->w.list, &mdev->sync_ee);
1570 spin_unlock_irq(&mdev->req_lock);
1572 atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1573 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1576 drbd_free_ee(mdev, e);
1582 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1584 struct drbd_request *req;
1587 struct p_data *p = &mdev->data.rbuf.data;
1589 sector = be64_to_cpu(p->sector);
1591 spin_lock_irq(&mdev->req_lock);
1592 req = _ar_id_to_req(mdev, p->block_id, sector);
1593 spin_unlock_irq(&mdev->req_lock);
1594 if (unlikely(!req)) {
1595 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1599 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1600 * special casing it there for the various failure cases.
1601 * still no race with drbd_fail_pending_reads */
1602 ok = recv_dless_read(mdev, req, sector, data_size);
1605 req_mod(req, data_received);
1606 /* else: nothing. handled from drbd_disconnect...
1607 * I don't think we may complete this just yet
1608 * in case we are "on-disconnect: freeze" */
1613 static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1617 struct p_data *p = &mdev->data.rbuf.data;
1619 sector = be64_to_cpu(p->sector);
1620 D_ASSERT(p->block_id == ID_SYNCER);
1622 if (get_ldev(mdev)) {
1623 /* data is submitted to disk within recv_resync_read.
1624 * corresponding put_ldev done below on error,
1625 * or in drbd_endio_write_sec. */
1626 ok = recv_resync_read(mdev, sector, data_size);
1628 if (__ratelimit(&drbd_ratelimit_state))
1629 dev_err(DEV, "Can not write resync data to local disk.\n");
1631 ok = drbd_drain_block(mdev, data_size);
1633 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1636 atomic_add(data_size >> 9, &mdev->rs_sect_in);
1641 /* e_end_block() is called via drbd_process_done_ee().
1642 * this means this function only runs in the asender thread
1644 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1646 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1647 sector_t sector = e->sector;
1648 struct drbd_epoch *epoch;
1651 if (e->flags & EE_IS_BARRIER) {
1652 epoch = previous_epoch(mdev, e->epoch);
1654 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1657 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1658 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1659 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1660 mdev->state.conn <= C_PAUSED_SYNC_T &&
1661 e->flags & EE_MAY_SET_IN_SYNC) ?
1662 P_RS_WRITE_ACK : P_WRITE_ACK;
1663 ok &= drbd_send_ack(mdev, pcmd, e);
1664 if (pcmd == P_RS_WRITE_ACK)
1665 drbd_set_in_sync(mdev, sector, e->size);
1667 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1668 /* we expect it to be marked out of sync anyways...
1669 * maybe assert this? */
1673 /* we delete from the conflict detection hash _after_ we sent out the
1674 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1675 if (mdev->net_conf->two_primaries) {
1676 spin_lock_irq(&mdev->req_lock);
1677 D_ASSERT(!hlist_unhashed(&e->colision));
1678 hlist_del_init(&e->colision);
1679 spin_unlock_irq(&mdev->req_lock);
1681 D_ASSERT(hlist_unhashed(&e->colision));
1684 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1689 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1691 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1694 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1695 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1697 spin_lock_irq(&mdev->req_lock);
1698 D_ASSERT(!hlist_unhashed(&e->colision));
1699 hlist_del_init(&e->colision);
1700 spin_unlock_irq(&mdev->req_lock);
1707 /* Called from receive_Data.
1708 * Synchronize packets on sock with packets on msock.
1710 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1711 * packet traveling on msock, they are still processed in the order they have
1714 * Note: we don't care for Ack packets overtaking P_DATA packets.
1716 * In case packet_seq is larger than mdev->peer_seq number, there are
1717 * outstanding packets on the msock. We wait for them to arrive.
1718 * In case we are the logically next packet, we update mdev->peer_seq
1719 * ourselves. Correctly handles 32bit wrap around.
1721 * Assume we have a 10 GBit connection, that is about 1<<30 bytes per second,
1722 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1723 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1724 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1726 * returns 0 if we may process the packet,
1727 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1728 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1734 spin_lock(&mdev->peer_seq_lock);
1736 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1737 if (seq_le(packet_seq, mdev->peer_seq+1))
1739 if (signal_pending(current)) {
1743 p_seq = mdev->peer_seq;
1744 spin_unlock(&mdev->peer_seq_lock);
1745 timeout = schedule_timeout(30*HZ);
1746 spin_lock(&mdev->peer_seq_lock);
1747 if (timeout == 0 && p_seq == mdev->peer_seq) {
1749 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1753 finish_wait(&mdev->seq_wait, &wait);
1754 if (mdev->peer_seq+1 == packet_seq)
1756 spin_unlock(&mdev->peer_seq_lock);
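/* A sketch, not part of this file: the seq_le() test used above must stay
 * correct when packet_seq wraps around 32 bits.  The usual serial-number trick
 * is a signed difference; shown standalone with a made-up name:
 */
#include <stdint.h>

/* "a comes no later than b"; valid as long as the two sequence numbers are less
 * than 2^31 apart, which the rate estimate in the comment above ensures */
static int seq_le_sketch(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}
/* e.g. seq_le_sketch(0xfffffffeu, 1u) is true: 1 lies just "after" the wrap */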
1760 static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
1762 if (mdev->agreed_pro_version >= 95)
1763 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
1764 (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
1765 (dpf & DP_FUA ? REQ_FUA : 0) |
1766 (dpf & DP_FLUSH ? REQ_FUA : 0) |
1767 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
1769 return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
1772 /* mirrored write */
1773 static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
1776 struct drbd_epoch_entry *e;
1777 struct p_data *p = &mdev->data.rbuf.data;
1781 if (!get_ldev(mdev)) {
1782 if (__ratelimit(&drbd_ratelimit_state))
1783 dev_err(DEV, "Can not write mirrored data block "
1784 "to local disk.\n");
1785 spin_lock(&mdev->peer_seq_lock);
1786 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1788 spin_unlock(&mdev->peer_seq_lock);
1790 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
1791 atomic_inc(&mdev->current_epoch->epoch_size);
1792 return drbd_drain_block(mdev, data_size);
1795 /* get_ldev(mdev) successful.
1796 * Corresponding put_ldev done either below (on various errors),
1797 * or in drbd_endio_write_sec, if we successfully submit the data at
1798 * the end of this function. */
1800 sector = be64_to_cpu(p->sector);
1801 e = read_in_block(mdev, p->block_id, sector, data_size);
1807 e->w.cb = e_end_block;
1809 spin_lock(&mdev->epoch_lock);
1810 e->epoch = mdev->current_epoch;
1811 atomic_inc(&e->epoch->epoch_size);
1812 atomic_inc(&e->epoch->active);
1814 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1815 struct drbd_epoch *epoch;
1816 /* Issue a barrier if we start a new epoch, and the previous epoch
1817 was not an epoch containing a single request which already was
1819 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1820 if (epoch == e->epoch) {
1821 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1822 rw |= REQ_HARDBARRIER;
1823 e->flags |= EE_IS_BARRIER;
1825 if (atomic_read(&epoch->epoch_size) > 1 ||
1826 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1827 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1828 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1829 rw |= REQ_HARDBARRIER;
1830 e->flags |= EE_IS_BARRIER;
1834 spin_unlock(&mdev->epoch_lock);
1836 dp_flags = be32_to_cpu(p->dp_flags);
1837 rw |= write_flags_to_bio(mdev, dp_flags);
1839 if (dp_flags & DP_MAY_SET_IN_SYNC)
1840 e->flags |= EE_MAY_SET_IN_SYNC;
1842 /* I'm the receiver, I do hold a net_cnt reference. */
1843 if (!mdev->net_conf->two_primaries) {
1844 spin_lock_irq(&mdev->req_lock);
1846 /* don't get the req_lock yet,
1847 * we may sleep in drbd_wait_peer_seq */
1848 const int size = e->size;
1849 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1851 struct drbd_request *i;
1852 struct hlist_node *n;
1853 struct hlist_head *slot;
1856 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1857 BUG_ON(mdev->ee_hash == NULL);
1858 BUG_ON(mdev->tl_hash == NULL);
1860 /* conflict detection and handling:
1861 * 1. wait on the sequence number,
1862 * in case this data packet overtook ACK packets.
1863 * 2. check our hash tables for conflicting requests.
1864 * we only need to walk the tl_hash, since an ee cannot
1865 * have a conflict with another ee: on the submitting
1866 * node, the corresponding req had already been conflicting,
1867 * and a conflicting req is never sent.
1869 * Note: for two_primaries, we are protocol C,
1870 * so there cannot be any request that is DONE
1871 * but still on the transfer log.
1873 * unconditionally add to the ee_hash.
1875 * if no conflicting request is found:
1878 * if any conflicting request is found
1879 * that has not yet been acked,
1880 * AND I have the "discard concurrent writes" flag:
1881 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1883 * if any conflicting request is found:
1884 * block the receiver, waiting on misc_wait
1885 * until no more conflicting requests are there,
1886 * or we get interrupted (disconnect).
1888 * we do not just write after local io completion of those
1889 * requests, but only after req is done completely, i.e.
1890 * we wait for the P_DISCARD_ACK to arrive!
1892 * then proceed normally, i.e. submit.
1894 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1895 goto out_interrupted;
1897 spin_lock_irq(&mdev->req_lock);
1899 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1901 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1902 slot = tl_hash_slot(mdev, sector);
1905 int have_unacked = 0;
1906 int have_conflict = 0;
1907 prepare_to_wait(&mdev->misc_wait, &wait,
1908 TASK_INTERRUPTIBLE);
1909 hlist_for_each_entry(i, n, slot, colision) {
1911 /* only ALERT on first iteration,
1912 * we may be woken up early... */
1914 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1915 " new: %llus +%u; pending: %llus +%u\n",
1916 current->comm, current->pid,
1917 (unsigned long long)sector, size,
1918 (unsigned long long)i->sector, i->size);
1919 if (i->rq_state & RQ_NET_PENDING)
1928 /* Discard Ack only for the _first_ iteration */
1929 if (first && discard && have_unacked) {
1930 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1931 (unsigned long long)sector);
1933 e->w.cb = e_send_discard_ack;
1934 list_add_tail(&e->w.list, &mdev->done_ee);
1936 spin_unlock_irq(&mdev->req_lock);
1938 /* we could probably send that P_DISCARD_ACK ourselves,
1939 * but I don't like the receiver using the msock */
1943 finish_wait(&mdev->misc_wait, &wait);
1947 if (signal_pending(current)) {
1948 hlist_del_init(&e->colision);
1950 spin_unlock_irq(&mdev->req_lock);
1952 finish_wait(&mdev->misc_wait, &wait);
1953 goto out_interrupted;
1956 spin_unlock_irq(&mdev->req_lock);
1959 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1960 "sec=%llus\n", (unsigned long long)sector);
1961 } else if (discard) {
1962 /* we had none on the first iteration.
1963 * there must be none now. */
1964 D_ASSERT(have_unacked == 0);
1967 spin_lock_irq(&mdev->req_lock);
1969 finish_wait(&mdev->misc_wait, &wait);
1972 list_add(&e->w.list, &mdev->active_ee);
1973 spin_unlock_irq(&mdev->req_lock);
1975 switch (mdev->net_conf->wire_protocol) {
1978 /* corresponding dec_unacked() in e_end_block()
1979 * or in _drbd_clear_done_ee, respectively */
1982 /* I really don't like it that the receiver thread
1983 * sends on the msock, but anyways */
1984 drbd_send_ack(mdev, P_RECV_ACK, e);
1991 if (mdev->state.pdsk == D_DISKLESS) {
1992 /* In case we have the only disk of the cluster, */
1993 drbd_set_out_of_sync(mdev, e->sector, e->size);
1994 e->flags |= EE_CALL_AL_COMPLETE_IO;
1995 drbd_al_begin_io(mdev, e->sector);
1998 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2002 /* yes, the epoch_size now is imbalanced.
2003 * but we drop the connection anyways, so we don't have a chance to
2004 * receive a barrier... atomic_inc(&mdev->epoch_size); */
2006 drbd_free_ee(mdev, e);
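/* A sketch, not part of this file: the conflict detection above compares the
 * incoming write (sector, size) against each pending request in tl_hash with an
 * interval overlap test.  The generic form, assuming sizes in bytes and 512-byte
 * sectors (helper name made up):
 */
#include <stdint.h>

/* do [s1, s1 + l1 bytes) and [s2, s2 + l2 bytes) touch a common sector? */
static int overlaps_sketch(uint64_t s1, unsigned int l1,
			   uint64_t s2, unsigned int l2)
{
	return s1 + (l1 >> 9) > s2 && s2 + (l2 >> 9) > s1;
}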
2010 /* We may throttle resync, if the lower device seems to be busy,
2011 * and current sync rate is above c_min_rate.
2013 * To decide whether or not the lower device is busy, we use a scheme similar
2014 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2015 * (more than 64 sectors) of activity we cannot account for with our own resync
2016 * activity, it obviously is "busy".
2018 * The current sync rate used here uses only the most recent two step marks,
2019 * to have a short time average so we can react faster.
2021 int drbd_rs_should_slow_down(struct drbd_conf *mdev)
2023 struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2024 unsigned long db, dt, dbdt;
2028 /* feature disabled? */
2029 if (mdev->sync_conf.c_min_rate == 0)
2032 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2033 (int)part_stat_read(&disk->part0, sectors[1]) -
2034 atomic_read(&mdev->rs_sect_ev);
2035 if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2036 unsigned long rs_left;
2039 mdev->rs_last_events = curr_events;
2041 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2043 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS;
2044 rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2046 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2049 db = mdev->rs_mark_left[i] - rs_left;
2050 dbdt = Bit2KB(db/dt);
2052 if (dbdt > mdev->sync_conf.c_min_rate)
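/* A sketch, not part of this file: the throttle decision above estimates the
 * recent resync rate from the two most recent sync marks: bits cleared between
 * the marks, divided by the elapsed seconds, converted to KiB/s (one bitmap bit
 * covers 4 KiB in DRBD) and compared against c_min_rate.  With made-up numbers:
 */
#include <stdio.h>

#define BIT2KB_SKETCH(bits)	((bits) << 2)	/* one bitmap bit == 4 KiB */

int main(void)
{
	unsigned long rs_left_at_mark = 1000000;	/* out-of-sync bits at the older mark */
	unsigned long rs_left_now     =  900000;	/* out-of-sync bits now */
	unsigned long dt = 6;				/* seconds between the two marks */
	unsigned long db = rs_left_at_mark - rs_left_now;
	unsigned long dbdt = BIT2KB_SKETCH(db / dt);	/* KiB/s, compared with c_min_rate */

	printf("current resync rate: ~%lu KiB/s\n", dbdt);
	return 0;
}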
2059 static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
2062 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2063 struct drbd_epoch_entry *e;
2064 struct digest_info *di = NULL;
2066 unsigned int fault_type;
2067 struct p_block_req *p = &mdev->data.rbuf.block_req;
2069 sector = be64_to_cpu(p->sector);
2070 size = be32_to_cpu(p->blksize);
2072 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2073 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2074 (unsigned long long)sector, size);
2077 if (sector + (size>>9) > capacity) {
2078 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2079 (unsigned long long)sector, size);
2083 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2084 if (__ratelimit(&drbd_ratelimit_state))
2085 dev_err(DEV, "Can not satisfy peer's read request, "
2086 "no local data.\n");
2087 drbd_send_ack_rp(mdev, cmd == P_DATA_REQUEST ? P_NEG_DREPLY :
2088 P_NEG_RS_DREPLY , p);
2089 /* drain the payload, if any */
2090 return drbd_drain_block(mdev, digest_size);
2093 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2094 * "criss-cross" setup, that might cause write-out on some other DRBD,
2095 * which in turn might block on the other node at this very place. */
2096 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2103 case P_DATA_REQUEST:
2104 e->w.cb = w_e_end_data_req;
2105 fault_type = DRBD_FAULT_DT_RD;
2106 /* application IO, don't drbd_rs_begin_io */
2109 case P_RS_DATA_REQUEST:
2110 e->w.cb = w_e_end_rsdata_req;
2111 fault_type = DRBD_FAULT_RS_RD;
2115 case P_CSUM_RS_REQUEST:
2116 fault_type = DRBD_FAULT_RS_RD;
2117 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2121 di->digest_size = digest_size;
2122 di->digest = (((char *)di)+sizeof(struct digest_info));
2125 e->flags |= EE_HAS_DIGEST;
2127 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2130 if (cmd == P_CSUM_RS_REQUEST) {
2131 D_ASSERT(mdev->agreed_pro_version >= 89);
2132 e->w.cb = w_e_end_csum_rs_req;
2133 } else if (cmd == P_OV_REPLY) {
2134 e->w.cb = w_e_end_ov_reply;
2135 dec_rs_pending(mdev);
2136 /* drbd_rs_begin_io done when we sent this request,
2137 * but accounting still needs to be done. */
2138 goto submit_for_resync;
2143 if (mdev->state.conn >= C_CONNECTED &&
2144 mdev->state.conn != C_VERIFY_T)
2145 dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2146 drbd_conn_str(mdev->state.conn));
2147 if (mdev->ov_start_sector == ~(sector_t)0 &&
2148 mdev->agreed_pro_version >= 90) {
2149 mdev->ov_start_sector = sector;
2150 mdev->ov_position = sector;
2151 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2152 dev_info(DEV, "Online Verify start sector: %llu\n",
2153 (unsigned long long)sector);
2155 e->w.cb = w_e_end_ov_req;
2156 fault_type = DRBD_FAULT_RS_RD;
2160 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2162 fault_type = DRBD_FAULT_MAX;
2166 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2167 * wrt the receiver, but it is not as straightforward as it may seem.
2168 * Various places in the resync start and stop logic assume resync
2169 * requests are processed in order, requeuing this on the worker thread
2170 * introduces a bunch of new code for synchronization between threads.
2172 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2173 * "forever", throttling after drbd_rs_begin_io will lock that extent
2174 * for application writes for the same time. For now, just throttle
2175 * here, where the rest of the code expects the receiver to sleep for
2179 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2180 * this defers syncer requests for some time, before letting at least
2181 * one request through. The resync controller on the receiving side
2182 * will adapt to the incoming rate accordingly.
2184 * We cannot throttle here if remote is Primary/SyncTarget:
2185 * we would also throttle its application reads.
2186 * In that case, throttling is done on the SyncTarget only.
2188 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev))
2190 if (drbd_rs_begin_io(mdev, e->sector))
2194 atomic_add(size >> 9, &mdev->rs_sect_ev);
2198 spin_lock_irq(&mdev->req_lock);
2199 list_add_tail(&e->w.list, &mdev->read_ee);
2200 spin_unlock_irq(&mdev->req_lock);
2202 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2207 drbd_free_ee(mdev, e);
2211 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2213 int self, peer, rv = -100;
2214 unsigned long ch_self, ch_peer;
2216 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2217 peer = mdev->p_uuid[UI_BITMAP] & 1;
2219 ch_peer = mdev->p_uuid[UI_SIZE];
2220 ch_self = mdev->comm_bm_set;
2222 switch (mdev->net_conf->after_sb_0p) {
2224 case ASB_DISCARD_SECONDARY:
2225 case ASB_CALL_HELPER:
2226 dev_err(DEV, "Configuration error.\n");
2228 case ASB_DISCONNECT:
2230 case ASB_DISCARD_YOUNGER_PRI:
2231 if (self == 0 && peer == 1) {
2235 if (self == 1 && peer == 0) {
2239 /* Else fall through to one of the other strategies... */
2240 case ASB_DISCARD_OLDER_PRI:
2241 if (self == 0 && peer == 1) {
2245 if (self == 1 && peer == 0) {
2249 /* Else fall through to one of the other strategies... */
2250 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2251 "Using discard-least-changes instead\n");
2252 case ASB_DISCARD_ZERO_CHG:
2253 if (ch_peer == 0 && ch_self == 0) {
2254 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2258 if (ch_peer == 0) { rv = 1; break; }
2259 if (ch_self == 0) { rv = -1; break; }
2261 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2263 case ASB_DISCARD_LEAST_CHG:
2264 if (ch_self < ch_peer)
2266 else if (ch_self > ch_peer)
2268 else /* ( ch_self == ch_peer ) */
2269 /* Well, then use something else. */
2270 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2273 case ASB_DISCARD_LOCAL:
2276 case ASB_DISCARD_REMOTE:
2283 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2285 int self, peer, hg, rv = -100;
2287 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2288 peer = mdev->p_uuid[UI_BITMAP] & 1;
2290 switch (mdev->net_conf->after_sb_1p) {
2291 case ASB_DISCARD_YOUNGER_PRI:
2292 case ASB_DISCARD_OLDER_PRI:
2293 case ASB_DISCARD_LEAST_CHG:
2294 case ASB_DISCARD_LOCAL:
2295 case ASB_DISCARD_REMOTE:
2296 dev_err(DEV, "Configuration error.\n");
2298 case ASB_DISCONNECT:
2301 hg = drbd_asb_recover_0p(mdev);
2302 if (hg == -1 && mdev->state.role == R_SECONDARY)
2304 if (hg == 1 && mdev->state.role == R_PRIMARY)
2308 rv = drbd_asb_recover_0p(mdev);
2310 case ASB_DISCARD_SECONDARY:
2311 return mdev->state.role == R_PRIMARY ? 1 : -1;
2312 case ASB_CALL_HELPER:
2313 hg = drbd_asb_recover_0p(mdev);
2314 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2315 self = drbd_set_role(mdev, R_SECONDARY, 0);
2316 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2317 * we might be here in C_WF_REPORT_PARAMS which is transient.
2318 * we do not need to wait for the after state change work either. */
2319 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2320 if (self != SS_SUCCESS) {
2321 drbd_khelper(mdev, "pri-lost-after-sb");
2323 dev_warn(DEV, "Successfully gave up primary role.\n");
2333 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2335 int self, peer, hg, rv = -100;
2337 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2338 peer = mdev->p_uuid[UI_BITMAP] & 1;
2340 switch (mdev->net_conf->after_sb_2p) {
2341 case ASB_DISCARD_YOUNGER_PRI:
2342 case ASB_DISCARD_OLDER_PRI:
2343 case ASB_DISCARD_LEAST_CHG:
2344 case ASB_DISCARD_LOCAL:
2345 case ASB_DISCARD_REMOTE:
2347 case ASB_DISCARD_SECONDARY:
2348 dev_err(DEV, "Configuration error.\n");
2351 rv = drbd_asb_recover_0p(mdev);
2353 case ASB_DISCONNECT:
2355 case ASB_CALL_HELPER:
2356 hg = drbd_asb_recover_0p(mdev);
2358 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2359 * we might be here in C_WF_REPORT_PARAMS which is transient.
2360 * we do not need to wait for the after state change work either. */
2361 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2362 if (self != SS_SUCCESS) {
2363 drbd_khelper(mdev, "pri-lost-after-sb");
2365 dev_warn(DEV, "Successfully gave up primary role.\n");
2375 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2376 u64 bits, u64 flags)
2379 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2382 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2384 (unsigned long long)uuid[UI_CURRENT],
2385 (unsigned long long)uuid[UI_BITMAP],
2386 (unsigned long long)uuid[UI_HISTORY_START],
2387 (unsigned long long)uuid[UI_HISTORY_END],
2388 (unsigned long long)bits,
2389 (unsigned long long)flags);
2393 100 after split brain try auto recover
2394 2 C_SYNC_SOURCE set BitMap
2395 1 C_SYNC_SOURCE use BitMap
2397 -1 C_SYNC_TARGET use BitMap
2398 -2 C_SYNC_TARGET set BitMap
2399 -100 after split brain, disconnect
2400 -1000 unrelated data
2402 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2407 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2408 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2411 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2415 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2416 peer != UUID_JUST_CREATED)
2420 if (self != UUID_JUST_CREATED &&
2421 (peer == UUID_JUST_CREATED || peer == (u64)0))
2425 int rct, dc; /* roles at crash time */
2427 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2429 if (mdev->agreed_pro_version < 91)
2432 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2433 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2434 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2435 drbd_uuid_set_bm(mdev, 0UL);
2437 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2438 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2441 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2448 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2450 if (mdev->agreed_pro_version < 91)
2453 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2454 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2455 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2457 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2458 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2459 mdev->p_uuid[UI_BITMAP] = 0UL;
2461 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2464 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2471 /* Common power [off|failure] */
2472 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2473 (mdev->p_uuid[UI_FLAGS] & 2);
2474 /* lowest bit is set when we were primary,
2475 * next bit (weight 2) is set when peer was primary */
2479 case 0: /* !self_pri && !peer_pri */ return 0;
2480 case 1: /* self_pri && !peer_pri */ return 1;
2481 case 2: /* !self_pri && peer_pri */ return -1;
2482 case 3: /* self_pri && peer_pri */
2483 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2489 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2494 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2496 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2497 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2499 /* The last P_SYNC_UUID did not get through. Undo the modifications the
2500 peer made to its UUIDs when it last started a resync as sync source. */
2502 if (mdev->agreed_pro_version < 91)
2505 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2506 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2512 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2513 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2514 peer = mdev->p_uuid[i] & ~((u64)1);
2520 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2521 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2526 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2528 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2529 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2531 /* The last P_SYNC_UUID did not get through. Undo the modifications we
2532 made to our own UUIDs when we last started a resync as sync source. */
2534 if (mdev->agreed_pro_version < 91)
2537 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2538 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2540 dev_info(DEV, "Undid last start of resync:\n");
2542 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2543 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2551 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2552 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2553 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2559 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2560 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2561 if (self == peer && self != ((u64)0))
2565 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2566 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2567 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2568 peer = mdev->p_uuid[j] & ~((u64)1);
2577 /* drbd_sync_handshake() returns the new conn state on success, or
2578 C_MASK on failure.
2580 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2581 enum drbd_disk_state peer_disk) __must_hold(local)
2584 enum drbd_conns rv = C_MASK;
2585 enum drbd_disk_state mydisk;
2587 mydisk = mdev->state.disk;
2588 if (mydisk == D_NEGOTIATING)
2589 mydisk = mdev->new_state_tmp.disk;
2591 dev_info(DEV, "drbd_sync_handshake:\n");
2592 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2593 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2594 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2596 hg = drbd_uuid_compare(mdev, &rule_nr);
2598 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2601 dev_alert(DEV, "Unrelated data, aborting!\n");
2605 dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
2609 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2610 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2611 int f = (hg == -100) || abs(hg) == 2;
2612 hg = mydisk > D_INCONSISTENT ? 1 : -1;
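/* per the rule table above: |hg| == 2 requests a full sync (set the
 * whole bitmap), |hg| == 1 a bitmap based sync */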
2615 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2616 hg > 0 ? "source" : "target");
2620 drbd_khelper(mdev, "initial-split-brain");
2622 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2623 int pcount = (mdev->state.role == R_PRIMARY)
2624 + (peer_role == R_PRIMARY);
2625 int forced = (hg == -100);
2629 hg = drbd_asb_recover_0p(mdev);
2632 hg = drbd_asb_recover_1p(mdev);
2635 hg = drbd_asb_recover_2p(mdev);
2638 if (abs(hg) < 100) {
2639 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2640 "automatically solved. Sync from %s node\n",
2641 pcount, (hg < 0) ? "peer" : "this");
2643 dev_warn(DEV, "Doing a full sync, since"
2644 " UUIDs where ambiguous.\n");
2651 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2653 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2657 dev_warn(DEV, "Split-Brain detected, manually solved. "
2658 "Sync from %s node\n",
2659 (hg < 0) ? "peer" : "this");
2663 /* FIXME this log message is not correct if we end up here
2664 * after an attempted attach on a diskless node.
2665 * We just refuse to attach -- well, we drop the "connection"
2666 * to that disk, in a way... */
2667 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2668 drbd_khelper(mdev, "split-brain");
2672 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2673 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2677 if (hg < 0 && /* by intention we do not use mydisk here. */
2678 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2679 switch (mdev->net_conf->rr_conflict) {
2680 case ASB_CALL_HELPER:
2681 drbd_khelper(mdev, "pri-lost");
2683 case ASB_DISCONNECT:
2684 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2687 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2692 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2694 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2696 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2697 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2698 abs(hg) >= 2 ? "full" : "bit-map based");
2703 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2704 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2708 if (hg > 0) { /* become sync source. */
2710 } else if (hg < 0) { /* become sync target */
2714 if (drbd_bm_total_weight(mdev)) {
2715 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2716 drbd_bm_total_weight(mdev));
2723 /* returns 1 if invalid */
2724 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2726 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2727 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2728 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2731 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2732 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2733 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2736 /* everything else is valid if they are equal on both sides. */
2740 /* everything else is invalid. */
2744 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2746 struct p_protocol *p = &mdev->data.rbuf.protocol;
2747 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2748 int p_want_lose, p_two_primaries, cf;
2749 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2751 p_proto = be32_to_cpu(p->protocol);
2752 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2753 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2754 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2755 p_two_primaries = be32_to_cpu(p->two_primaries);
2756 cf = be32_to_cpu(p->conn_flags);
2757 p_want_lose = cf & CF_WANT_LOSE;
2759 clear_bit(CONN_DRY_RUN, &mdev->flags);
2761 if (cf & CF_DRY_RUN)
2762 set_bit(CONN_DRY_RUN, &mdev->flags);
2764 if (p_proto != mdev->net_conf->wire_protocol) {
2765 dev_err(DEV, "incompatible communication protocols\n");
2769 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2770 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2774 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2775 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2779 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2780 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2784 if (p_want_lose && mdev->net_conf->want_lose) {
2785 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2789 if (p_two_primaries != mdev->net_conf->two_primaries) {
2790 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2794 if (mdev->agreed_pro_version >= 87) {
2795 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2797 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2800 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2801 if (strcmp(p_integrity_alg, my_alg)) {
2802 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2805 dev_info(DEV, "data-integrity-alg: %s\n",
2806 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2812 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2817 * input: alg name, feature name
2818 * return: NULL (alg name was "")
2819 * ERR_PTR(error) if something goes wrong
2820 * or the crypto hash ptr, if it worked out ok. */
2821 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2822 const char *alg, const char *name)
2824 struct crypto_hash *tfm;
2829 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2831 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2832 alg, name, PTR_ERR(tfm));
2835 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2836 crypto_free_hash(tfm);
2837 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2838 return ERR_PTR(-EINVAL);
2843 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2846 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2847 unsigned int header_size, data_size, exp_max_sz;
2848 struct crypto_hash *verify_tfm = NULL;
2849 struct crypto_hash *csums_tfm = NULL;
2850 const int apv = mdev->agreed_pro_version;
2851 int *rs_plan_s = NULL;
2854 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2855 : apv == 88 ? sizeof(struct p_rs_param)
2857 : apv <= 94 ? sizeof(struct p_rs_param_89)
2858 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2860 if (packet_size > exp_max_sz) {
2861 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2862 packet_size, exp_max_sz);
2867 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2868 data_size = packet_size - header_size;
2869 } else if (apv <= 94) {
2870 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2871 data_size = packet_size - header_size;
2872 D_ASSERT(data_size == 0);
2874 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2875 data_size = packet_size - header_size;
2876 D_ASSERT(data_size == 0);
2879 /* initialize verify_alg and csums_alg */
2880 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2882 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2885 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2889 if (data_size > SHARED_SECRET_MAX) {
2890 dev_err(DEV, "verify-alg too long, "
2891 "peer wants %u, accepting only %u byte\n",
2892 data_size, SHARED_SECRET_MAX);
2896 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2899 /* we expect NUL terminated string */
2900 /* but just in case someone tries to be evil */
2901 D_ASSERT(p->verify_alg[data_size-1] == 0);
2902 p->verify_alg[data_size-1] = 0;
2904 } else /* apv >= 89 */ {
2905 /* we still expect NUL terminated strings */
2906 /* but just in case someone tries to be evil */
2907 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2908 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2909 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2910 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2913 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2914 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2915 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2916 mdev->sync_conf.verify_alg, p->verify_alg);
2919 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2920 p->verify_alg, "verify-alg");
2921 if (IS_ERR(verify_tfm)) {
2927 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2928 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2929 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2930 mdev->sync_conf.csums_alg, p->csums_alg);
2933 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2934 p->csums_alg, "csums-alg");
2935 if (IS_ERR(csums_tfm)) {
2942 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2943 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2944 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2945 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2946 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
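/* size the resync-plan fifo to (roughly) one slot per controller step
 * (SLEEP_TIME) over the configured plan-ahead window */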
2948 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2949 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2950 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
2952 dev_err(DEV, "kzalloc of fifo_buffer failed");
2958 spin_lock(&mdev->peer_seq_lock);
2959 /* lock against drbd_nl_syncer_conf() */
2961 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2962 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2963 crypto_free_hash(mdev->verify_tfm);
2964 mdev->verify_tfm = verify_tfm;
2965 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2968 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2969 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2970 crypto_free_hash(mdev->csums_tfm);
2971 mdev->csums_tfm = csums_tfm;
2972 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2974 if (fifo_size != mdev->rs_plan_s.size) {
2975 kfree(mdev->rs_plan_s.values);
2976 mdev->rs_plan_s.values = rs_plan_s;
2977 mdev->rs_plan_s.size = fifo_size;
2978 mdev->rs_planed = 0;
2980 spin_unlock(&mdev->peer_seq_lock);
2985 /* just for completeness: actually not needed,
2986 * as this is not reached if csums_tfm was ok. */
2987 crypto_free_hash(csums_tfm);
2988 /* but free the verify_tfm again, if csums_tfm did not work out */
2989 crypto_free_hash(verify_tfm);
2990 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2994 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2996 /* sorry, we currently have no working implementation
2997 * of distributed TCQ */
3000 /* warn if the arguments differ by more than 12.5% */
3001 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3002 const char *s, sector_t a, sector_t b)
3005 if (a == 0 || b == 0)
3007 d = (a > b) ? (a - b) : (b - a);
3008 if (d > (a>>3) || d > (b>>3))
3009 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3010 (unsigned long long)a, (unsigned long long)b);
3013 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3015 struct p_sizes *p = &mdev->data.rbuf.sizes;
3016 enum determine_dev_size dd = unchanged;
3017 unsigned int max_seg_s;
3018 sector_t p_size, p_usize, my_usize;
3019 int ldsc = 0; /* local disk size changed */
3020 enum dds_flags ddsf;
3022 p_size = be64_to_cpu(p->d_size);
3023 p_usize = be64_to_cpu(p->u_size);
3025 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
3026 dev_err(DEV, "some backing storage is needed\n");
3027 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3031 /* just store the peer's disk size for now.
3032 * we still need to figure out whether we accept that. */
3033 mdev->p_size = p_size;
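/* treat a size of 0 as "not configured": pick the smaller non-zero value */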
3035 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
3036 if (get_ldev(mdev)) {
3037 warn_if_differ_considerably(mdev, "lower level device sizes",
3038 p_size, drbd_get_max_capacity(mdev->ldev));
3039 warn_if_differ_considerably(mdev, "user requested size",
3040 p_usize, mdev->ldev->dc.disk_size);
3042 /* if this is the first connect, or an otherwise expected
3043 * param exchange, choose the minimum */
3044 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3045 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3048 my_usize = mdev->ldev->dc.disk_size;
3050 if (mdev->ldev->dc.disk_size != p_usize) {
3051 mdev->ldev->dc.disk_size = p_usize;
3052 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3053 (unsigned long)mdev->ldev->dc.disk_size);
3056 /* Never shrink a device with usable data during connect.
3057 But allow online shrinking if we are connected. */
3058 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
3059 drbd_get_capacity(mdev->this_bdev) &&
3060 mdev->state.disk >= D_OUTDATED &&
3061 mdev->state.conn < C_CONNECTED) {
3062 dev_err(DEV, "The peer's disk size is too small!\n");
3063 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3064 mdev->ldev->dc.disk_size = my_usize;
3072 ddsf = be16_to_cpu(p->dds_flags);
3073 if (get_ldev(mdev)) {
3074 dd = drbd_determin_dev_size(mdev, ddsf);
3076 if (dd == dev_size_error)
3080 /* I am diskless, need to accept the peer's size. */
3081 drbd_set_my_capacity(mdev, p_size);
3084 if (get_ldev(mdev)) {
3085 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3086 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3090 if (mdev->agreed_pro_version < 94)
3091 max_seg_s = be32_to_cpu(p->max_segment_size);
3092 else if (mdev->agreed_pro_version == 94)
3093 max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
3094 else /* drbd 8.3.8 onwards */
3095 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3097 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
3098 drbd_setup_queue_param(mdev, max_seg_s);
3100 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3104 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3105 if (be64_to_cpu(p->c_size) !=
3106 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3107 /* we have different sizes, probably peer
3108 * needs to know my new size... */
3109 drbd_send_sizes(mdev, 0, ddsf);
3111 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3112 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3113 if (mdev->state.pdsk >= D_INCONSISTENT &&
3114 mdev->state.disk >= D_INCONSISTENT) {
3115 if (ddsf & DDSF_NO_RESYNC)
3116 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3118 resync_after_online_grow(mdev);
3120 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3127 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3129 struct p_uuids *p = &mdev->data.rbuf.uuids;
3133 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3135 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3136 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3138 kfree(mdev->p_uuid);
3139 mdev->p_uuid = p_uuid;
3141 if (mdev->state.conn < C_CONNECTED &&
3142 mdev->state.disk < D_INCONSISTENT &&
3143 mdev->state.role == R_PRIMARY &&
3144 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3145 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3146 (unsigned long long)mdev->ed_uuid);
3147 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3151 if (get_ldev(mdev)) {
3152 int skip_initial_sync =
3153 mdev->state.conn == C_CONNECTED &&
3154 mdev->agreed_pro_version >= 90 &&
3155 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3156 (p_uuid[UI_FLAGS] & 8);
3157 if (skip_initial_sync) {
3158 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3159 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3160 "clear_n_write from receive_uuids");
3161 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3162 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3163 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3168 } else if (mdev->state.disk < D_INCONSISTENT &&
3169 mdev->state.role == R_PRIMARY) {
3170 /* I am a diskless primary, the peer just created a new current UUID for me. */
3172 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3175 /* Before we test for the disk state, we should wait until an eventually
3176 ongoing cluster wide state change is finished. That is important if
3177 we are primary and are detaching from our disk. We need to see the
3178 new disk state... */
3179 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3180 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3181 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3187 * convert_state() - Converts the peer's view of the cluster state to our point of view
3188 * @ps: The state as seen by the peer.
3190 static union drbd_state convert_state(union drbd_state ps)
3192 union drbd_state ms;
3194 static enum drbd_conns c_tab[] = {
3195 [C_CONNECTED] = C_CONNECTED,
3197 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3198 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3199 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3200 [C_VERIFY_S] = C_VERIFY_T,
3206 ms.conn = c_tab[ps.conn];
3211 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3216 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3218 struct p_req_state *p = &mdev->data.rbuf.req_state;
3219 union drbd_state mask, val;
3222 mask.i = be32_to_cpu(p->mask);
3223 val.i = be32_to_cpu(p->val);
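/* mask/val describe the requested state change in the peer's view of the
 * cluster; convert_state() below translates both into our point of view */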
3225 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3226 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3227 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3231 mask = convert_state(mask);
3232 val = convert_state(val);
3234 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3236 drbd_send_sr_reply(mdev, rv);
3242 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3244 struct p_state *p = &mdev->data.rbuf.state;
3245 union drbd_state os, ns, peer_state;
3246 enum drbd_disk_state real_peer_disk;
3247 enum chg_state_flags cs_flags;
3250 peer_state.i = be32_to_cpu(p->state);
3252 real_peer_disk = peer_state.disk;
3253 if (peer_state.disk == D_NEGOTIATING) {
3254 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3255 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3258 spin_lock_irq(&mdev->req_lock);
3260 os = ns = mdev->state;
3261 spin_unlock_irq(&mdev->req_lock);
3263 if (ns.conn == C_WF_REPORT_PARAMS)
3264 ns.conn = C_CONNECTED;
3266 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3267 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3268 int cr; /* consider resync */
3270 /* if we established a new connection */
3271 cr = (os.conn < C_CONNECTED);
3272 /* if we had an established connection
3273 * and one of the nodes newly attaches a disk */
3274 cr |= (os.conn == C_CONNECTED &&
3275 (peer_state.disk == D_NEGOTIATING ||
3276 os.disk == D_NEGOTIATING));
3277 /* if we have both been inconsistent, and the peer has been
3278 * forced to be UpToDate with --overwrite-data */
3279 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3280 /* if we had been plain connected, and the admin requested to
3281 * start a sync by "invalidate" or "invalidate-remote" */
3282 cr |= (os.conn == C_CONNECTED &&
3283 (peer_state.conn >= C_STARTING_SYNC_S &&
3284 peer_state.conn <= C_WF_BITMAP_T));
3287 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3290 if (ns.conn == C_MASK) {
3291 ns.conn = C_CONNECTED;
3292 if (mdev->state.disk == D_NEGOTIATING) {
3293 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3294 } else if (peer_state.disk == D_NEGOTIATING) {
3295 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3296 peer_state.disk = D_DISKLESS;
3297 real_peer_disk = D_DISKLESS;
3299 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3301 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3302 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3308 spin_lock_irq(&mdev->req_lock);
3309 if (mdev->state.i != os.i)
3311 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3312 ns.peer = peer_state.role;
3313 ns.pdsk = real_peer_disk;
3314 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3315 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3316 ns.disk = mdev->new_state_tmp.disk;
3317 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3318 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3319 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3320 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3321 for temporary network outages! */
3322 spin_unlock_irq(&mdev->req_lock);
3323 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3325 drbd_uuid_new_current(mdev);
3326 clear_bit(NEW_CUR_UUID, &mdev->flags);
3327 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3330 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3332 spin_unlock_irq(&mdev->req_lock);
3334 if (rv < SS_SUCCESS) {
3335 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3339 if (os.conn > C_WF_REPORT_PARAMS) {
3340 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3341 peer_state.disk != D_NEGOTIATING ) {
3342 /* we want resync, peer has not yet decided to sync... */
3343 /* Nowadays only used when forcing a node into primary role and
3344 setting its disk to UpToDate with that */
3345 drbd_send_uuids(mdev);
3346 drbd_send_state(mdev);
3350 mdev->net_conf->want_lose = 0;
3352 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3357 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3359 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3361 wait_event(mdev->misc_wait,
3362 mdev->state.conn == C_WF_SYNC_UUID ||
3363 mdev->state.conn < C_CONNECTED ||
3364 mdev->state.disk < D_NEGOTIATING);
3366 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3368 /* Here the _drbd_uuid_ functions are right, current should
3369 _not_ be rotated into the history */
3370 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3371 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3372 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3374 drbd_start_resync(mdev, C_SYNC_TARGET);
3378 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3383 enum receive_bitmap_ret { OK, DONE, FAILED };
3385 static enum receive_bitmap_ret
3386 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3387 unsigned long *buffer, struct bm_xfer_ctx *c)
3389 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3390 unsigned want = num_words * sizeof(long);
3392 if (want != data_size) {
3393 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3398 if (drbd_recv(mdev, buffer, want) != want)
3401 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3403 c->word_offset += num_words;
3404 c->bit_offset = c->word_offset * BITS_PER_LONG;
3405 if (c->bit_offset > c->bm_bits)
3406 c->bit_offset = c->bm_bits;
3411 static enum receive_bitmap_ret
3412 recv_bm_rle_bits(struct drbd_conf *mdev,
3413 struct p_compressed_bm *p,
3414 struct bm_xfer_ctx *c)
3416 struct bitstream bs;
3420 unsigned long s = c->bit_offset;
3422 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3423 int toggle = DCBP_get_start(p);
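/* the bitmap is transferred as alternating run lengths of clear and set
 * bits (VLI encoded); "toggle" tells whether the first run is of set bits */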
3427 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3429 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3433 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3434 bits = vli_decode_bits(&rl, look_ahead);
3440 if (e >= c->bm_bits) {
3441 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3444 _drbd_bm_set_bits(mdev, s, e);
3448 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3449 have, bits, look_ahead,
3450 (unsigned int)(bs.cur.b - p->code),
3451 (unsigned int)bs.buf_len);
3454 look_ahead >>= bits;
3457 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3460 look_ahead |= tmp << have;
3465 bm_xfer_ctx_bit_to_word_offset(c);
3467 return (s == c->bm_bits) ? DONE : OK;
3470 static enum receive_bitmap_ret
3471 decode_bitmap_c(struct drbd_conf *mdev,
3472 struct p_compressed_bm *p,
3473 struct bm_xfer_ctx *c)
3475 if (DCBP_get_code(p) == RLE_VLI_Bits)
3476 return recv_bm_rle_bits(mdev, p, c);
3478 /* other variants had been implemented for evaluation,
3479 * but have been dropped as this one turned out to be "best"
3480 * during all our tests. */
3482 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3483 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3487 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3488 const char *direction, struct bm_xfer_ctx *c)
3490 /* what would it take to transfer it "plaintext" */
3491 unsigned plain = sizeof(struct p_header80) *
3492 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3493 + c->bm_words * sizeof(long);
3494 unsigned total = c->bytes[0] + c->bytes[1];
3497 /* total can not be zero. but just in case: */
3501 /* don't report if not compressed */
3505 /* total < plain. check for overflow, still */
3506 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3507 : (1000 * total / plain);
3513 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3514 "total %u; compression: %u.%u%%\n",
3516 c->bytes[1], c->packets[1],
3517 c->bytes[0], c->packets[0],
3518 total, r/10, r % 10);
3521 /* Since we are processing the bitfield from lower addresses to higher,
3522 it does not matter whether we process it in 32 bit chunks or 64 bit
3523 chunks, as long as it is little endian. (Understand it as a byte stream,
3524 beginning with the lowest byte...) If we used big endian,
3525 we would need to process it from the highest address to the lowest,
3526 in order to be agnostic to the 32 vs 64 bits issue.
3528 returns 0 on failure, 1 if we successfully received it. */
3529 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3531 struct bm_xfer_ctx c;
3533 enum receive_bitmap_ret ret;
3535 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3537 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3539 drbd_bm_lock(mdev, "receive bitmap");
3541 /* maybe we should use some per thread scratch page,
3542 * and allocate that during initial device creation? */
3543 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3545 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3549 c = (struct bm_xfer_ctx) {
3550 .bm_bits = drbd_bm_bits(mdev),
3551 .bm_words = drbd_bm_words(mdev),
3555 if (cmd == P_BITMAP) {
3556 ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
3557 } else if (cmd == P_COMPRESSED_BITMAP) {
3558 /* MAYBE: sanity check that we speak proto >= 90,
3559 * and the feature is enabled! */
3560 struct p_compressed_bm *p;
3562 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3563 dev_err(DEV, "ReportCBitmap packet too large\n");
3566 /* use the page buff */
3568 memcpy(p, h, sizeof(*h));
3569 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3571 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3572 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3575 ret = decode_bitmap_c(mdev, p, &c);
3577 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3581 c.packets[cmd == P_BITMAP]++;
3582 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
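/* index 1 accounts plain bitmap packets, index 0 compressed (RLE) ones */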
3587 if (!drbd_recv_header(mdev, &cmd, &data_size))
3589 } while (ret == OK);
3593 INFO_bm_xfer_stats(mdev, "receive", &c);
3595 if (mdev->state.conn == C_WF_BITMAP_T) {
3596 ok = !drbd_send_bitmap(mdev);
3599 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3600 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3601 D_ASSERT(ok == SS_SUCCESS);
3602 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3603 /* admin may have requested C_DISCONNECTING,
3604 * other threads may have noticed network errors */
3605 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3606 drbd_conn_str(mdev->state.conn));
3611 drbd_bm_unlock(mdev);
3612 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3613 drbd_start_resync(mdev, C_SYNC_SOURCE);
3614 free_page((unsigned long) buffer);
3618 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3620 /* TODO zero copy sink :) */
3621 static char sink[128];
3624 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3629 want = min_t(int, size, sizeof(sink));
3630 r = drbd_recv(mdev, sink, want);
3631 ERR_IF(r <= 0) break;
3637 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3639 if (mdev->state.disk >= D_INCONSISTENT)
3642 /* Make sure we've acked all the TCP data associated
3643 * with the data requests being unplugged */
3644 drbd_tcp_quickack(mdev->data.socket);
3649 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3654 drbd_cmd_handler_f function;
3657 static struct data_cmd drbd_cmd_handler[] = {
3658 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3659 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3660 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3661 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3662 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3663 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3664 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3665 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3666 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3667 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3668 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3669 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3670 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3671 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3672 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3673 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3674 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3675 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3676 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3677 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3678 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3679 /* anything missing from this table is in
3680 * the asender_tbl, see get_asender_cmd */
3681 [P_MAX_CMD] = { 0, 0, NULL },
3684 /* All handler functions that expect a sub-header get that sub-header in
3685 mdev->data.rbuf.header.head.payload.
3687 Usually in mdev->data.rbuf.header.head the callback can find the usual
3688 p_header, but they may not rely on that, since there is also p_header95. */
3691 static void drbdd(struct drbd_conf *mdev)
3693 union p_header *header = &mdev->data.rbuf.header;
3694 unsigned int packet_size;
3695 enum drbd_packets cmd;
3696 size_t shs; /* sub header size */
3699 while (get_t_state(&mdev->receiver) == Running) {
3700 drbd_thread_current_set_cpu(mdev);
3701 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3704 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3705 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3709 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3710 rv = drbd_recv(mdev, &header->h80.payload, shs);
3711 if (unlikely(rv != shs)) {
3712 dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
3716 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3717 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3721 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3723 if (unlikely(!rv)) {
3724 dev_err(DEV, "error receiving %s, l: %d!\n",
3725 cmdname(cmd), packet_size);
3732 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3736 void drbd_flush_workqueue(struct drbd_conf *mdev)
3738 struct drbd_wq_barrier barr;
3740 barr.w.cb = w_prev_work_done;
3741 init_completion(&barr.done);
3742 drbd_queue_work(&mdev->data.work, &barr.w);
3743 wait_for_completion(&barr.done);
3746 void drbd_free_tl_hash(struct drbd_conf *mdev)
3748 struct hlist_head *h;
3750 spin_lock_irq(&mdev->req_lock);
3752 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3753 spin_unlock_irq(&mdev->req_lock);
3757 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3759 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3760 (int)(h - mdev->ee_hash), h->first);
3761 kfree(mdev->ee_hash);
3762 mdev->ee_hash = NULL;
3763 mdev->ee_hash_s = 0;
3766 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3768 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3769 (int)(h - mdev->tl_hash), h->first);
3770 kfree(mdev->tl_hash);
3771 mdev->tl_hash = NULL;
3772 mdev->tl_hash_s = 0;
3773 spin_unlock_irq(&mdev->req_lock);
3776 static void drbd_disconnect(struct drbd_conf *mdev)
3778 enum drbd_fencing_p fp;
3779 union drbd_state os, ns;
3780 int rv = SS_UNKNOWN_ERROR;
3783 if (mdev->state.conn == C_STANDALONE)
3785 if (mdev->state.conn >= C_WF_CONNECTION)
3786 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3787 drbd_conn_str(mdev->state.conn));
3789 /* asender does not clean up anything. it must not interfere, either */
3790 drbd_thread_stop(&mdev->asender);
3791 drbd_free_sock(mdev);
3793 /* wait for current activity to cease. */
3794 spin_lock_irq(&mdev->req_lock);
3795 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3796 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3797 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3798 spin_unlock_irq(&mdev->req_lock);
3800 /* We do not have data structures that would allow us to
3801 * get the rs_pending_cnt down to 0 again.
3802 * * On C_SYNC_TARGET we do not have any data structures describing
3803 * the pending RSDataRequest's we have sent.
3804 * * On C_SYNC_SOURCE there is no data structure that tracks
3805 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3806 * And no, it is not the sum of the reference counts in the
3807 * resync_LRU. The resync_LRU tracks the whole operation including
3808 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3810 drbd_rs_cancel_all(mdev);
3812 mdev->rs_failed = 0;
3813 atomic_set(&mdev->rs_pending_cnt, 0);
3814 wake_up(&mdev->misc_wait);
3816 /* make sure syncer is stopped and w_resume_next_sg queued */
3817 del_timer_sync(&mdev->resync_timer);
3818 resync_timer_fn((unsigned long)mdev);
3820 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3821 * w_make_resync_request etc. which may still be on the worker queue
3822 * to be "canceled" */
3823 drbd_flush_workqueue(mdev);
3825 /* This also does reclaim_net_ee(). If we do this too early, we might
3826 * miss some resync ee and pages.*/
3827 drbd_process_done_ee(mdev);
3829 kfree(mdev->p_uuid);
3830 mdev->p_uuid = NULL;
3832 if (!is_susp(mdev->state))
3835 dev_info(DEV, "Connection closed\n");
3840 if (get_ldev(mdev)) {
3841 fp = mdev->ldev->dc.fencing;
3845 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3846 drbd_try_outdate_peer_async(mdev);
3848 spin_lock_irq(&mdev->req_lock);
3850 if (os.conn >= C_UNCONNECTED) {
3851 /* Do not restart in case we are C_DISCONNECTING */
3853 ns.conn = C_UNCONNECTED;
3854 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3856 spin_unlock_irq(&mdev->req_lock);
3858 if (os.conn == C_DISCONNECTING) {
3859 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3861 if (!is_susp(mdev->state)) {
3862 /* we must not free the tl_hash
3863 * while application io is still on the fly */
3864 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3865 drbd_free_tl_hash(mdev);
3868 crypto_free_hash(mdev->cram_hmac_tfm);
3869 mdev->cram_hmac_tfm = NULL;
3871 kfree(mdev->net_conf);
3872 mdev->net_conf = NULL;
3873 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3876 /* tcp_close and release of sendpage pages can be deferred. I don't
3877 * want to use SO_LINGER, because apparently it can be deferred for
3878 * more than 20 seconds (longest time I checked).
3880 * Actually we don't care for exactly when the network stack does its
3881 * put_page(), but release our reference on these pages right here.
3883 i = drbd_release_ee(mdev, &mdev->net_ee);
3885 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3886 i = atomic_read(&mdev->pp_in_use_by_net);
3888 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3889 i = atomic_read(&mdev->pp_in_use);
3891 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3893 D_ASSERT(list_empty(&mdev->read_ee));
3894 D_ASSERT(list_empty(&mdev->active_ee));
3895 D_ASSERT(list_empty(&mdev->sync_ee));
3896 D_ASSERT(list_empty(&mdev->done_ee));
3898 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3899 atomic_set(&mdev->current_epoch->epoch_size, 0);
3900 D_ASSERT(list_empty(&mdev->current_epoch->list));
3904 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3905 * we can agree on is stored in agreed_pro_version.
3907 * feature flags and the reserved array should be enough room for future
3908 * enhancements of the handshake protocol, and possible plugins...
3910 * for now, they are expected to be zero, but ignored.
3912 static int drbd_send_handshake(struct drbd_conf *mdev)
3914 /* ASSERT current == mdev->receiver ... */
3915 struct p_handshake *p = &mdev->data.sbuf.handshake;
3918 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3919 dev_err(DEV, "interrupted during initial handshake\n");
3920 return 0; /* interrupted. not ok. */
3923 if (mdev->data.socket == NULL) {
3924 mutex_unlock(&mdev->data.mutex);
3928 memset(p, 0, sizeof(*p));
3929 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3930 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3931 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3932 (struct p_header80 *)p, sizeof(*p), 0 );
3933 mutex_unlock(&mdev->data.mutex);
3939 * 1 yes, we have a valid connection
3940 * 0 oops, did not work out, please try again
3941 * -1 peer talks different language,
3942 * no point in trying again, please go standalone.
3944 static int drbd_do_handshake(struct drbd_conf *mdev)
3946 /* ASSERT current == mdev->receiver ... */
3947 struct p_handshake *p = &mdev->data.rbuf.handshake;
3948 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
3949 unsigned int length;
3950 enum drbd_packets cmd;
3953 rv = drbd_send_handshake(mdev);
3957 rv = drbd_recv_header(mdev, &cmd, &length);
3961 if (cmd != P_HAND_SHAKE) {
3962 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3967 if (length != expect) {
3968 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3973 rv = drbd_recv(mdev, &p->head.payload, expect);
3976 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3980 p->protocol_min = be32_to_cpu(p->protocol_min);
3981 p->protocol_max = be32_to_cpu(p->protocol_max);
3982 if (p->protocol_max == 0)
3983 p->protocol_max = p->protocol_min;
3985 if (PRO_VERSION_MAX < p->protocol_min ||
3986 PRO_VERSION_MIN > p->protocol_max)
3989 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3991 dev_info(DEV, "Handshake successful: "
3992 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3997 dev_err(DEV, "incompatible DRBD dialects: "
3998 "I support %d-%d, peer supports %d-%d\n",
3999 PRO_VERSION_MIN, PRO_VERSION_MAX,
4000 p->protocol_min, p->protocol_max);
4004 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4005 static int drbd_do_auth(struct drbd_conf *mdev)
4007 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4008 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4012 #define CHALLENGE_LEN 64
4016 0 - failed, try again (network error),
4017 -1 - auth failed, don't try again.
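/* CRAM-HMAC style mutual authentication (sketch of the exchange below):
 * each side sends a random challenge, computes an HMAC of the peer's
 * challenge keyed with the shared secret, and verifies the peer's
 * response against its own locally computed digest. */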
4020 static int drbd_do_auth(struct drbd_conf *mdev)
4022 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4023 struct scatterlist sg;
4024 char *response = NULL;
4025 char *right_response = NULL;
4026 char *peers_ch = NULL;
4027 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4028 unsigned int resp_size;
4029 struct hash_desc desc;
4030 enum drbd_packets cmd;
4031 unsigned int length;
4034 desc.tfm = mdev->cram_hmac_tfm;
4037 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4038 (u8 *)mdev->net_conf->shared_secret, key_len);
4040 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4045 get_random_bytes(my_challenge, CHALLENGE_LEN);
4047 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4051 rv = drbd_recv_header(mdev, &cmd, &length);
4055 if (cmd != P_AUTH_CHALLENGE) {
4056 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4062 if (length > CHALLENGE_LEN * 2) {
4063 dev_err(DEV, "AuthChallenge payload too big.\n");
4068 peers_ch = kmalloc(length, GFP_NOIO);
4069 if (peers_ch == NULL) {
4070 dev_err(DEV, "kmalloc of peers_ch failed\n");
4075 rv = drbd_recv(mdev, peers_ch, length);
4078 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4083 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4084 response = kmalloc(resp_size, GFP_NOIO);
4085 if (response == NULL) {
4086 dev_err(DEV, "kmalloc of response failed\n");
4091 sg_init_table(&sg, 1);
4092 sg_set_buf(&sg, peers_ch, length);
4094 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4096 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4101 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4105 rv = drbd_recv_header(mdev, &cmd, &length);
4109 if (cmd != P_AUTH_RESPONSE) {
4110 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4116 if (length != resp_size) {
4117 dev_err(DEV, "AuthResponse payload has wrong size\n");
4122 rv = drbd_recv(mdev, response , resp_size);
4124 if (rv != resp_size) {
4125 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4130 right_response = kmalloc(resp_size, GFP_NOIO);
4131 if (right_response == NULL) {
4132 dev_err(DEV, "kmalloc of right_response failed\n");
4137 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4139 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4141 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4146 rv = !memcmp(response, right_response, resp_size);
4149 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4150 resp_size, mdev->net_conf->cram_hmac_alg);
4157 kfree(right_response);
4163 int drbdd_init(struct drbd_thread *thi)
4165 struct drbd_conf *mdev = thi->mdev;
4166 unsigned int minor = mdev_to_minor(mdev);
4169 sprintf(current->comm, "drbd%d_receiver", minor);
4171 dev_info(DEV, "receiver (re)started\n");
4174 h = drbd_connect(mdev);
4176 drbd_disconnect(mdev);
4177 __set_current_state(TASK_INTERRUPTIBLE);
4178 schedule_timeout(HZ);
4181 dev_warn(DEV, "Discarding network configuration.\n");
4182 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4187 if (get_net_conf(mdev)) {
4193 drbd_disconnect(mdev);
4195 dev_info(DEV, "receiver terminated\n");
4199 /* ********* acknowledge sender ******** */
4201 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4203 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4205 int retcode = be32_to_cpu(p->retcode);
4207 if (retcode >= SS_SUCCESS) {
4208 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4210 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4211 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4212 drbd_set_st_err_str(retcode), retcode);
4214 wake_up(&mdev->state_wait);
4219 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4221 return drbd_send_ping_ack(mdev);
4225 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4227 /* restore idle timeout */
4228 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4229 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4230 wake_up(&mdev->misc_wait);
4235 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4237 struct p_block_ack *p = (struct p_block_ack *)h;
4238 sector_t sector = be64_to_cpu(p->sector);
4239 int blksize = be32_to_cpu(p->blksize);
4241 D_ASSERT(mdev->agreed_pro_version >= 89);
4243 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4245 if (get_ldev(mdev)) {
4246 drbd_rs_complete_io(mdev, sector);
4247 drbd_set_in_sync(mdev, sector, blksize);
4248 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4249 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4252 dec_rs_pending(mdev);
4253 atomic_add(blksize >> 9, &mdev->rs_sect_in);
4258 /* when we receive the ACK for a write request,
4259 * verify that we actually know about it */
4260 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4261 u64 id, sector_t sector)
4263 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4264 struct hlist_node *n;
4265 struct drbd_request *req;
4267 hlist_for_each_entry(req, n, slot, colision) {
4268 if ((unsigned long)req == (unsigned long)id) {
4269 if (req->sector != sector) {
4270 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4271 "wrong sector (%llus versus %llus)\n", req,
4272 (unsigned long long)req->sector,
4273 (unsigned long long)sector);
4279 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4280 (void *)(unsigned long)id, (unsigned long long)sector);
4284 typedef struct drbd_request *(req_validator_fn)
4285 (struct drbd_conf *mdev, u64 id, sector_t sector);
4287 static int validate_req_change_req_state(struct drbd_conf *mdev,
4288 u64 id, sector_t sector, req_validator_fn validator,
4289 const char *func, enum drbd_req_event what)
4291 struct drbd_request *req;
4292 struct bio_and_error m;
4294 spin_lock_irq(&mdev->req_lock);
4295 req = validator(mdev, id, sector);
4296 if (unlikely(!req)) {
4297 spin_unlock_irq(&mdev->req_lock);
4298 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4301 __req_mod(req, what, &m);
4302 spin_unlock_irq(&mdev->req_lock);
4305 complete_master_bio(mdev, &m);
4309 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4311 struct p_block_ack *p = (struct p_block_ack *)h;
4312 sector_t sector = be64_to_cpu(p->sector);
4313 int blksize = be32_to_cpu(p->blksize);
4314 enum drbd_req_event what;
4316 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4318 if (is_syncer_block_id(p->block_id)) {
4319 drbd_set_in_sync(mdev, sector, blksize);
4320 dec_rs_pending(mdev);
4323 switch (be16_to_cpu(h->command)) {
4324 case P_RS_WRITE_ACK:
4325 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4326 what = write_acked_by_peer_and_sis;
4329 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4330 what = write_acked_by_peer;
4333 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4334 what = recv_acked_by_peer;
4337 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4338 what = conflict_discarded_by_peer;
4345 return validate_req_change_req_state(mdev, p->block_id, sector,
4346 _ack_id_to_req, __func__ , what);
4349 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4351 struct p_block_ack *p = (struct p_block_ack *)h;
4352 sector_t sector = be64_to_cpu(p->sector);
4354 if (__ratelimit(&drbd_ratelimit_state))
4355 dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");
4357 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4359 if (is_syncer_block_id(p->block_id)) {
4360 int size = be32_to_cpu(p->blksize);
4361 dec_rs_pending(mdev);
4362 drbd_rs_failed_io(mdev, sector, size);
4365 return validate_req_change_req_state(mdev, p->block_id, sector,
4366 _ack_id_to_req, __func__ , neg_acked);
4369 static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4371 struct p_block_ack *p = (struct p_block_ack *)h;
4372 sector_t sector = be64_to_cpu(p->sector);
4374 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4375 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4376 (unsigned long long)sector, be32_to_cpu(p->blksize));
4378 return validate_req_change_req_state(mdev, p->block_id, sector,
4379 _ar_id_to_req, __func__ , neg_acked);
4382 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4386 struct p_block_ack *p = (struct p_block_ack *)h;
4388 sector = be64_to_cpu(p->sector);
4389 size = be32_to_cpu(p->blksize);
4391 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4393 dec_rs_pending(mdev);
4395 if (get_ldev_if_state(mdev, D_FAILED)) {
4396 drbd_rs_complete_io(mdev, sector);
4397 drbd_rs_failed_io(mdev, sector, size);
4404 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4406 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4408 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	if (--mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}
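
/* P_DELAY_PROBE packets carry nothing the asender needs to act on;
 * consume and ignore them. */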
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
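
/* The asender thread: sends pings, processes the done_ee list, and receives
 * packets on the meta socket, dispatching them via the table above.
 * Runs until the connection is torn down. */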
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf = h;
	int received = 0;
	int expect = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev)) {
				dev_err(DEV, "process_done_ee() = NOT_OK\n");
				goto reconnect;
			}
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv  < expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf = h;
			received = 0;
			expect = sizeof(struct p_header80);
			cmd = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}