4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 #include <linux/drbd.h>
33 #include <linux/file.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
46 #include <linux/string.h>
47 #include <linux/scatterlist.h>
55 struct drbd_epoch *epoch;
64 static int drbd_do_handshake(struct drbd_conf *mdev);
65 static int drbd_do_auth(struct drbd_conf *mdev);
67 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
68 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
70 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
72 struct drbd_epoch *prev;
73 spin_lock(&mdev->epoch_lock);
74 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
75 if (prev == epoch || prev == mdev->current_epoch)
77 spin_unlock(&mdev->epoch_lock);
81 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
84 * some helper functions to deal with singly linked page lists,
85 * page->private being our "next" pointer.
88 /* If at least n pages are linked at head, get n pages off.
89 * Otherwise, don't modify head, and return NULL.
90 * Locking is the responsibility of the caller.
92 static struct page *page_chain_del(struct page **head, int n)
106 tmp = page_chain_next(page);
108 break; /* found sufficient pages */
110 /* insufficient pages, don't use any of them. */
115 /* add end of list marker for the returned list */
116 set_page_private(page, 0);
117 /* actual return value, and adjustment of head */
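/*
 * Illustrative sketch (not part of the original source): walking such a
 * page->private chain, assuming page_chain_next() simply reads
 * page_private(page) back as a struct page pointer. The loop ends at
 * the set_page_private(page, 0) end-of-list marker set above.
 */
static unsigned int page_chain_count(struct page *page)
{
	unsigned int n = 0;
	for (; page; page = page_chain_next(page))
		n++;
	return n;
}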
123 /* may be used outside of locks to find the tail of a (usually short)
124 * "private" page chain, before adding it back to a global chain head
125 * with page_chain_add() under a spinlock. */
126 static struct page *page_chain_tail(struct page *page, int *len)
130 while ((tmp = page_chain_next(page)))
137 static int page_chain_free(struct page *page)
141 page_chain_for_each_safe(page, tmp) {
148 static void page_chain_add(struct page **head,
149 struct page *chain_first, struct page *chain_last)
153 tmp = page_chain_tail(chain_first, NULL);
154 BUG_ON(tmp != chain_last);
157 /* add chain to head */
158 set_page_private(chain_last, (unsigned long)*head);
162 static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
164 struct page *page = NULL;
165 struct page *tmp = NULL;
168 /* Yes, testing drbd_pp_vacant outside the lock is racy.
169 * So what. It saves a spin_lock. */
170 if (drbd_pp_vacant >= number) {
171 spin_lock(&drbd_pp_lock);
172 page = page_chain_del(&drbd_pp_pool, number);
174 drbd_pp_vacant -= number;
175 spin_unlock(&drbd_pp_lock);
180 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
181 * "criss-cross" setup, that might cause write-out on some other DRBD,
182 * which in turn might block on the other node at this very place. */
183 for (i = 0; i < number; i++) {
184 tmp = alloc_page(GFP_TRY);
187 set_page_private(tmp, (unsigned long)page);
194 /* Not enough pages immediately available this time.
195 * No need to jump around here, drbd_pp_alloc will retry this
196 * function "soon". */
198 tmp = page_chain_tail(page, NULL);
199 spin_lock(&drbd_pp_lock);
200 page_chain_add(&drbd_pp_pool, page, tmp);
202 spin_unlock(&drbd_pp_lock);
207 /* kick lower level device, if we have more than (arbitrary number)
208 * reference counts on it, which typically are locally submitted io
209 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
210 static void maybe_kick_lo(struct drbd_conf *mdev)
212 if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
216 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
218 struct drbd_epoch_entry *e;
219 struct list_head *le, *tle;
221 /* The EEs are always appended to the end of the list. Since
222 they are sent in order over the wire, they have to finish
223 in order. As soon as we see the first unfinished one, we can
224 stop examining the list... */
226 list_for_each_safe(le, tle, &mdev->net_ee) {
227 e = list_entry(le, struct drbd_epoch_entry, w.list);
228 if (drbd_ee_has_active_page(e))
230 list_move(le, to_be_freed);
234 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
236 LIST_HEAD(reclaimed);
237 struct drbd_epoch_entry *e, *t;
240 spin_lock_irq(&mdev->req_lock);
241 reclaim_net_ee(mdev, &reclaimed);
242 spin_unlock_irq(&mdev->req_lock);
244 list_for_each_entry_safe(e, t, &reclaimed, w.list)
245 drbd_free_ee(mdev, e);
249 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
250 * @mdev: DRBD device.
251 * @number: number of pages requested
252 * @retry: whether to retry, if not enough pages are available right now
254 * Tries to allocate number pages, first from our own page pool, then from
255 * the kernel, unless this allocation would exceed the max_buffers setting.
256 * Possibly retry until DRBD frees sufficient pages somewhere else.
258 * Returns a page chain linked via page->private.
260 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
262 struct page *page = NULL;
265 /* Yes, we may run up to @number over max_buffers. If we
266 * follow it strictly, the admin will get it wrong anyways. */
267 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
268 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
270 while (page == NULL) {
271 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
273 drbd_kick_lo_and_reclaim_net(mdev);
275 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
276 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
284 if (signal_pending(current)) {
285 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
291 finish_wait(&drbd_pp_wait, &wait);
294 atomic_add(number, &mdev->pp_in_use);
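/* Usage sketch (hypothetical caller; the real one is drbd_alloc_ee()
 * further down): allocate a chain large enough for data_size bytes,
 * and hand every page back to the pool when done:
 *
 *	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 *	struct page *page = drbd_pp_alloc(mdev, nr_pages, true);
 *	if (page)
 *		drbd_pp_free(mdev, page);
 */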
298 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
299 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
300 * Either links the page chain back to the global pool,
301 * or returns all pages to the system. */
302 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
305 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
306 i = page_chain_free(page);
309 tmp = page_chain_tail(page, &i);
310 spin_lock(&drbd_pp_lock);
311 page_chain_add(&drbd_pp_pool, page, tmp);
313 spin_unlock(&drbd_pp_lock);
315 atomic_sub(i, &mdev->pp_in_use);
316 i = atomic_read(&mdev->pp_in_use);
318 dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
319 wake_up(&drbd_pp_wait);
323 You need to hold the req_lock:
324 _drbd_wait_ee_list_empty()
326 You must not have the req_lock:
332 drbd_process_done_ee()
334 drbd_wait_ee_list_empty()
337 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
340 unsigned int data_size,
341 gfp_t gfp_mask) __must_hold(local)
343 struct drbd_epoch_entry *e;
345 unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
347 if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
350 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
352 if (!(gfp_mask & __GFP_NOWARN))
353 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
357 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
361 INIT_HLIST_NODE(&e->colision);
365 atomic_set(&e->pending_bios, 0);
375 mempool_free(e, drbd_ee_mempool);
379 void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
381 drbd_pp_free(mdev, e->pages);
382 D_ASSERT(atomic_read(&e->pending_bios) == 0);
383 D_ASSERT(hlist_unhashed(&e->colision));
384 mempool_free(e, drbd_ee_mempool);
387 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
389 LIST_HEAD(work_list);
390 struct drbd_epoch_entry *e, *t;
393 spin_lock_irq(&mdev->req_lock);
394 list_splice_init(list, &work_list);
395 spin_unlock_irq(&mdev->req_lock);
397 list_for_each_entry_safe(e, t, &work_list, w.list) {
398 drbd_free_ee(mdev, e);
406 * This function is called from _asender only_
407 * but see also comments in _req_mod(,barrier_acked)
408 * and receive_Barrier.
410 * Move entries from net_ee to done_ee, if ready.
411 * Grab done_ee, call all callbacks, free the entries.
412 * The callbacks typically send out ACKs.
414 static int drbd_process_done_ee(struct drbd_conf *mdev)
416 LIST_HEAD(work_list);
417 LIST_HEAD(reclaimed);
418 struct drbd_epoch_entry *e, *t;
419 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
421 spin_lock_irq(&mdev->req_lock);
422 reclaim_net_ee(mdev, &reclaimed);
423 list_splice_init(&mdev->done_ee, &work_list);
424 spin_unlock_irq(&mdev->req_lock);
426 list_for_each_entry_safe(e, t, &reclaimed, w.list)
427 drbd_free_ee(mdev, e);
429 /* possible callbacks here:
430 * e_end_block, and e_end_resync_block, e_send_discard_ack.
431 * all ignore the last argument.
433 list_for_each_entry_safe(e, t, &work_list, w.list) {
434 /* list_del not necessary, next/prev members not touched */
435 ok = e->w.cb(mdev, &e->w, !ok) && ok;
436 drbd_free_ee(mdev, e);
438 wake_up(&mdev->ee_wait);
443 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
447 /* avoids spin_lock/unlock
448 * and calling prepare_to_wait in the fast path */
449 while (!list_empty(head)) {
450 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
451 spin_unlock_irq(&mdev->req_lock);
454 finish_wait(&mdev->ee_wait, &wait);
455 spin_lock_irq(&mdev->req_lock);
459 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
461 spin_lock_irq(&mdev->req_lock);
462 _drbd_wait_ee_list_empty(mdev, head);
463 spin_unlock_irq(&mdev->req_lock);
466 /* see also kernel_accept, which is only present since 2.6.18.
467 * also we want to log which part of it failed, exactly */
468 static int drbd_accept(struct drbd_conf *mdev, const char **what,
469 struct socket *sock, struct socket **newsock)
471 struct sock *sk = sock->sk;
475 err = sock->ops->listen(sock, 5);
479 *what = "sock_create_lite";
480 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
486 err = sock->ops->accept(sock, *newsock, 0);
488 sock_release(*newsock);
492 (*newsock)->ops = sock->ops;
498 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
499 void *buf, size_t size, int flags)
506 struct msghdr msg = {
508 .msg_iov = (struct iovec *)&iov,
509 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
515 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
521 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
528 struct msghdr msg = {
530 .msg_iov = (struct iovec *)&iov,
531 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
539 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
544 * ECONNRESET other side closed the connection
545 * ERESTARTSYS (on sock) we got a signal
549 if (rv == -ECONNRESET)
550 dev_info(DEV, "sock was reset by peer\n");
551 else if (rv != -ERESTARTSYS)
552 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
554 } else if (rv == 0) {
555 dev_info(DEV, "sock was shut down by peer\n");
558 /* signal came in, or peer/link went down,
559 * after we read a partial message
561 /* D_ASSERT(signal_pending(current)); */
569 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
574 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
578 struct sockaddr_in6 src_in6;
580 int disconnect_on_error = 1;
582 if (!get_net_conf(mdev))
585 what = "sock_create_kern";
586 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
587 SOCK_STREAM, IPPROTO_TCP, &sock);
593 sock->sk->sk_rcvtimeo =
594 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
596 /* explicitly bind to the configured IP as source IP
597 * for the outgoing connections.
598 * This is needed for multihomed hosts and to be
599 * able to use lo: interfaces for drbd.
600 * Make sure to use 0 as port number, so linux selects
601 * a free one dynamically.
603 memcpy(&src_in6, mdev->net_conf->my_addr,
604 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
605 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
606 src_in6.sin6_port = 0;
608 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
610 what = "bind before connect";
611 err = sock->ops->bind(sock,
612 (struct sockaddr *) &src_in6,
613 mdev->net_conf->my_addr_len);
617 /* connect may fail, peer not yet available.
618 * stay C_WF_CONNECTION, don't go Disconnecting! */
619 disconnect_on_error = 0;
621 err = sock->ops->connect(sock,
622 (struct sockaddr *)mdev->net_conf->peer_addr,
623 mdev->net_conf->peer_addr_len, 0);
632 /* timeout, busy, signal pending */
633 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
634 case EINTR: case ERESTARTSYS:
635 /* peer not (yet) available, network problem */
636 case ECONNREFUSED: case ENETUNREACH:
637 case EHOSTDOWN: case EHOSTUNREACH:
638 disconnect_on_error = 0;
641 dev_err(DEV, "%s failed, err = %d\n", what, err);
643 if (disconnect_on_error)
644 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
650 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
653 struct socket *s_estab = NULL, *s_listen;
656 if (!get_net_conf(mdev))
659 what = "sock_create_kern";
660 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
661 SOCK_STREAM, IPPROTO_TCP, &s_listen);
667 timeo = mdev->net_conf->try_connect_int * HZ;
668 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
670 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
671 s_listen->sk->sk_rcvtimeo = timeo;
672 s_listen->sk->sk_sndtimeo = timeo;
674 what = "bind before listen";
675 err = s_listen->ops->bind(s_listen,
676 (struct sockaddr *) mdev->net_conf->my_addr,
677 mdev->net_conf->my_addr_len);
681 err = drbd_accept(mdev, &what, s_listen, &s_estab);
685 sock_release(s_listen);
687 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
688 dev_err(DEV, "%s failed, err = %d\n", what, err);
689 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
697 static int drbd_send_fp(struct drbd_conf *mdev,
698 struct socket *sock, enum drbd_packets cmd)
700 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
702 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
705 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
707 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
710 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
712 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
713 return be16_to_cpu(h->command);
719 * drbd_socket_okay() - Free the socket if its connection is not okay
720 * @mdev: DRBD device.
721 * @sock: pointer to the pointer to the socket.
723 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
731 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
733 if (rr > 0 || rr == -EAGAIN) {
744 * 1 yes, we have a valid connection
745 * 0 oops, did not work out, please try again
746 * -1 peer talks different language,
747 * no point in trying again, please go standalone.
748 * -2 We do not have a network config...
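/* Sketch of a caller honoring this contract; the real retry loop
 * lives in the receiver thread (assumed to be drbdd_init(), not shown
 * in this excerpt):
 *
 *	int h;
 *	do {
 *		h = drbd_connect(mdev);
 *	} while (h == 0);
 *
 * h > 0: connected, start receiving packets.
 * h < 0: do not retry; go standalone / unconfigured.
 */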
750 static int drbd_connect(struct drbd_conf *mdev)
752 struct socket *s, *sock, *msock;
755 D_ASSERT(!mdev->data.socket);
757 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
758 dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");
760 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
763 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
770 /* 3 tries, this should take less than a second! */
771 s = drbd_try_connect(mdev);
774 /* give the other side time to call bind() & listen() */
775 __set_current_state(TASK_INTERRUPTIBLE);
776 schedule_timeout(HZ / 10);
781 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
785 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
789 dev_err(DEV, "Logic error in drbd_connect()\n");
790 goto out_release_sockets;
795 __set_current_state(TASK_INTERRUPTIBLE);
796 schedule_timeout(HZ / 10);
797 ok = drbd_socket_okay(mdev, &sock);
798 ok = drbd_socket_okay(mdev, &msock) && ok;
804 s = drbd_wait_for_connect(mdev);
806 try = drbd_recv_fp(mdev, s);
807 drbd_socket_okay(mdev, &sock);
808 drbd_socket_okay(mdev, &msock);
812 dev_warn(DEV, "initial packet S crossed\n");
819 dev_warn(DEV, "initial packet M crossed\n");
823 set_bit(DISCARD_CONCURRENT, &mdev->flags);
826 dev_warn(DEV, "Error receiving initial packet\n");
833 if (mdev->state.conn <= C_DISCONNECTING)
834 goto out_release_sockets;
835 if (signal_pending(current)) {
836 flush_signals(current);
838 if (get_t_state(&mdev->receiver) == Exiting)
839 goto out_release_sockets;
843 ok = drbd_socket_okay(mdev, &sock);
844 ok = drbd_socket_okay(mdev, &msock) && ok;
850 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
851 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
853 sock->sk->sk_allocation = GFP_NOIO;
854 msock->sk->sk_allocation = GFP_NOIO;
856 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
857 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
859 if (mdev->net_conf->sndbuf_size) {
860 sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
861 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
864 if (mdev->net_conf->rcvbuf_size) {
865 sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
866 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
870 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
871 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
872 * first set it to the P_HAND_SHAKE timeout,
873 * which we set to 4x the configured ping_timeout. */
874 sock->sk->sk_sndtimeo =
875 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
877 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
878 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
880 /* we don't want delays.
881 * we use TCP_CORK where appropriate, though */
882 drbd_tcp_nodelay(sock);
883 drbd_tcp_nodelay(msock);
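/* drbd_tcp_nodelay() is assumed to be the usual TCP_NODELAY
 * setsockopt wrapper, roughly:
 *
 *	int val = 1;
 *	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
 *			  (char *)&val, sizeof(val));
 */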
885 mdev->data.socket = sock;
886 mdev->meta.socket = msock;
887 mdev->last_received = jiffies;
889 D_ASSERT(mdev->asender.task == NULL);
891 h = drbd_do_handshake(mdev);
895 if (mdev->cram_hmac_tfm) {
896 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
897 switch (drbd_do_auth(mdev)) {
899 dev_err(DEV, "Authentication of peer failed\n");
902 dev_err(DEV, "Authentication of peer failed, trying again.\n");
907 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
910 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
911 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
913 atomic_set(&mdev->packet_seq, 0);
916 drbd_thread_start(&mdev->asender);
918 if (!drbd_send_protocol(mdev))
920 drbd_send_sync_param(mdev, &mdev->sync_conf);
921 drbd_send_sizes(mdev, 0, 0);
922 drbd_send_uuids(mdev);
923 drbd_send_state(mdev);
924 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
925 clear_bit(RESIZE_PENDING, &mdev->flags);
937 static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
941 r = drbd_recv(mdev, h, sizeof(*h));
943 if (unlikely(r != sizeof(*h))) {
944 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
947 h->command = be16_to_cpu(h->command);
948 h->length = be16_to_cpu(h->length);
949 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
950 dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
951 (long)be32_to_cpu(h->magic),
952 h->command, h->length);
955 mdev->last_received = jiffies;
960 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
964 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
965 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
966 NULL, BLKDEV_IFL_WAIT);
968 dev_err(DEV, "local disk flush failed with status %d\n", rv);
969 /* would rather check on EOPNOTSUPP, but that is not reliable.
970 * don't try again for ANY return value != 0
971 * if (rv == -EOPNOTSUPP) */
972 drbd_bump_write_ordering(mdev, WO_drain_io);
977 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
980 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
982 struct flush_work *fw = (struct flush_work *)w;
983 struct drbd_epoch *epoch = fw->epoch;
987 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
988 drbd_flush_after_epoch(mdev, epoch);
990 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
991 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
997 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
998 * @mdev: DRBD device.
999 * @epoch: Epoch object.
1002 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1003 struct drbd_epoch *epoch,
1004 enum epoch_event ev)
1006 int finish, epoch_size;
1007 struct drbd_epoch *next_epoch;
1008 int schedule_flush = 0;
1009 enum finish_epoch rv = FE_STILL_LIVE;
1011 spin_lock(&mdev->epoch_lock);
1016 epoch_size = atomic_read(&epoch->epoch_size);
1018 switch (ev & ~EV_CLEANUP) {
1020 atomic_dec(&epoch->active);
1022 case EV_GOT_BARRIER_NR:
1023 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1025 /* Special case: If we just switched from WO_bio_barrier to
1026 WO_bdev_flush we should not finish the current epoch */
1027 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1028 mdev->write_ordering != WO_bio_barrier &&
1029 epoch == mdev->current_epoch)
1030 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1032 case EV_BARRIER_DONE:
1033 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1035 case EV_BECAME_LAST:
1040 if (epoch_size != 0 &&
1041 atomic_read(&epoch->active) == 0 &&
1042 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1043 epoch->list.prev == &mdev->current_epoch->list &&
1044 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1045 /* Nearly all conditions are met to finish that epoch... */
1046 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1047 mdev->write_ordering == WO_none ||
1048 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1051 set_bit(DE_IS_FINISHING, &epoch->flags);
1052 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1053 mdev->write_ordering == WO_bio_barrier) {
1054 atomic_inc(&epoch->active);
1059 if (!(ev & EV_CLEANUP)) {
1060 spin_unlock(&mdev->epoch_lock);
1061 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1062 spin_lock(&mdev->epoch_lock);
1066 if (mdev->current_epoch != epoch) {
1067 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1068 list_del(&epoch->list);
1069 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1073 if (rv == FE_STILL_LIVE)
1077 atomic_set(&epoch->epoch_size, 0);
1078 /* atomic_set(&epoch->active, 0); is already zero */
1079 if (rv == FE_STILL_LIVE)
1090 spin_unlock(&mdev->epoch_lock);
1092 if (schedule_flush) {
1093 struct flush_work *fw;
1094 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1098 drbd_queue_work(&mdev->data.work, &fw->w);
1100 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1101 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1102 /* That is not recursion, only one level */
1103 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1104 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1112 * drbd_bump_write_ordering() - Fall back to another write ordering method
1113 * @mdev: DRBD device.
1114 * @wo: Write ordering method to try.
1116 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1118 enum write_ordering_e pwo;
1119 static char *write_ordering_str[] = {
1121 [WO_drain_io] = "drain",
1122 [WO_bdev_flush] = "flush",
1123 [WO_bio_barrier] = "barrier",
1126 pwo = mdev->write_ordering;
1128 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1130 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1132 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1134 mdev->write_ordering = wo;
1135 if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1136 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
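/* The net effect is a one-way fallback chain, with each step skipped
 * when disabled in the disk config (the no_disk_* checks above):
 *
 *	WO_bio_barrier -> WO_bdev_flush -> WO_drain_io -> WO_none
 *
 * For example, when a flush fails, drbd_flush_after_epoch() above
 * degrades with:
 *
 *	drbd_bump_write_ordering(mdev, WO_drain_io);
 */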
1141 * @mdev: DRBD device.
1143 * @rw: flag field, see bio->bi_rw
1145 /* TODO allocate from our own bio_set. */
1146 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1147 const unsigned rw, const int fault_type)
1149 struct bio *bios = NULL;
1151 struct page *page = e->pages;
1152 sector_t sector = e->sector;
1153 unsigned ds = e->size;
1154 unsigned n_bios = 0;
1155 unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
1157 if (atomic_read(&mdev->new_c_uuid)) {
1158 if (atomic_add_unless(&mdev->new_c_uuid, -1, 1)) {
1159 drbd_uuid_new_current(mdev);
1162 atomic_dec(&mdev->new_c_uuid);
1163 wake_up(&mdev->misc_wait);
1165 wait_event(mdev->misc_wait, !atomic_read(&mdev->new_c_uuid));
1168 /* In most cases, we will only need one bio. But in case the lower
1169 * level restrictions happen to be different at this offset on this
1170 * side than those of the sending peer, we may need to submit the
1171 * request in more than one bio. */
1173 bio = bio_alloc(GFP_NOIO, nr_pages);
1175 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1178 /* > e->sector, unless this is the first bio */
1179 bio->bi_sector = sector;
1180 bio->bi_bdev = mdev->ldev->backing_bdev;
1181 /* we special case some flags in the multi-bio case, see below
1182 * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
1184 bio->bi_private = e;
1185 bio->bi_end_io = drbd_endio_sec;
1187 bio->bi_next = bios;
1191 page_chain_for_each(page) {
1192 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1193 if (!bio_add_page(bio, page, len, 0)) {
1194 /* a single page must always be possible! */
1195 BUG_ON(bio->bi_vcnt == 0);
1202 D_ASSERT(page == NULL);
1205 atomic_set(&e->pending_bios, n_bios);
1208 bios = bios->bi_next;
1209 bio->bi_next = NULL;
1211 /* strip off BIO_RW_UNPLUG unless it is the last bio */
1213 bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
1215 drbd_generic_make_request(mdev, fault_type, bio);
1217 /* strip off BIO_RW_BARRIER,
1218 * unless it is the first or last bio */
1219 if (bios && bios->bi_next)
1220 bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
1222 maybe_kick_lo(mdev);
1228 bios = bios->bi_next;
1235 * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
1236 * @mdev: DRBD device.
1238 * @cancel: The connection will be closed anyways (unused in this callback)
1240 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1242 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1243 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1244 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1245 so that we can finish that epoch in drbd_may_finish_epoch().
1246 That is necessary if we already have a long chain of Epochs, before
1247 we realize that BIO_RW_BARRIER is actually not supported */
1249 /* As long as the -ENOTSUPP on the barrier is reported immediately,
1250 that will never trigger. If it is reported late, we will just
1251 print that warning and continue correctly for all future requests
1252 with WO_bdev_flush */
1253 if (previous_epoch(mdev, e->epoch))
1254 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1256 /* we still have a local reference,
1257 * get_ldev was done in receive_Data. */
1259 e->w.cb = e_end_block;
1260 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
1261 /* drbd_submit_ee fails for one reason only:
1262 * it was not able to allocate sufficient bios.
1263 * requeue, try again later. */
1264 e->w.cb = w_e_reissue;
1265 drbd_queue_work(&mdev->data.work, &e->w);
1270 static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1272 int rv, issue_flush;
1273 struct p_barrier *p = (struct p_barrier *)h;
1274 struct drbd_epoch *epoch;
1276 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
1278 rv = drbd_recv(mdev, h->payload, h->length);
1279 ERR_IF(rv != h->length) return FALSE;
1283 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1286 mdev->current_epoch->barrier_nr = p->barrier;
1287 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1289 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1290 * the activity log, which means it would not be resynced in case the
1291 * R_PRIMARY crashes now.
1292 * Therefore we must send the barrier_ack after the barrier request was
1294 switch (mdev->write_ordering) {
1295 case WO_bio_barrier:
1297 if (rv == FE_RECYCLED)
1303 if (rv == FE_STILL_LIVE) {
1304 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1305 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1306 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1308 if (rv == FE_RECYCLED)
1311 /* The asender will send all the ACKs and barrier ACKs out, since
1312 all EEs moved from the active_ee to the done_ee. We need to
1313 provide a new epoch object for the EEs that come in soon */
1317 /* receiver context, in the writeout path of the other node.
1318 * avoid potential distributed deadlock */
1319 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1321 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1322 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1323 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1325 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1326 if (rv == FE_RECYCLED)
1330 drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1336 atomic_set(&epoch->epoch_size, 0);
1337 atomic_set(&epoch->active, 0);
1339 spin_lock(&mdev->epoch_lock);
1340 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1341 list_add(&epoch->list, &mdev->current_epoch->list);
1342 mdev->current_epoch = epoch;
1345 /* The current_epoch got recycled while we allocated this one... */
1348 spin_unlock(&mdev->epoch_lock);
1353 /* used from receive_RSDataReply (recv_resync_read)
1354 * and from receive_Data */
1355 static struct drbd_epoch_entry *
1356 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1358 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1359 struct drbd_epoch_entry *e;
1362 void *dig_in = mdev->int_dig_in;
1363 void *dig_vv = mdev->int_dig_vv;
1364 unsigned long *data;
1366 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1367 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1370 rr = drbd_recv(mdev, dig_in, dgs);
1372 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1380 ERR_IF(data_size & 0x1ff) return NULL;
1381 ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
1383 /* even though we trust our peer,
1384 * we sometimes have to double check. */
1385 if (sector + (data_size>>9) > capacity) {
1386 dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
1387 (unsigned long long)capacity,
1388 (unsigned long long)sector, data_size);
1392 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1393 * "criss-cross" setup, that might cause write-out on some other DRBD,
1394 * which in turn might block on the other node at this very place. */
1395 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1401 page_chain_for_each(page) {
1402 unsigned len = min_t(int, ds, PAGE_SIZE);
1404 rr = drbd_recv(mdev, data, len);
1405 if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
1406 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1407 data[0] = data[0] ^ (unsigned long)-1;
1411 drbd_free_ee(mdev, e);
1412 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1420 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1421 if (memcmp(dig_in, dig_vv, dgs)) {
1422 dev_err(DEV, "Digest integrity check FAILED.\n");
1423 drbd_bcast_ee(mdev, "digest failed",
1424 dgs, dig_in, dig_vv, e);
1425 drbd_free_ee(mdev, e);
1429 mdev->recv_cnt += data_size>>9;
1433 /* drbd_drain_block() just takes a data block
1434 * out of the socket input buffer, and discards it.
1436 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1445 page = drbd_pp_alloc(mdev, 1, 1);
1449 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1450 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1452 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1453 rr, min_t(int, data_size, PAGE_SIZE));
1459 drbd_pp_free(mdev, page);
1463 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1464 sector_t sector, int data_size)
1466 struct bio_vec *bvec;
1468 int dgs, rr, i, expect;
1469 void *dig_in = mdev->int_dig_in;
1470 void *dig_vv = mdev->int_dig_vv;
1472 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1473 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1476 rr = drbd_recv(mdev, dig_in, dgs);
1478 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1486 /* optimistically update recv_cnt. if receiving fails below,
1487 * we disconnect anyways, and counters will be reset. */
1488 mdev->recv_cnt += data_size>>9;
1490 bio = req->master_bio;
1491 D_ASSERT(sector == bio->bi_sector);
1493 bio_for_each_segment(bvec, bio, i) {
1494 expect = min_t(int, data_size, bvec->bv_len);
1495 rr = drbd_recv(mdev,
1496 kmap(bvec->bv_page)+bvec->bv_offset,
1498 kunmap(bvec->bv_page);
1500 dev_warn(DEV, "short read receiving data reply: "
1501 "read %d expected %d\n",
1509 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1510 if (memcmp(dig_in, dig_vv, dgs)) {
1511 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1516 D_ASSERT(data_size == 0);
1520 /* e_end_resync_block() is called via
1521 * drbd_process_done_ee() by asender only */
1522 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1524 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1525 sector_t sector = e->sector;
1528 D_ASSERT(hlist_unhashed(&e->colision));
1530 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1531 drbd_set_in_sync(mdev, sector, e->size);
1532 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1534 /* Record failure to sync */
1535 drbd_rs_failed_io(mdev, sector, e->size);
1537 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1544 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1546 struct drbd_epoch_entry *e;
1548 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1552 dec_rs_pending(mdev);
1555 /* corresponding dec_unacked() in e_end_resync_block()
1556 * respective _drbd_clear_done_ee */
1558 e->w.cb = e_end_resync_block;
1560 spin_lock_irq(&mdev->req_lock);
1561 list_add(&e->w.list, &mdev->sync_ee);
1562 spin_unlock_irq(&mdev->req_lock);
1564 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1567 drbd_free_ee(mdev, e);
1573 static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
1575 struct drbd_request *req;
1577 unsigned int header_size, data_size;
1579 struct p_data *p = (struct p_data *)h;
1581 header_size = sizeof(*p) - sizeof(*h);
1582 data_size = h->length - header_size;
1584 ERR_IF(data_size == 0) return FALSE;
1586 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1589 sector = be64_to_cpu(p->sector);
1591 spin_lock_irq(&mdev->req_lock);
1592 req = _ar_id_to_req(mdev, p->block_id, sector);
1593 spin_unlock_irq(&mdev->req_lock);
1594 if (unlikely(!req)) {
1595 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1599 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1600 * special casing it there for the various failure cases.
1601 * still no race with drbd_fail_pending_reads */
1602 ok = recv_dless_read(mdev, req, sector, data_size);
1605 req_mod(req, data_received);
1606 /* else: nothing. handled from drbd_disconnect...
1607 * I don't think we may complete this just yet
1608 * in case we are "on-disconnect: freeze" */
1613 static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
1616 unsigned int header_size, data_size;
1618 struct p_data *p = (struct p_data *)h;
1620 header_size = sizeof(*p) - sizeof(*h);
1621 data_size = h->length - header_size;
1623 ERR_IF(data_size == 0) return FALSE;
1625 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1628 sector = be64_to_cpu(p->sector);
1629 D_ASSERT(p->block_id == ID_SYNCER);
1631 if (get_ldev(mdev)) {
1632 /* data is submitted to disk within recv_resync_read.
1633 * corresponding put_ldev done below on error,
1634 * or in drbd_endio_write_sec. */
1635 ok = recv_resync_read(mdev, sector, data_size);
1637 if (__ratelimit(&drbd_ratelimit_state))
1638 dev_err(DEV, "Can not write resync data to local disk.\n");
1640 ok = drbd_drain_block(mdev, data_size);
1642 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1648 /* e_end_block() is called via drbd_process_done_ee().
1649 * this means this function only runs in the asender thread
1651 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1653 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1654 sector_t sector = e->sector;
1655 struct drbd_epoch *epoch;
1658 if (e->flags & EE_IS_BARRIER) {
1659 epoch = previous_epoch(mdev, e->epoch);
1661 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1664 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1665 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1666 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1667 mdev->state.conn <= C_PAUSED_SYNC_T &&
1668 e->flags & EE_MAY_SET_IN_SYNC) ?
1669 P_RS_WRITE_ACK : P_WRITE_ACK;
1670 ok &= drbd_send_ack(mdev, pcmd, e);
1671 if (pcmd == P_RS_WRITE_ACK)
1672 drbd_set_in_sync(mdev, sector, e->size);
1674 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1675 /* we expect it to be marked out of sync anyways...
1676 * maybe assert this? */
1680 /* we delete from the conflict detection hash _after_ we sent out the
1681 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1682 if (mdev->net_conf->two_primaries) {
1683 spin_lock_irq(&mdev->req_lock);
1684 D_ASSERT(!hlist_unhashed(&e->colision));
1685 hlist_del_init(&e->colision);
1686 spin_unlock_irq(&mdev->req_lock);
1688 D_ASSERT(hlist_unhashed(&e->colision));
1691 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1696 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1698 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1701 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1702 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1704 spin_lock_irq(&mdev->req_lock);
1705 D_ASSERT(!hlist_unhashed(&e->colision));
1706 hlist_del_init(&e->colision);
1707 spin_unlock_irq(&mdev->req_lock);
1714 /* Called from receive_Data.
1715 * Synchronize packets on sock with packets on msock.
1717 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1718 * packet traveling on msock, they are still processed in the order they have
1721 * Note: we don't care for Ack packets overtaking P_DATA packets.
1723 * In case packet_seq is larger than mdev->peer_seq number, there are
1724 * outstanding packets on the msock. We wait for them to arrive.
1725 * In case we are the logically next packet, we update mdev->peer_seq
1726 * ourselves. Correctly handles 32bit wrap around.
1728 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1729 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1730 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1731 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1733 * returns 0 if we may process the packet,
1734 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
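/* For reference, such a wrap-safe comparison is typically the signed
 * difference trick; seq_le() is assumed to behave like this sketch:
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 */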
1735 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1741 spin_lock(&mdev->peer_seq_lock);
1743 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1744 if (seq_le(packet_seq, mdev->peer_seq+1))
1746 if (signal_pending(current)) {
1750 p_seq = mdev->peer_seq;
1751 spin_unlock(&mdev->peer_seq_lock);
1752 timeout = schedule_timeout(30*HZ);
1753 spin_lock(&mdev->peer_seq_lock);
1754 if (timeout == 0 && p_seq == mdev->peer_seq) {
1756 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1760 finish_wait(&mdev->seq_wait, &wait);
1761 if (mdev->peer_seq+1 == packet_seq)
1763 spin_unlock(&mdev->peer_seq_lock);
1767 /* mirrored write */
1768 static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1771 struct drbd_epoch_entry *e;
1772 struct p_data *p = (struct p_data *)h;
1773 int header_size, data_size;
1777 header_size = sizeof(*p) - sizeof(*h);
1778 data_size = h->length - header_size;
1780 ERR_IF(data_size == 0) return FALSE;
1782 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1785 if (!get_ldev(mdev)) {
1786 if (__ratelimit(&drbd_ratelimit_state))
1787 dev_err(DEV, "Can not write mirrored data block "
1788 "to local disk.\n");
1789 spin_lock(&mdev->peer_seq_lock);
1790 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1792 spin_unlock(&mdev->peer_seq_lock);
1794 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1795 atomic_inc(&mdev->current_epoch->epoch_size);
1796 return drbd_drain_block(mdev, data_size);
1799 /* get_ldev(mdev) successful.
1800 * Corresponding put_ldev done either below (on various errors),
1801 * or in drbd_endio_write_sec, if we successfully submit the data at
1802 * the end of this function. */
1804 sector = be64_to_cpu(p->sector);
1805 e = read_in_block(mdev, p->block_id, sector, data_size);
1811 e->w.cb = e_end_block;
1813 spin_lock(&mdev->epoch_lock);
1814 e->epoch = mdev->current_epoch;
1815 atomic_inc(&e->epoch->epoch_size);
1816 atomic_inc(&e->epoch->active);
1818 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1819 struct drbd_epoch *epoch;
1820 /* Issue a barrier if we start a new epoch, and the previous epoch
1821 was not an epoch containing a single request which already was
1823 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1824 if (epoch == e->epoch) {
1825 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1826 rw |= (1<<BIO_RW_BARRIER);
1827 e->flags |= EE_IS_BARRIER;
1829 if (atomic_read(&epoch->epoch_size) > 1 ||
1830 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1831 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1832 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1833 rw |= (1<<BIO_RW_BARRIER);
1834 e->flags |= EE_IS_BARRIER;
1838 spin_unlock(&mdev->epoch_lock);
1840 dp_flags = be32_to_cpu(p->dp_flags);
1841 if (dp_flags & DP_HARDBARRIER) {
1842 dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
1843 /* rw |= (1<<BIO_RW_BARRIER); */
1845 if (dp_flags & DP_RW_SYNC)
1846 rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
1847 if (dp_flags & DP_MAY_SET_IN_SYNC)
1848 e->flags |= EE_MAY_SET_IN_SYNC;
1850 /* I'm the receiver, I do hold a net_cnt reference. */
1851 if (!mdev->net_conf->two_primaries) {
1852 spin_lock_irq(&mdev->req_lock);
1854 /* don't get the req_lock yet,
1855 * we may sleep in drbd_wait_peer_seq */
1856 const int size = e->size;
1857 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1859 struct drbd_request *i;
1860 struct hlist_node *n;
1861 struct hlist_head *slot;
1864 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1865 BUG_ON(mdev->ee_hash == NULL);
1866 BUG_ON(mdev->tl_hash == NULL);
1868 /* conflict detection and handling:
1869 * 1. wait on the sequence number,
1870 * in case this data packet overtook ACK packets.
1871 * 2. check our hash tables for conflicting requests.
1872 * we only need to walk the tl_hash, since an ee can not
1873 * have a conflict with an other ee: on the submitting
1874 * node, the corresponding req had already been conflicting,
1875 * and a conflicting req is never sent.
1877 * Note: for two_primaries, we are protocol C,
1878 * so there cannot be any request that is DONE
1879 * but still on the transfer log.
1881 * unconditionally add to the ee_hash.
1883 * if no conflicting request is found:
1886 * if any conflicting request is found
1887 * that has not yet been acked,
1888 * AND I have the "discard concurrent writes" flag:
1889 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1891 * if any conflicting request is found:
1892 * block the receiver, waiting on misc_wait
1893 * until no more conflicting requests are there,
1894 * or we get interrupted (disconnect).
1896 * we do not just write after local io completion of those
1897 * requests, but only after req is done completely, i.e.
1898 * we wait for the P_DISCARD_ACK to arrive!
1900 * then proceed normally, i.e. submit.
1902 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1903 goto out_interrupted;
1905 spin_lock_irq(&mdev->req_lock);
1907 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1909 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1910 slot = tl_hash_slot(mdev, sector);
1913 int have_unacked = 0;
1914 int have_conflict = 0;
1915 prepare_to_wait(&mdev->misc_wait, &wait,
1916 TASK_INTERRUPTIBLE);
1917 hlist_for_each_entry(i, n, slot, colision) {
1919 /* only ALERT on first iteration,
1920 * we may be woken up early... */
1922 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1923 " new: %llus +%u; pending: %llus +%u\n",
1924 current->comm, current->pid,
1925 (unsigned long long)sector, size,
1926 (unsigned long long)i->sector, i->size);
1927 if (i->rq_state & RQ_NET_PENDING)
1936 /* Discard Ack only for the _first_ iteration */
1937 if (first && discard && have_unacked) {
1938 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1939 (unsigned long long)sector);
1941 e->w.cb = e_send_discard_ack;
1942 list_add_tail(&e->w.list, &mdev->done_ee);
1944 spin_unlock_irq(&mdev->req_lock);
1946 /* we could probably send that P_DISCARD_ACK ourselves,
1947 * but I don't like the receiver using the msock */
1951 finish_wait(&mdev->misc_wait, &wait);
1955 if (signal_pending(current)) {
1956 hlist_del_init(&e->colision);
1958 spin_unlock_irq(&mdev->req_lock);
1960 finish_wait(&mdev->misc_wait, &wait);
1961 goto out_interrupted;
1964 spin_unlock_irq(&mdev->req_lock);
1967 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1968 "sec=%llus\n", (unsigned long long)sector);
1969 } else if (discard) {
1970 /* we had none on the first iteration.
1971 * there must be none now. */
1972 D_ASSERT(have_unacked == 0);
1975 spin_lock_irq(&mdev->req_lock);
1977 finish_wait(&mdev->misc_wait, &wait);
1980 list_add(&e->w.list, &mdev->active_ee);
1981 spin_unlock_irq(&mdev->req_lock);
1983 switch (mdev->net_conf->wire_protocol) {
1986 /* corresponding dec_unacked() in e_end_block()
1987 * respective _drbd_clear_done_ee */
1990 /* I really don't like it that the receiver thread
1991 * sends on the msock, but anyways */
1992 drbd_send_ack(mdev, P_RECV_ACK, e);
1999 if (mdev->state.pdsk == D_DISKLESS) {
2000 /* In case we have the only disk of the cluster, */
2001 drbd_set_out_of_sync(mdev, e->sector, e->size);
2002 e->flags |= EE_CALL_AL_COMPLETE_IO;
2003 drbd_al_begin_io(mdev, e->sector);
2006 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2010 /* yes, the epoch_size now is imbalanced.
2011 * but we drop the connection anyways, so we don't have a chance to
2012 * receive a barrier... atomic_inc(&mdev->epoch_size); */
2014 drbd_free_ee(mdev, e);
2018 static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2021 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2022 struct drbd_epoch_entry *e;
2023 struct digest_info *di = NULL;
2024 int size, digest_size;
2025 unsigned int fault_type;
2026 struct p_block_req *p =
2027 (struct p_block_req *)h;
2028 const int brps = sizeof(*p)-sizeof(*h);
2030 if (drbd_recv(mdev, h->payload, brps) != brps)
2033 sector = be64_to_cpu(p->sector);
2034 size = be32_to_cpu(p->blksize);
2036 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2037 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2038 (unsigned long long)sector, size);
2041 if (sector + (size>>9) > capacity) {
2042 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2043 (unsigned long long)sector, size);
2047 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2048 if (__ratelimit(&drbd_ratelimit_state))
2049 dev_err(DEV, "Can not satisfy peer's read request, "
2050 "no local data.\n");
2051 drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
2052 P_NEG_RS_DREPLY , p);
2053 return drbd_drain_block(mdev, h->length - brps);
2056 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2057 * "criss-cross" setup, that might cause write-out on some other DRBD,
2058 * which in turn might block on the other node at this very place. */
2059 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2065 switch (h->command) {
2066 case P_DATA_REQUEST:
2067 e->w.cb = w_e_end_data_req;
2068 fault_type = DRBD_FAULT_DT_RD;
2070 case P_RS_DATA_REQUEST:
2071 e->w.cb = w_e_end_rsdata_req;
2072 fault_type = DRBD_FAULT_RS_RD;
2073 /* Eventually this should become asynchronous. Currently it
2074 * blocks the whole receiver just to delay the reading of a
2075 * resync data block.
2076 * the drbd_work_queue mechanism is made for this...
2078 if (!drbd_rs_begin_io(mdev, sector)) {
2079 /* we have been interrupted,
2080 * probably connection lost! */
2081 D_ASSERT(signal_pending(current));
2087 case P_CSUM_RS_REQUEST:
2088 fault_type = DRBD_FAULT_RS_RD;
2089 digest_size = h->length - brps;
2090 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2094 di->digest_size = digest_size;
2095 di->digest = (((char *)di)+sizeof(struct digest_info));
2097 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2100 e->block_id = (u64)(unsigned long)di;
2101 if (h->command == P_CSUM_RS_REQUEST) {
2102 D_ASSERT(mdev->agreed_pro_version >= 89);
2103 e->w.cb = w_e_end_csum_rs_req;
2104 } else if (h->command == P_OV_REPLY) {
2105 e->w.cb = w_e_end_ov_reply;
2106 dec_rs_pending(mdev);
2110 if (!drbd_rs_begin_io(mdev, sector)) {
2111 /* we have been interrupted, probably connection lost! */
2112 D_ASSERT(signal_pending(current));
2118 if (mdev->state.conn >= C_CONNECTED &&
2119 mdev->state.conn != C_VERIFY_T)
2120 dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2121 drbd_conn_str(mdev->state.conn));
2122 if (mdev->ov_start_sector == ~(sector_t)0 &&
2123 mdev->agreed_pro_version >= 90) {
2124 mdev->ov_start_sector = sector;
2125 mdev->ov_position = sector;
2126 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2127 dev_info(DEV, "Online Verify start sector: %llu\n",
2128 (unsigned long long)sector);
2130 e->w.cb = w_e_end_ov_req;
2131 fault_type = DRBD_FAULT_RS_RD;
2132 /* Eventually this should become asynchronous. Currently it
2133 * blocks the whole receiver just to delay the reading of a
2134 * resync data block.
2135 * the drbd_work_queue mechanism is made for this...
2137 if (!drbd_rs_begin_io(mdev, sector)) {
2138 /* we have been interrupted,
2139 * probably connection lost! */
2140 D_ASSERT(signal_pending(current));
2147 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2148 cmdname(h->command));
2149 fault_type = DRBD_FAULT_MAX;
2152 spin_lock_irq(&mdev->req_lock);
2153 list_add(&e->w.list, &mdev->read_ee);
2154 spin_unlock_irq(&mdev->req_lock);
2158 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2164 drbd_free_ee(mdev, e);
2168 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2170 int self, peer, rv = -100;
2171 unsigned long ch_self, ch_peer;
2173 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2174 peer = mdev->p_uuid[UI_BITMAP] & 1;
2176 ch_peer = mdev->p_uuid[UI_SIZE];
2177 ch_self = mdev->comm_bm_set;
2179 switch (mdev->net_conf->after_sb_0p) {
2181 case ASB_DISCARD_SECONDARY:
2182 case ASB_CALL_HELPER:
2183 dev_err(DEV, "Configuration error.\n");
2185 case ASB_DISCONNECT:
2187 case ASB_DISCARD_YOUNGER_PRI:
2188 if (self == 0 && peer == 1) {
2192 if (self == 1 && peer == 0) {
2196 /* Else fall through to one of the other strategies... */
2197 case ASB_DISCARD_OLDER_PRI:
2198 if (self == 0 && peer == 1) {
2202 if (self == 1 && peer == 0) {
2206 /* Else fall through to one of the other strategies... */
2207 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2208 "Using discard-least-changes instead\n");
2209 case ASB_DISCARD_ZERO_CHG:
2210 if (ch_peer == 0 && ch_self == 0) {
2211 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2215 if (ch_peer == 0) { rv = 1; break; }
2216 if (ch_self == 0) { rv = -1; break; }
2218 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2220 case ASB_DISCARD_LEAST_CHG:
2221 if (ch_self < ch_peer)
2223 else if (ch_self > ch_peer)
2225 else /* ( ch_self == ch_peer ) */
2226 /* Well, then use something else. */
2227 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2230 case ASB_DISCARD_LOCAL:
2233 case ASB_DISCARD_REMOTE:
2240 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2242 int self, peer, hg, rv = -100;
2244 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2245 peer = mdev->p_uuid[UI_BITMAP] & 1;
2247 switch (mdev->net_conf->after_sb_1p) {
2248 case ASB_DISCARD_YOUNGER_PRI:
2249 case ASB_DISCARD_OLDER_PRI:
2250 case ASB_DISCARD_LEAST_CHG:
2251 case ASB_DISCARD_LOCAL:
2252 case ASB_DISCARD_REMOTE:
2253 dev_err(DEV, "Configuration error.\n");
2255 case ASB_DISCONNECT:
2258 hg = drbd_asb_recover_0p(mdev);
2259 if (hg == -1 && mdev->state.role == R_SECONDARY)
2261 if (hg == 1 && mdev->state.role == R_PRIMARY)
2265 rv = drbd_asb_recover_0p(mdev);
2267 case ASB_DISCARD_SECONDARY:
2268 return mdev->state.role == R_PRIMARY ? 1 : -1;
2269 case ASB_CALL_HELPER:
2270 hg = drbd_asb_recover_0p(mdev);
2271 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2272 self = drbd_set_role(mdev, R_SECONDARY, 0);
2273 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2274 * we might be here in C_WF_REPORT_PARAMS which is transient.
2275 * we do not need to wait for the after state change work either. */
2276 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2277 if (self != SS_SUCCESS) {
2278 drbd_khelper(mdev, "pri-lost-after-sb");
2280 dev_warn(DEV, "Successfully gave up primary role.\n");
2290 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2292 int self, peer, hg, rv = -100;
2294 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2295 peer = mdev->p_uuid[UI_BITMAP] & 1;
2297 switch (mdev->net_conf->after_sb_2p) {
2298 case ASB_DISCARD_YOUNGER_PRI:
2299 case ASB_DISCARD_OLDER_PRI:
2300 case ASB_DISCARD_LEAST_CHG:
2301 case ASB_DISCARD_LOCAL:
2302 case ASB_DISCARD_REMOTE:
2304 case ASB_DISCARD_SECONDARY:
2305 dev_err(DEV, "Configuration error.\n");
2308 rv = drbd_asb_recover_0p(mdev);
2310 case ASB_DISCONNECT:
2312 case ASB_CALL_HELPER:
2313 hg = drbd_asb_recover_0p(mdev);
2315 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2316 * we might be here in C_WF_REPORT_PARAMS which is transient.
2317 * we do not need to wait for the after state change work either. */
2318 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2319 if (self != SS_SUCCESS) {
2320 drbd_khelper(mdev, "pri-lost-after-sb");
2322 dev_warn(DEV, "Successfully gave up primary role.\n");
2332 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2333 u64 bits, u64 flags)
2336 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2339 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2341 (unsigned long long)uuid[UI_CURRENT],
2342 (unsigned long long)uuid[UI_BITMAP],
2343 (unsigned long long)uuid[UI_HISTORY_START],
2344 (unsigned long long)uuid[UI_HISTORY_END],
2345 (unsigned long long)bits,
2346 (unsigned long long)flags);
2350 100 after split brain try auto recover
2351 2 C_SYNC_SOURCE set BitMap
2352 1 C_SYNC_SOURCE use BitMap
2354 -1 C_SYNC_TARGET use BitMap
2355 -2 C_SYNC_TARGET set BitMap
2356 -100 after split brain, disconnect
2357 -1000 unrelated data
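/* Sketch of how a caller maps these codes; a hypothetical condensation
 * of drbd_sync_handshake(), which consumes this value:
 *
 *	int hg = drbd_uuid_compare(mdev, &rule_nr);
 *	if (hg == -1000)
 *		return C_MASK;		unrelated data: disconnect
 *	if (hg < 0)
 *		rv = C_WF_BITMAP_T;	become C_SYNC_TARGET
 *	else if (hg > 0)
 *		rv = C_WF_BITMAP_S;	become C_SYNC_SOURCE
 *	else
 *		rv = C_CONNECTED;	no resync necessary
 */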
2359 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2364 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2365 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2368 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2372 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2373 peer != UUID_JUST_CREATED)
2377 if (self != UUID_JUST_CREATED &&
2378 (peer == UUID_JUST_CREATED || peer == (u64)0))
2382 int rct, dc; /* roles at crash time */
2384 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2386 if (mdev->agreed_pro_version < 91)
2389 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2390 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2391 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2392 drbd_uuid_set_bm(mdev, 0UL);
2394 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2395 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2398 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2405 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2407 if (mdev->agreed_pro_version < 91)
2410 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2411 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2412 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2414 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2415 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2416 mdev->p_uuid[UI_BITMAP] = 0UL;
2418 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2421 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2428 /* Common power [off|failure] */
2429 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2430 (mdev->p_uuid[UI_FLAGS] & 2);
2431 /* lowest bit is set when we were primary,
2432 * next bit (weight 2) is set when peer was primary */
2436 case 0: /* !self_pri && !peer_pri */ return 0;
2437 case 1: /* self_pri && !peer_pri */ return 1;
2438 case 2: /* !self_pri && peer_pri */ return -1;
2439 case 3: /* self_pri && peer_pri */
2440 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
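/* Both nodes were primary when they crashed. DISCARD_CONCURRENT differs
 * between the two nodes (cf. its use as a tie-breaker in
 * drbd_asb_recover_0p() above), so both sides resolve this tie to the
 * same, deterministic sync direction. */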
2446 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2451 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2453 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2454 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2456 /* The last P_SYNC_UUID did not get through. Undo the modifications
2457 the peer made to its UUIDs when it last started a resync as sync source. */
2459 if (mdev->agreed_pro_version < 91)
2462 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2463 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2469 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2470 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2471 peer = mdev->p_uuid[i] & ~((u64)1);
2477 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2478 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2483 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2485 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2486 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2488 /* The last P_SYNC_UUID did not get through. Undo the modifications
2489 we made to our UUIDs when we last started a resync as sync source. */
2491 if (mdev->agreed_pro_version < 91)
2494 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2495 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2497 dev_info(DEV, "Undid last start of resync:\n");
2499 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2500 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2508 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2509 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2510 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2516 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2517 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2518 if (self == peer && self != ((u64)0))
2522 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2523 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2524 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2525 peer = mdev->p_uuid[j] & ~((u64)1);
2534 /* drbd_sync_handshake() returns the new conn state on success, or
2535 C_MASK (-1) on failure.
2537 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2538 enum drbd_disk_state peer_disk) __must_hold(local)
2541 enum drbd_conns rv = C_MASK;
2542 enum drbd_disk_state mydisk;
2544 mydisk = mdev->state.disk;
2545 if (mydisk == D_NEGOTIATING)
2546 mydisk = mdev->new_state_tmp.disk;
2548 dev_info(DEV, "drbd_sync_handshake:\n");
2549 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2550 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2551 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2553 hg = drbd_uuid_compare(mdev, &rule_nr);
2555 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2558 dev_alert(DEV, "Unrelated data, aborting!\n");
2562 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2566 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2567 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2568 int f = (hg == -100) || (abs(hg) == 2);
2569 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2572 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2573 hg > 0 ? "source" : "target");
2577 drbd_khelper(mdev, "initial-split-brain");
2579 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2580 int pcount = (mdev->state.role == R_PRIMARY)
2581 + (peer_role == R_PRIMARY);
2582 int forced = (hg == -100);
2586 hg = drbd_asb_recover_0p(mdev);
2589 hg = drbd_asb_recover_1p(mdev);
2592 hg = drbd_asb_recover_2p(mdev);
2595 if (abs(hg) < 100) {
2596 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2597 "automatically solved. Sync from %s node\n",
2598 pcount, (hg < 0) ? "peer" : "this");
2600 dev_warn(DEV, "Doing a full sync, since"
2601 " UUIDs were ambiguous.\n");
2608 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2610 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2614 dev_warn(DEV, "Split-Brain detected, manually solved. "
2615 "Sync from %s node\n",
2616 (hg < 0) ? "peer" : "this");
2620 /* FIXME this log message is not correct if we end up here
2621 * after an attempted attach on a diskless node.
2622 * We just refuse to attach -- well, we drop the "connection"
2623 * to that disk, in a way... */
2624 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2625 drbd_khelper(mdev, "split-brain");
2629 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2630 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2634 if (hg < 0 && /* by intention we do not use mydisk here. */
2635 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2636 switch (mdev->net_conf->rr_conflict) {
2637 case ASB_CALL_HELPER:
2638 drbd_khelper(mdev, "pri-lost");
2640 case ASB_DISCONNECT:
2641 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2644 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2649 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2651 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2653 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2654 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2655 abs(hg) >= 2 ? "full" : "bit-map based");
2660 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2661 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2665 if (hg > 0) { /* become sync source. */
2667 } else if (hg < 0) { /* become sync target */
2671 if (drbd_bm_total_weight(mdev)) {
2672 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2673 drbd_bm_total_weight(mdev));
2680 /* returns 1 if invalid */
2681 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2683 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2684 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2685 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2688 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2689 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2690 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2693 /* everything else is valid if they are equal on both sides. */
2697 /* everything else is invalid. */
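/* Example: peer=discard-remote together with self=discard-local is
 * consistent: both sides agree that this node's data is discarded.
 * If both sides asked for discard-remote, each would discard the
 * other's data, so such combinations are rejected above. */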
2701 static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2703 struct p_protocol *p = (struct p_protocol *)h;
2704 int header_size, data_size;
2705 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2706 int p_want_lose, p_two_primaries, cf;
2707 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2709 header_size = sizeof(*p) - sizeof(*h);
2710 data_size = h->length - header_size;
2712 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2715 p_proto = be32_to_cpu(p->protocol);
2716 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2717 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2718 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2719 p_two_primaries = be32_to_cpu(p->two_primaries);
2720 cf = be32_to_cpu(p->conn_flags);
2721 p_want_lose = cf & CF_WANT_LOSE;
2723 clear_bit(CONN_DRY_RUN, &mdev->flags);
2725 if (cf & CF_DRY_RUN)
2726 set_bit(CONN_DRY_RUN, &mdev->flags);
2728 if (p_proto != mdev->net_conf->wire_protocol) {
2729 dev_err(DEV, "incompatible communication protocols\n");
2733 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2734 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2738 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2739 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2743 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2744 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2748 if (p_want_lose && mdev->net_conf->want_lose) {
2749 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2753 if (p_two_primaries != mdev->net_conf->two_primaries) {
2754 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2758 if (mdev->agreed_pro_version >= 87) {
2759 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2761 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2764 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2765 if (strcmp(p_integrity_alg, my_alg)) {
2766 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2769 dev_info(DEV, "data-integrity-alg: %s\n",
2770 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2776 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2781 * input: alg name, feature name
2782 * return: NULL (alg name was "")
2783 * ERR_PTR(error) if something goes wrong
2784 * or the crypto hash ptr, if it worked out ok. */
2785 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2786 const char *alg, const char *name)
2788 struct crypto_hash *tfm;
2793 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2795 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2796 alg, name, PTR_ERR(tfm));
2799 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2800 crypto_free_hash(tfm);
2801 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2802 return ERR_PTR(-EINVAL);
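/* Typical caller pattern (see receive_SyncParam() below): NULL means the
 * algorithm name was empty and no tfm is needed; an IS_ERR() value makes
 * the caller disconnect; otherwise the tfm is installed, e.g. under
 * peer_seq_lock as mdev->verify_tfm. */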
2807 static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2810 struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
2811 unsigned int header_size, data_size, exp_max_sz;
2812 struct crypto_hash *verify_tfm = NULL;
2813 struct crypto_hash *csums_tfm = NULL;
2814 const int apv = mdev->agreed_pro_version;
2816 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2817 : apv == 88 ? sizeof(struct p_rs_param)
2819 : /* 89 */ sizeof(struct p_rs_param_89);
2821 if (h->length > exp_max_sz) {
2822 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2823 h->length, exp_max_sz);
2828 header_size = sizeof(struct p_rs_param) - sizeof(*h);
2829 data_size = h->length - header_size;
2830 } else /* apv >= 89 */ {
2831 header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
2832 data_size = h->length - header_size;
2833 D_ASSERT(data_size == 0);
2836 /* initialize verify_alg and csums_alg */
2837 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2839 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2842 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2846 if (data_size > SHARED_SECRET_MAX) {
2847 dev_err(DEV, "verify-alg too long, "
2848 "peer wants %u, accepting only %u byte\n",
2849 data_size, SHARED_SECRET_MAX);
2853 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2856 /* we expect NUL terminated string */
2857 /* but just in case someone tries to be evil */
2858 D_ASSERT(p->verify_alg[data_size-1] == 0);
2859 p->verify_alg[data_size-1] = 0;
2861 } else /* apv >= 89 */ {
2862 /* we still expect NUL terminated strings */
2863 /* but just in case someone tries to be evil */
2864 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2865 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2866 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2867 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2870 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2871 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2872 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2873 mdev->sync_conf.verify_alg, p->verify_alg);
2876 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2877 p->verify_alg, "verify-alg");
2878 if (IS_ERR(verify_tfm)) {
2884 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2885 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2886 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2887 mdev->sync_conf.csums_alg, p->csums_alg);
2890 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2891 p->csums_alg, "csums-alg");
2892 if (IS_ERR(csums_tfm)) {
2899 spin_lock(&mdev->peer_seq_lock);
2900 /* lock against drbd_nl_syncer_conf() */
2902 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2903 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2904 crypto_free_hash(mdev->verify_tfm);
2905 mdev->verify_tfm = verify_tfm;
2906 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2909 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2910 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2911 crypto_free_hash(mdev->csums_tfm);
2912 mdev->csums_tfm = csums_tfm;
2913 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2915 spin_unlock(&mdev->peer_seq_lock);
2920 /* just for completeness: actually not needed,
2921 * as this is not reached if csums_tfm was ok. */
2922 crypto_free_hash(csums_tfm);
2923 /* but free the verify_tfm again, if csums_tfm did not work out */
2924 crypto_free_hash(verify_tfm);
2925 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2929 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2931 /* sorry, we currently have no working implementation
2932 * of distributed TCQ */
2935 /* warn if the arguments differ by more than 12.5% */
2936 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2937 const char *s, sector_t a, sector_t b)
2940 if (a == 0 || b == 0)
2942 d = (a > b) ? (a - b) : (b - a);
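/* a>>3 is a/8, i.e. the warning triggers above 12.5% of either value */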
2943 if (d > (a>>3) || d > (b>>3))
2944 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2945 (unsigned long long)a, (unsigned long long)b);
2948 static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2950 struct p_sizes *p = (struct p_sizes *)h;
2951 enum determine_dev_size dd = unchanged;
2952 unsigned int max_seg_s;
2953 sector_t p_size, p_usize, my_usize;
2954 int ldsc = 0; /* local disk size changed */
2955 enum dds_flags ddsf;
2957 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2958 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2961 p_size = be64_to_cpu(p->d_size);
2962 p_usize = be64_to_cpu(p->u_size);
2964 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2965 dev_err(DEV, "some backing storage is needed\n");
2966 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2970 /* just store the peer's disk size for now.
2971 * we still need to figure out whether we accept that. */
2972 mdev->p_size = p_size;
2974 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
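/* Note: this macro evaluates its arguments more than once; that is
 * harmless for the side-effect free expressions it is used with here. */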
2975 if (get_ldev(mdev)) {
2976 warn_if_differ_considerably(mdev, "lower level device sizes",
2977 p_size, drbd_get_max_capacity(mdev->ldev));
2978 warn_if_differ_considerably(mdev, "user requested size",
2979 p_usize, mdev->ldev->dc.disk_size);
2981 /* if this is the first connect, or an otherwise expected
2982 * param exchange, choose the minimum */
2983 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2984 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2987 my_usize = mdev->ldev->dc.disk_size;
2989 if (mdev->ldev->dc.disk_size != p_usize) {
2990 mdev->ldev->dc.disk_size = p_usize;
2991 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2992 (unsigned long)mdev->ldev->dc.disk_size);
2995 /* Never shrink a device with usable data during connect.
2996 But allow online shrinking if we are connected. */
2997 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2998 drbd_get_capacity(mdev->this_bdev) &&
2999 mdev->state.disk >= D_OUTDATED &&
3000 mdev->state.conn < C_CONNECTED) {
3001 dev_err(DEV, "The peer's disk size is too small!\n");
3002 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3003 mdev->ldev->dc.disk_size = my_usize;
3011 ddsf = be16_to_cpu(p->dds_flags);
3012 if (get_ldev(mdev)) {
3013 dd = drbd_determin_dev_size(mdev, ddsf);
3015 if (dd == dev_size_error)
3019 /* I am diskless, need to accept the peer's size. */
3020 drbd_set_my_capacity(mdev, p_size);
3023 if (get_ldev(mdev)) {
3024 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3025 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3029 if (mdev->agreed_pro_version < 94)
3030 max_seg_s = be32_to_cpu(p->max_segment_size);
3031 else /* drbd 8.3.8 onwards */
3032 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3034 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
3035 drbd_setup_queue_param(mdev, max_seg_s);
3037 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3041 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3042 if (be64_to_cpu(p->c_size) !=
3043 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3044 /* we have different sizes, probably peer
3045 * needs to know my new size... */
3046 drbd_send_sizes(mdev, 0, ddsf);
3048 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3049 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3050 if (mdev->state.pdsk >= D_INCONSISTENT &&
3051 mdev->state.disk >= D_INCONSISTENT) {
3052 if (ddsf & DDSF_NO_RESYNC)
3053 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3055 resync_after_online_grow(mdev);
3057 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3064 static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
3066 struct p_uuids *p = (struct p_uuids *)h;
3070 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3071 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3074 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3076 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3077 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3079 kfree(mdev->p_uuid);
3080 mdev->p_uuid = p_uuid;
3082 if (mdev->state.conn < C_CONNECTED &&
3083 mdev->state.disk < D_INCONSISTENT &&
3084 mdev->state.role == R_PRIMARY &&
3085 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3086 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3087 (unsigned long long)mdev->ed_uuid);
3088 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3092 if (get_ldev(mdev)) {
3093 int skip_initial_sync =
3094 mdev->state.conn == C_CONNECTED &&
3095 mdev->agreed_pro_version >= 90 &&
3096 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3097 (p_uuid[UI_FLAGS] & 8);
3098 if (skip_initial_sync) {
3099 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3100 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3101 "clear_n_write from receive_uuids");
3102 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3103 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3104 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3111 /* Before we test the disk state we should wait until any possibly
3112 ongoing cluster-wide state change has finished. That is important if
3113 we are primary and are detaching from our disk. We need to see the
3114 new disk state... */
3115 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3116 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3117 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3123 * convert_state() - Converts the peer's view of the cluster state to our point of view
3124 * @ps: The state as seen by the peer.
3126 static union drbd_state convert_state(union drbd_state ps)
3128 union drbd_state ms;
3130 static enum drbd_conns c_tab[] = {
3131 [C_CONNECTED] = C_CONNECTED,
3133 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3134 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3135 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3136 [C_VERIFY_S] = C_VERIFY_T,
3142 ms.conn = c_tab[ps.conn];
3147 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
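/* Note the source/target swap in c_tab above: what the peer reports as a
 * _S (source) state is, from our point of view, the corresponding _T
 * (target) state, and vice versa. */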
3152 static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3154 struct p_req_state *p = (struct p_req_state *)h;
3155 union drbd_state mask, val;
3158 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3159 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3162 mask.i = be32_to_cpu(p->mask);
3163 val.i = be32_to_cpu(p->val);
3165 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3166 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3167 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3171 mask = convert_state(mask);
3172 val = convert_state(val);
3174 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3176 drbd_send_sr_reply(mdev, rv);
3182 static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3184 struct p_state *p = (struct p_state *)h;
3185 enum drbd_conns nconn, oconn;
3186 union drbd_state ns, peer_state;
3187 enum drbd_disk_state real_peer_disk;
3190 ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3193 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3196 peer_state.i = be32_to_cpu(p->state);
3198 real_peer_disk = peer_state.disk;
3199 if (peer_state.disk == D_NEGOTIATING) {
3200 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3201 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3204 spin_lock_irq(&mdev->req_lock);
3206 oconn = nconn = mdev->state.conn;
3207 spin_unlock_irq(&mdev->req_lock);
3209 if (nconn == C_WF_REPORT_PARAMS)
3210 nconn = C_CONNECTED;
3212 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3213 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3214 int cr; /* consider resync */
3216 /* if we established a new connection */
3217 cr = (oconn < C_CONNECTED);
3218 /* if we had an established connection
3219 * and one of the nodes newly attaches a disk */
3220 cr |= (oconn == C_CONNECTED &&
3221 (peer_state.disk == D_NEGOTIATING ||
3222 mdev->state.disk == D_NEGOTIATING));
3223 /* if we have both been inconsistent, and the peer has been
3224 * forced to be UpToDate with --overwrite-data */
3225 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3226 /* if we had been plain connected, and the admin requested to
3227 * start a sync by "invalidate" or "invalidate-remote" */
3228 cr |= (oconn == C_CONNECTED &&
3229 (peer_state.conn >= C_STARTING_SYNC_S &&
3230 peer_state.conn <= C_WF_BITMAP_T));
3233 nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3236 if (nconn == C_MASK) {
3237 nconn = C_CONNECTED;
3238 if (mdev->state.disk == D_NEGOTIATING) {
3239 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3240 } else if (peer_state.disk == D_NEGOTIATING) {
3241 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3242 peer_state.disk = D_DISKLESS;
3243 real_peer_disk = D_DISKLESS;
3245 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3247 D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3248 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3254 spin_lock_irq(&mdev->req_lock);
3255 if (mdev->state.conn != oconn)
3257 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3258 ns.i = mdev->state.i;
3260 ns.peer = peer_state.role;
3261 ns.pdsk = real_peer_disk;
3262 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3263 if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3264 ns.disk = mdev->new_state_tmp.disk;
3266 rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
3268 spin_unlock_irq(&mdev->req_lock);
3270 if (rv < SS_SUCCESS) {
3271 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3275 if (oconn > C_WF_REPORT_PARAMS) {
3276 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3277 peer_state.disk != D_NEGOTIATING) {
3278 /* we want resync, peer has not yet decided to sync... */
3279 /* Nowadays only used when forcing a node into primary role and
3280 setting its disk to UpToDate with that */
3281 drbd_send_uuids(mdev);
3282 drbd_send_state(mdev);
3286 mdev->net_conf->want_lose = 0;
3288 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3293 static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3295 struct p_rs_uuid *p = (struct p_rs_uuid *)h;
3297 wait_event(mdev->misc_wait,
3298 mdev->state.conn == C_WF_SYNC_UUID ||
3299 mdev->state.conn < C_CONNECTED ||
3300 mdev->state.disk < D_NEGOTIATING);
3302 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3304 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3305 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3308 /* Here the _drbd_uuid_ functions are right, current should
3309 _not_ be rotated into the history */
3310 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3311 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3312 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3314 drbd_start_resync(mdev, C_SYNC_TARGET);
3318 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3323 enum receive_bitmap_ret { OK, DONE, FAILED };
3325 static enum receive_bitmap_ret
3326 receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
3327 unsigned long *buffer, struct bm_xfer_ctx *c)
3329 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3330 unsigned want = num_words * sizeof(long);
3332 if (want != h->length) {
3333 dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
3338 if (drbd_recv(mdev, buffer, want) != want)
3341 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3343 c->word_offset += num_words;
3344 c->bit_offset = c->word_offset * BITS_PER_LONG;
3345 if (c->bit_offset > c->bm_bits)
3346 c->bit_offset = c->bm_bits;
3351 static enum receive_bitmap_ret
3352 recv_bm_rle_bits(struct drbd_conf *mdev,
3353 struct p_compressed_bm *p,
3354 struct bm_xfer_ctx *c)
3356 struct bitstream bs;
3360 unsigned long s = c->bit_offset;
3362 int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3363 int toggle = DCBP_get_start(p);
3367 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3369 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3373 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3374 bits = vli_decode_bits(&rl, look_ahead);
3380 if (e >= c->bm_bits) {
3381 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3384 _drbd_bm_set_bits(mdev, s, e);
3388 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3389 have, bits, look_ahead,
3390 (unsigned int)(bs.cur.b - p->code),
3391 (unsigned int)bs.buf_len);
3394 look_ahead >>= bits;
3397 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3400 look_ahead |= tmp << have;
3405 bm_xfer_ctx_bit_to_word_offset(c);
3407 return (s == c->bm_bits) ? DONE : OK;
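/* In summary: the compressed packet encodes the bitmap as alternating
 * run lengths of clear and set bits (VLI coded, initial polarity taken
 * from DCBP_get_start()); only the runs of set bits are applied via
 * _drbd_bm_set_bits(). */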
3410 static enum receive_bitmap_ret
3411 decode_bitmap_c(struct drbd_conf *mdev,
3412 struct p_compressed_bm *p,
3413 struct bm_xfer_ctx *c)
3415 if (DCBP_get_code(p) == RLE_VLI_Bits)
3416 return recv_bm_rle_bits(mdev, p, c);
3418 /* other variants had been implemented for evaluation,
3419 * but have been dropped as this one turned out to be "best"
3420 * during all our tests. */
3422 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3423 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3427 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3428 const char *direction, struct bm_xfer_ctx *c)
3430 /* what would it take to transfer it "plaintext" */
3431 unsigned plain = sizeof(struct p_header) *
3432 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3433 + c->bm_words * sizeof(long);
3434 unsigned total = c->bytes[0] + c->bytes[1];
3437 /* total cannot be zero, but just in case: */
3441 /* don't report if not compressed */
3445 /* total < plain. check for overflow, still */
3446 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3447 : (1000 * total / plain);
3453 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3454 "total %u; compression: %u.%u%%\n",
3456 c->bytes[1], c->packets[1],
3457 c->bytes[0], c->packets[0],
3458 total, r/10, r % 10);
3461 /* Since we are processing the bitfield from lower addresses to higher,
3462 it does not matter whether we process it in 32 bit chunks or 64 bit
3463 chunks, as long as it is little endian. (Understand it as a byte stream,
3464 beginning with the lowest byte...) If we used big endian instead,
3465 we would need to process it from the highest address to the lowest
3466 in order to be agnostic to the 32 vs 64 bit issue.
3468 returns 0 on failure, 1 if we successfully received it. */
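/* Worked example: the 64 bit value 0x0807060504030201 stored little
 * endian is the byte stream 01 02 03 04 05 06 07 08 -- exactly the same
 * stream that the two 32 bit values 0x04030201 and 0x08070605 produce. */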
3469 static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3471 struct bm_xfer_ctx c;
3473 enum receive_bitmap_ret ret;
3476 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3478 drbd_bm_lock(mdev, "receive bitmap");
3480 /* maybe we should use some per thread scratch page,
3481 * and allocate that during initial device creation? */
3482 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3484 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3488 c = (struct bm_xfer_ctx) {
3489 .bm_bits = drbd_bm_bits(mdev),
3490 .bm_words = drbd_bm_words(mdev),
3494 if (h->command == P_BITMAP) {
3495 ret = receive_bitmap_plain(mdev, h, buffer, &c);
3496 } else if (h->command == P_COMPRESSED_BITMAP) {
3497 /* MAYBE: sanity check that we speak proto >= 90,
3498 * and the feature is enabled! */
3499 struct p_compressed_bm *p;
3501 if (h->length > BM_PACKET_PAYLOAD_BYTES) {
3502 dev_err(DEV, "ReportCBitmap packet too large\n");
3505 /* use the page buff */
3507 memcpy(p, h, sizeof(*h));
3508 if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
3510 if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3511 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3514 ret = decode_bitmap_c(mdev, p, &c);
3516 dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", h->command);
3520 c.packets[h->command == P_BITMAP]++;
3521 c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
3526 if (!drbd_recv_header(mdev, h))
3528 } while (ret == OK);
3532 INFO_bm_xfer_stats(mdev, "receive", &c);
3534 if (mdev->state.conn == C_WF_BITMAP_T) {
3535 ok = !drbd_send_bitmap(mdev);
3538 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3539 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3540 D_ASSERT(ok == SS_SUCCESS);
3541 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3542 /* admin may have requested C_DISCONNECTING,
3543 * other threads may have noticed network errors */
3544 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3545 drbd_conn_str(mdev->state.conn));
3550 drbd_bm_unlock(mdev);
3551 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3552 drbd_start_resync(mdev, C_SYNC_SOURCE);
3553 free_page((unsigned long) buffer);
3557 static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
3559 /* TODO zero copy sink :) */
3560 static char sink[128];
3563 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3564 h->command, h->length);
3568 want = min_t(int, size, sizeof(sink));
3569 r = drbd_recv(mdev, sink, want);
3570 ERR_IF(r <= 0) break;
3576 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3578 if (mdev->state.disk >= D_INCONSISTENT)
3581 /* Make sure we've acked all the TCP data associated
3582 * with the data requests being unplugged */
3583 drbd_tcp_quickack(mdev->data.socket);
3588 static void timeval_sub_us(struct timeval* tv, unsigned int us)
3590 tv->tv_sec -= us / 1000000;
3592 if (tv->tv_usec > us) {
3593 tv->tv_usec += 1000000;
3599 static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
3601 struct delay_probe *dp;
3602 struct list_head *le;
3608 seq_num = be32_to_cpu(p->seq_num);
3609 offset = be32_to_cpu(p->offset);
3611 spin_lock(&mdev->peer_seq_lock);
3612 if (!list_empty(&mdev->delay_probes)) {
3613 if (from == USE_DATA_SOCKET)
3614 le = mdev->delay_probes.next;
3616 le = mdev->delay_probes.prev;
3618 dp = list_entry(le, struct delay_probe, list);
3620 if (dp->seq_num == seq_num) {
3622 spin_unlock(&mdev->peer_seq_lock);
3623 do_gettimeofday(&now);
3624 timeval_sub_us(&now, offset);
3626 now.tv_usec - dp->time.tv_usec +
3627 (now.tv_sec - dp->time.tv_sec) * 1000000;
3630 mdev->data_delay = data_delay;
3636 if (dp->seq_num > seq_num) {
3637 spin_unlock(&mdev->peer_seq_lock);
3638 dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
3639 return; /* Do not allocate a struct delay_probe. */
3642 spin_unlock(&mdev->peer_seq_lock);
3644 dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
3646 dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
3650 dp->seq_num = seq_num;
3651 do_gettimeofday(&dp->time);
3652 timeval_sub_us(&dp->time, offset);
3654 spin_lock(&mdev->peer_seq_lock);
3655 if (from == USE_DATA_SOCKET)
3656 list_add(&dp->list, &mdev->delay_probes);
3658 list_add_tail(&dp->list, &mdev->delay_probes);
3659 spin_unlock(&mdev->peer_seq_lock);
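/* In short: a probe is matched by seq_num against its twin received on
 * the other socket; the difference of the (offset corrected) arrival
 * times becomes mdev->data_delay. A probe without a twin yet is queued
 * until the twin arrives. */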
3662 static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
3664 struct p_delay_probe *p = (struct p_delay_probe *)h;
3666 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3667 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3670 got_delay_probe(mdev, USE_DATA_SOCKET, p);
3674 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
3676 static drbd_cmd_handler_f drbd_default_handler[] = {
3677 [P_DATA] = receive_Data,
3678 [P_DATA_REPLY] = receive_DataReply,
3679 [P_RS_DATA_REPLY] = receive_RSDataReply,
3680 [P_BARRIER] = receive_Barrier,
3681 [P_BITMAP] = receive_bitmap,
3682 [P_COMPRESSED_BITMAP] = receive_bitmap,
3683 [P_UNPLUG_REMOTE] = receive_UnplugRemote,
3684 [P_DATA_REQUEST] = receive_DataRequest,
3685 [P_RS_DATA_REQUEST] = receive_DataRequest,
3686 [P_SYNC_PARAM] = receive_SyncParam,
3687 [P_SYNC_PARAM89] = receive_SyncParam,
3688 [P_PROTOCOL] = receive_protocol,
3689 [P_UUIDS] = receive_uuids,
3690 [P_SIZES] = receive_sizes,
3691 [P_STATE] = receive_state,
3692 [P_STATE_CHG_REQ] = receive_req_state,
3693 [P_SYNC_UUID] = receive_sync_uuid,
3694 [P_OV_REQUEST] = receive_DataRequest,
3695 [P_OV_REPLY] = receive_DataRequest,
3696 [P_CSUM_RS_REQUEST] = receive_DataRequest,
3697 [P_DELAY_PROBE] = receive_delay_probe,
3698 /* anything missing from this table is in
3699 * the asender_tbl, see get_asender_cmd */
3703 static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
3704 static drbd_cmd_handler_f *drbd_opt_cmd_handler;
3706 static void drbdd(struct drbd_conf *mdev)
3708 drbd_cmd_handler_f handler;
3709 struct p_header *header = &mdev->data.rbuf.header;
3711 while (get_t_state(&mdev->receiver) == Running) {
3712 drbd_thread_current_set_cpu(mdev);
3713 if (!drbd_recv_header(mdev, header)) {
3714 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3718 if (header->command < P_MAX_CMD)
3719 handler = drbd_cmd_handler[header->command];
3720 else if (P_MAY_IGNORE < header->command
3721 && header->command < P_MAX_OPT_CMD)
3722 handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3723 else if (header->command > P_MAX_OPT_CMD)
3724 handler = receive_skip;
3728 if (unlikely(!handler)) {
3729 dev_err(DEV, "unknown packet type %d, l: %d!\n",
3730 header->command, header->length);
3731 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3734 if (unlikely(!handler(mdev, header))) {
3735 dev_err(DEV, "error receiving %s, l: %d!\n",
3736 cmdname(header->command), header->length);
3737 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3743 static void drbd_fail_pending_reads(struct drbd_conf *mdev)
3745 struct hlist_head *slot;
3746 struct hlist_node *pos;
3747 struct hlist_node *tmp;
3748 struct drbd_request *req;
3752 * Application READ requests
3754 spin_lock_irq(&mdev->req_lock);
3755 for (i = 0; i < APP_R_HSIZE; i++) {
3756 slot = mdev->app_reads_hash+i;
3757 hlist_for_each_entry_safe(req, pos, tmp, slot, colision) {
3758 /* it may (but should not any longer!)
3759 * be on the work queue; if that assert triggers,
3760 * we need to also grab the
3761 * spin_lock_irq(&mdev->data.work.q_lock);
3762 * and list_del_init here. */
3763 D_ASSERT(list_empty(&req->w.list));
3764 /* It would be nice to complete outside of spinlock.
3765 * But this is easier for now. */
3766 _req_mod(req, connection_lost_while_pending);
3769 for (i = 0; i < APP_R_HSIZE; i++)
3770 if (!hlist_empty(mdev->app_reads_hash+i))
3771 dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: "
3772 "%p, should be NULL\n", i, mdev->app_reads_hash[i].first);
3774 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
3775 spin_unlock_irq(&mdev->req_lock);
3778 void drbd_flush_workqueue(struct drbd_conf *mdev)
3780 struct drbd_wq_barrier barr;
3782 barr.w.cb = w_prev_work_done;
3783 init_completion(&barr.done);
3784 drbd_queue_work(&mdev->data.work, &barr.w);
3785 wait_for_completion(&barr.done);
3788 static void drbd_disconnect(struct drbd_conf *mdev)
3790 enum drbd_fencing_p fp;
3791 union drbd_state os, ns;
3792 int rv = SS_UNKNOWN_ERROR;
3795 if (mdev->state.conn == C_STANDALONE)
3797 if (mdev->state.conn >= C_WF_CONNECTION)
3798 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3799 drbd_conn_str(mdev->state.conn));
3801 /* asender does not clean up anything. it must not interfere, either */
3802 drbd_thread_stop(&mdev->asender);
3803 drbd_free_sock(mdev);
3805 spin_lock_irq(&mdev->req_lock);
3806 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3807 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3808 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3809 spin_unlock_irq(&mdev->req_lock);
3811 /* We do not have data structures that would allow us to
3812 * get the rs_pending_cnt down to 0 again.
3813 * * On C_SYNC_TARGET we do not have any data structures describing
3814 * the pending RSDataRequest's we have sent.
3815 * * On C_SYNC_SOURCE there is no data structure that tracks
3816 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3817 * And no, it is not the sum of the reference counts in the
3818 * resync_LRU. The resync_LRU tracks the whole operation including
3819 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3821 drbd_rs_cancel_all(mdev);
3823 mdev->rs_failed = 0;
3824 atomic_set(&mdev->rs_pending_cnt, 0);
3825 wake_up(&mdev->misc_wait);
3827 /* make sure syncer is stopped and w_resume_next_sg queued */
3828 del_timer_sync(&mdev->resync_timer);
3829 set_bit(STOP_SYNC_TIMER, &mdev->flags);
3830 resync_timer_fn((unsigned long)mdev);
3832 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3833 * w_make_resync_request etc. which may still be on the worker queue
3834 * to be "canceled" */
3835 drbd_flush_workqueue(mdev);
3837 /* This also does reclaim_net_ee(). If we do this too early, we might
3838 * miss some resync ee and pages.*/
3839 drbd_process_done_ee(mdev);
3841 kfree(mdev->p_uuid);
3842 mdev->p_uuid = NULL;
3844 if (!mdev->state.susp)
3847 drbd_fail_pending_reads(mdev);
3849 dev_info(DEV, "Connection closed\n");
3854 if (get_ldev(mdev)) {
3855 fp = mdev->ldev->dc.fencing;
3859 if (mdev->state.role == R_PRIMARY) {
3860 if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
3861 enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
3862 drbd_request_state(mdev, NS(pdsk, nps));
3866 spin_lock_irq(&mdev->req_lock);
3868 if (os.conn >= C_UNCONNECTED) {
3869 /* Do not restart in case we are C_DISCONNECTING */
3871 ns.conn = C_UNCONNECTED;
3872 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3874 spin_unlock_irq(&mdev->req_lock);
3876 if (os.conn == C_DISCONNECTING) {
3877 struct hlist_head *h;
3878 wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
3880 /* we must not free the tl_hash
3881 * while application io is still on the fly */
3882 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0);
3884 spin_lock_irq(&mdev->req_lock);
3886 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3888 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3889 (int)(h - mdev->ee_hash), h->first);
3890 kfree(mdev->ee_hash);
3891 mdev->ee_hash = NULL;
3892 mdev->ee_hash_s = 0;
3895 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3897 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3898 (int)(h - mdev->tl_hash), h->first);
3899 kfree(mdev->tl_hash);
3900 mdev->tl_hash = NULL;
3901 mdev->tl_hash_s = 0;
3902 spin_unlock_irq(&mdev->req_lock);
3904 crypto_free_hash(mdev->cram_hmac_tfm);
3905 mdev->cram_hmac_tfm = NULL;
3907 kfree(mdev->net_conf);
3908 mdev->net_conf = NULL;
3909 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3912 /* tcp_close and release of sendpage pages can be deferred. I don't
3913 * want to use SO_LINGER, because apparently it can be deferred for
3914 * more than 20 seconds (longest time I checked).
3916 * Actually we don't care for exactly when the network stack does its
3917 * put_page(), but release our reference on these pages right here.
3919 i = drbd_release_ee(mdev, &mdev->net_ee);
3921 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3922 i = atomic_read(&mdev->pp_in_use);
3924 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3926 D_ASSERT(list_empty(&mdev->read_ee));
3927 D_ASSERT(list_empty(&mdev->active_ee));
3928 D_ASSERT(list_empty(&mdev->sync_ee));
3929 D_ASSERT(list_empty(&mdev->done_ee));
3931 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3932 atomic_set(&mdev->current_epoch->epoch_size, 0);
3933 D_ASSERT(list_empty(&mdev->current_epoch->list));
3937 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3938 * we can agree on is stored in agreed_pro_version.
3940 * feature flags and the reserved array should be enough room for future
3941 * enhancements of the handshake protocol, and possible plugins...
3943 * for now, they are expected to be zero, but ignored.
3945 static int drbd_send_handshake(struct drbd_conf *mdev)
3947 /* ASSERT current == mdev->receiver ... */
3948 struct p_handshake *p = &mdev->data.sbuf.handshake;
3951 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3952 dev_err(DEV, "interrupted during initial handshake\n");
3953 return 0; /* interrupted. not ok. */
3956 if (mdev->data.socket == NULL) {
3957 mutex_unlock(&mdev->data.mutex);
3961 memset(p, 0, sizeof(*p));
3962 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3963 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3964 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3965 (struct p_header *)p, sizeof(*p), 0 );
3966 mutex_unlock(&mdev->data.mutex);
3972 * 1 yes, we have a valid connection
3973 * 0 oops, did not work out, please try again
3974 * -1 peer talks different language,
3975 * no point in trying again, please go standalone.
3977 static int drbd_do_handshake(struct drbd_conf *mdev)
3979 /* ASSERT current == mdev->receiver ... */
3980 struct p_handshake *p = &mdev->data.rbuf.handshake;
3981 const int expect = sizeof(struct p_handshake)
3982 -sizeof(struct p_header);
3985 rv = drbd_send_handshake(mdev);
3989 rv = drbd_recv_header(mdev, &p->head);
3993 if (p->head.command != P_HAND_SHAKE) {
3994 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3995 cmdname(p->head.command), p->head.command);
3999 if (p->head.length != expect) {
4000 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
4001 expect, p->head.length);
4005 rv = drbd_recv(mdev, &p->head.payload, expect);
4008 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
4012 p->protocol_min = be32_to_cpu(p->protocol_min);
4013 p->protocol_max = be32_to_cpu(p->protocol_max);
4014 if (p->protocol_max == 0)
4015 p->protocol_max = p->protocol_min;
4017 if (PRO_VERSION_MAX < p->protocol_min ||
4018 PRO_VERSION_MIN > p->protocol_max)
4021 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4023 dev_info(DEV, "Handshake successful: "
4024 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
4029 dev_err(DEV, "incompatible DRBD dialects: "
4030 "I support %d-%d, peer supports %d-%d\n",
4031 PRO_VERSION_MIN, PRO_VERSION_MAX,
4032 p->protocol_min, p->protocol_max);
4036 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4037 static int drbd_do_auth(struct drbd_conf *mdev)
4039 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4040 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4044 #define CHALLENGE_LEN 64
4048 0 - failed, try again (network error),
4049 -1 - auth failed, don't try again.
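/* (The success value 1 is produced by the memcmp() comparison at the
 * end of this function.)
 * The exchange is a symmetric challenge/response: we send a 64 byte
 * random challenge, answer the peer's challenge with an HMAC keyed by
 * the shared secret, and verify the peer's answer to our challenge
 * against the locally computed expected response. */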
4052 static int drbd_do_auth(struct drbd_conf *mdev)
4054 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4055 struct scatterlist sg;
4056 char *response = NULL;
4057 char *right_response = NULL;
4058 char *peers_ch = NULL;
4060 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4061 unsigned int resp_size;
4062 struct hash_desc desc;
4065 desc.tfm = mdev->cram_hmac_tfm;
4068 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4069 (u8 *)mdev->net_conf->shared_secret, key_len);
4071 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4076 get_random_bytes(my_challenge, CHALLENGE_LEN);
4078 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4082 rv = drbd_recv_header(mdev, &p);
4086 if (p.command != P_AUTH_CHALLENGE) {
4087 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4088 cmdname(p.command), p.command);
4093 if (p.length > CHALLENGE_LEN*2) {
4094 dev_err(DEV, "AuthChallenge payload bigger than expected.\n");
4099 peers_ch = kmalloc(p.length, GFP_NOIO);
4100 if (peers_ch == NULL) {
4101 dev_err(DEV, "kmalloc of peers_ch failed\n");
4106 rv = drbd_recv(mdev, peers_ch, p.length);
4108 if (rv != p.length) {
4109 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4114 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4115 response = kmalloc(resp_size, GFP_NOIO);
4116 if (response == NULL) {
4117 dev_err(DEV, "kmalloc of response failed\n");
4122 sg_init_table(&sg, 1);
4123 sg_set_buf(&sg, peers_ch, p.length);
4125 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4127 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4132 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4136 rv = drbd_recv_header(mdev, &p);
4140 if (p.command != P_AUTH_RESPONSE) {
4141 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4142 cmdname(p.command), p.command);
4147 if (p.length != resp_size) {
4148 dev_err(DEV, "AuthResponse payload has unexpected size\n");
4153 rv = drbd_recv(mdev, response, resp_size);
4155 if (rv != resp_size) {
4156 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4161 right_response = kmalloc(resp_size, GFP_NOIO);
4162 if (right_response == NULL) {
4163 dev_err(DEV, "kmalloc of right_response failed\n");
4168 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4170 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4172 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4177 rv = !memcmp(response, right_response, resp_size);
4180 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4181 resp_size, mdev->net_conf->cram_hmac_alg);
4188 kfree(right_response);
4194 int drbdd_init(struct drbd_thread *thi)
4196 struct drbd_conf *mdev = thi->mdev;
4197 unsigned int minor = mdev_to_minor(mdev);
4200 sprintf(current->comm, "drbd%d_receiver", minor);
4202 dev_info(DEV, "receiver (re)started\n");
4205 h = drbd_connect(mdev);
4207 drbd_disconnect(mdev);
4208 __set_current_state(TASK_INTERRUPTIBLE);
4209 schedule_timeout(HZ);
4212 dev_warn(DEV, "Discarding network configuration.\n");
4213 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4218 if (get_net_conf(mdev)) {
4224 drbd_disconnect(mdev);
4226 dev_info(DEV, "receiver terminated\n");
4230 /* ********* acknowledge sender ******** */
4232 static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4234 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4236 int retcode = be32_to_cpu(p->retcode);
4238 if (retcode >= SS_SUCCESS) {
4239 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4241 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4242 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4243 drbd_set_st_err_str(retcode), retcode);
4245 wake_up(&mdev->state_wait);
4250 static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
4252 return drbd_send_ping_ack(mdev);
4256 static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4258 /* restore idle timeout */
4259 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4260 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4261 wake_up(&mdev->misc_wait);
4266 static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4268 struct p_block_ack *p = (struct p_block_ack *)h;
4269 sector_t sector = be64_to_cpu(p->sector);
4270 int blksize = be32_to_cpu(p->blksize);
4272 D_ASSERT(mdev->agreed_pro_version >= 89);
4274 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4276 drbd_rs_complete_io(mdev, sector);
4277 drbd_set_in_sync(mdev, sector, blksize);
4278 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4279 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4280 dec_rs_pending(mdev);
4285 /* when we receive the ACK for a write request,
4286 * verify that we actually know about it */
4287 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4288 u64 id, sector_t sector)
4290 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4291 struct hlist_node *n;
4292 struct drbd_request *req;
4294 hlist_for_each_entry(req, n, slot, colision) {
4295 if ((unsigned long)req == (unsigned long)id) {
4296 if (req->sector != sector) {
4297 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4298 "wrong sector (%llus versus %llus)\n", req,
4299 (unsigned long long)req->sector,
4300 (unsigned long long)sector);
4306 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4307 (void *)(unsigned long)id, (unsigned long long)sector);
4311 typedef struct drbd_request *(req_validator_fn)
4312 (struct drbd_conf *mdev, u64 id, sector_t sector);
4314 static int validate_req_change_req_state(struct drbd_conf *mdev,
4315 u64 id, sector_t sector, req_validator_fn validator,
4316 const char *func, enum drbd_req_event what)
4318 struct drbd_request *req;
4319 struct bio_and_error m;
4321 spin_lock_irq(&mdev->req_lock);
4322 req = validator(mdev, id, sector);
4323 if (unlikely(!req)) {
4324 spin_unlock_irq(&mdev->req_lock);
4325 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4328 __req_mod(req, what, &m);
4329 spin_unlock_irq(&mdev->req_lock);
4332 complete_master_bio(mdev, &m);
4336 static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4338 struct p_block_ack *p = (struct p_block_ack *)h;
4339 sector_t sector = be64_to_cpu(p->sector);
4340 int blksize = be32_to_cpu(p->blksize);
4341 enum drbd_req_event what;
4343 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4345 if (is_syncer_block_id(p->block_id)) {
4346 drbd_set_in_sync(mdev, sector, blksize);
4347 dec_rs_pending(mdev);
4350 switch (be16_to_cpu(h->command)) {
4351 case P_RS_WRITE_ACK:
4352 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4353 what = write_acked_by_peer_and_sis;
4356 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4357 what = write_acked_by_peer;
4360 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4361 what = recv_acked_by_peer;
4364 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4365 what = conflict_discarded_by_peer;
4372 return validate_req_change_req_state(mdev, p->block_id, sector,
4373 _ack_id_to_req, __func__ , what);
4376 static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4378 struct p_block_ack *p = (struct p_block_ack *)h;
4379 sector_t sector = be64_to_cpu(p->sector);
4381 if (__ratelimit(&drbd_ratelimit_state))
4382 dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");
4384 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4386 if (is_syncer_block_id(p->block_id)) {
4387 int size = be32_to_cpu(p->blksize);
4388 dec_rs_pending(mdev);
4389 drbd_rs_failed_io(mdev, sector, size);
4392 return validate_req_change_req_state(mdev, p->block_id, sector,
4393 _ack_id_to_req, __func__ , neg_acked);
4396 static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4398 struct p_block_ack *p = (struct p_block_ack *)h;
4399 sector_t sector = be64_to_cpu(p->sector);
4401 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4402 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4403 (unsigned long long)sector, be32_to_cpu(p->blksize));
4405 return validate_req_change_req_state(mdev, p->block_id, sector,
4406 _ar_id_to_req, __func__ , neg_acked);
4409 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4413 struct p_block_ack *p = (struct p_block_ack *)h;
4415 sector = be64_to_cpu(p->sector);
4416 size = be32_to_cpu(p->blksize);
4418 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4420 dec_rs_pending(mdev);
4422 if (get_ldev_if_state(mdev, D_FAILED)) {
4423 drbd_rs_complete_io(mdev, sector);
4424 drbd_rs_failed_io(mdev, sector, size);
4431 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4433 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4435 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4440 static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4442 struct p_block_ack *p = (struct p_block_ack *)h;
4443 struct drbd_work *w;
4447 sector = be64_to_cpu(p->sector);
4448 size = be32_to_cpu(p->blksize);
4450 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4452 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4453 drbd_ov_oos_found(mdev, sector, size);
4457 drbd_rs_complete_io(mdev, sector);
4458 dec_rs_pending(mdev);
4460 if (--mdev->ov_left == 0) {
4461 w = kmalloc(sizeof(*w), GFP_NOIO);
4463 w->cb = w_ov_finished;
4464 drbd_queue_work_front(&mdev->data.work, w);
4466 dev_err(DEV, "kmalloc(w) failed.");
4468 drbd_resync_finished(mdev);
4474 static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
4476 struct p_delay_probe *p = (struct p_delay_probe *)h;
4478 got_delay_probe(mdev, USE_META_SOCKET, p);
4482 struct asender_cmd {
4484 int (*process)(struct drbd_conf *mdev, struct p_header *h);
static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]            = { sizeof(struct p_header), got_Ping },
	[P_PING_ACK]        = { sizeof(struct p_header), got_PingAck },
	[P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]     = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]       = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_delay_probe_m },
	[P_MAX_CMD]         = { 0, NULL },
	};

	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
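
/*
 * The asender thread services the meta data socket: it sends a ping
 * whenever SEND_PING is set, flushes the done_ee list, and receives
 * and dispatches the peer's ack packets.  A packet is read in two
 * steps: first a struct p_header, whose command field selects an
 * entry in the asender_cmd table and thereby the full packet size,
 * then the remainder until 'received' reaches 'expect', at which
 * point the handler runs.
 */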
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header *h = &mdev->meta.rbuf.header;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev)) {
				dev_err(DEV, "process_done_ee() = NOT_OK\n");
				goto reconnect;
			}
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR        (on meta) we got a signal
		 * -EAGAIN       (on meta) rcvtimeo expired
		 * -ECONNRESET   other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0       other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0      : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf      += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
				    (long)be32_to_cpu(h->magic),
				    h->command, h->length);
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
				    (long)be32_to_cpu(h->magic),
				    h->command, h->length);
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header))
				goto reconnect;
		}

		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf      = h;
			received = 0;
			expect   = sizeof(struct p_header);
			cmd      = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}