4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <asm/uaccess.h>
31 #include <linux/drbd.h>
33 #include <linux/file.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
46 #include <linux/string.h>
47 #include <linux/scatterlist.h>
55 struct drbd_epoch *epoch;
64 static int drbd_do_handshake(struct drbd_conf *mdev);
65 static int drbd_do_auth(struct drbd_conf *mdev);
67 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
68 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
70 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
72 struct drbd_epoch *prev;
73 spin_lock(&mdev->epoch_lock);
74 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
75 if (prev == epoch || prev == mdev->current_epoch)
76 prev = NULL;
77 spin_unlock(&mdev->epoch_lock);
81 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
83 static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
85 struct page *page = NULL;
87 /* Yes, testing drbd_pp_vacant outside the lock is racy.
88 * So what. It saves a spin_lock. */
89 if (drbd_pp_vacant > 0) {
90 spin_lock(&drbd_pp_lock);
93 drbd_pp_pool = (struct page *)page_private(page);
94 set_page_private(page, 0); /* just to be polite */
97 spin_unlock(&drbd_pp_lock);
99 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
100 * "criss-cross" setup, that might cause write-out on some other DRBD,
101 * which in turn might block on the other node at this very place. */
103 page = alloc_page(GFP_TRY);
105 atomic_inc(&mdev->pp_in_use);
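/*
 * Editor's note: a minimal, self-contained userspace sketch (not DRBD code)
 * of the pool trick above: free pages are chained into a singly linked list
 * threaded through each page's ->private field (set_page_private()), so the
 * pool needs no extra memory for its own bookkeeping.
 */
#if 0 /* illustration only */
struct fake_page { unsigned long private; };

static struct fake_page *pool;			/* head of the free list */

static void pool_push(struct fake_page *pg)
{
	pg->private = (unsigned long)pool;	/* link to the old head */
	pool = pg;
}

static struct fake_page *pool_pop(void)
{
	struct fake_page *pg = pool;
	if (pg) {
		pool = (struct fake_page *)pg->private;
		pg->private = 0;		/* "just to be polite" */
	}
	return pg;
}
#endif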
109 /* kick lower level device, if we have more than (arbitrary number)
110 * reference counts on it, which typically are locally submitted io
111 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
112 static void maybe_kick_lo(struct drbd_conf *mdev)
114 if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
115 drbd_kick_lo(mdev);
118 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
120 struct drbd_epoch_entry *e;
121 struct list_head *le, *tle;
123 /* The EEs are always appended to the end of the list. Since
124 they are sent in order over the wire, they have to finish
125 in order. As soon as we see the first unfinished one, we can
126 stop examining the list... */
128 list_for_each_safe(le, tle, &mdev->net_ee) {
129 e = list_entry(le, struct drbd_epoch_entry, w.list);
130 if (drbd_bio_has_active_page(e->private_bio))
132 list_move(le, to_be_freed);
136 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
138 LIST_HEAD(reclaimed);
139 struct drbd_epoch_entry *e, *t;
142 spin_lock_irq(&mdev->req_lock);
143 reclaim_net_ee(mdev, &reclaimed);
144 spin_unlock_irq(&mdev->req_lock);
146 list_for_each_entry_safe(e, t, &reclaimed, w.list)
147 drbd_free_ee(mdev, e);
151 * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
152 * @mdev: DRBD device.
153 * @retry: whether or not to retry allocation forever (or until signalled)
155 * Tries to allocate a page, first from our own page pool, then from the
156 * kernel, unless this allocation would exceed the max_buffers setting.
157 * If @retry is non-zero, retry until DRBD frees a page somewhere else.
159 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
161 struct page *page = NULL;
164 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
165 page = drbd_pp_first_page_or_try_alloc(mdev);
171 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
173 drbd_kick_lo_and_reclaim_net(mdev);
175 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
176 page = drbd_pp_first_page_or_try_alloc(mdev);
184 if (signal_pending(current)) {
185 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
191 finish_wait(&drbd_pp_wait, &wait);
196 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
197 * It is also used from inside another spin_lock_irq(&mdev->req_lock) */
198 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
202 spin_lock(&drbd_pp_lock);
203 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
206 set_page_private(page, (unsigned long)drbd_pp_pool);
211 spin_unlock(&drbd_pp_lock);
213 atomic_dec(&mdev->pp_in_use);
218 wake_up(&drbd_pp_wait);
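/*
 * Editor's note: hedged usage sketch. drbd_pp_alloc() and drbd_pp_free()
 * come in pairs; pp_in_use tracks the balance against max_buffers. With
 * retry != 0, the allocation fails only if a signal interrupts the wait.
 */
#if 0 /* illustration only */
	struct page *page = drbd_pp_alloc(mdev, 1 /* retry */);
	if (page) {
		/* ... fill the page from the socket ... */
		drbd_pp_free(mdev, page);
	}
#endif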
221 static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
223 struct page *p_to_be_freed = NULL;
225 struct bio_vec *bvec;
228 spin_lock(&drbd_pp_lock);
229 __bio_for_each_segment(bvec, bio, i, 0) {
230 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
231 set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
232 p_to_be_freed = bvec->bv_page;
234 set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
235 drbd_pp_pool = bvec->bv_page;
239 spin_unlock(&drbd_pp_lock);
240 atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);
242 while (p_to_be_freed) {
243 page = p_to_be_freed;
244 p_to_be_freed = (struct page *)page_private(page);
245 set_page_private(page, 0); /* just to be polite */
249 wake_up(&drbd_pp_wait);
253 You need to hold the req_lock:
254 _drbd_wait_ee_list_empty()
256 You must not have the req_lock:
262 drbd_process_done_ee()
264 drbd_wait_ee_list_empty()
267 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
270 unsigned int data_size,
271 gfp_t gfp_mask) __must_hold(local)
273 struct request_queue *q;
274 struct drbd_epoch_entry *e;
279 if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
282 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
284 if (!(gfp_mask & __GFP_NOWARN))
285 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
289 bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
291 if (!(gfp_mask & __GFP_NOWARN))
292 dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
296 bio->bi_bdev = mdev->ldev->backing_bdev;
297 bio->bi_sector = sector;
301 page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
303 if (!(gfp_mask & __GFP_NOWARN))
304 dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
307 if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
308 drbd_pp_free(mdev, page);
309 dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
310 "data_size=%u,ds=%u) failed\n",
311 (unsigned long long)sector, data_size, ds);
313 q = bdev_get_queue(bio->bi_bdev);
314 if (q->merge_bvec_fn) {
315 struct bvec_merge_data bvm = {
316 .bi_bdev = bio->bi_bdev,
317 .bi_sector = bio->bi_sector,
318 .bi_size = bio->bi_size,
321 int l = q->merge_bvec_fn(q, &bvm,
322 &bio->bi_io_vec[bio->bi_vcnt]);
323 dev_err(DEV, "merge_bvec_fn() = %d\n", l);
326 /* dump more of the bio. */
327 dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
328 dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
329 dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
330 dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);
335 ds -= min_t(int, ds, PAGE_SIZE);
338 D_ASSERT(data_size == bio->bi_size);
343 e->size = bio->bi_size;
345 e->private_bio = bio;
347 INIT_HLIST_NODE(&e->colision);
354 drbd_pp_free_bio_pages(mdev, bio);
357 mempool_free(e, drbd_ee_mempool);
362 void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
364 struct bio *bio = e->private_bio;
365 drbd_pp_free_bio_pages(mdev, bio);
367 D_ASSERT(hlist_unhashed(&e->colision));
368 mempool_free(e, drbd_ee_mempool);
371 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
373 LIST_HEAD(work_list);
374 struct drbd_epoch_entry *e, *t;
377 spin_lock_irq(&mdev->req_lock);
378 list_splice_init(list, &work_list);
379 spin_unlock_irq(&mdev->req_lock);
381 list_for_each_entry_safe(e, t, &work_list, w.list) {
382 drbd_free_ee(mdev, e);
390 * This function is called from _asender only_
391 * but see also comments in _req_mod(,barrier_acked)
392 * and receive_Barrier.
394 * Move entries from net_ee to done_ee, if ready.
395 * Grab done_ee, call all callbacks, free the entries.
396 * The callbacks typically send out ACKs.
398 static int drbd_process_done_ee(struct drbd_conf *mdev)
400 LIST_HEAD(work_list);
401 LIST_HEAD(reclaimed);
402 struct drbd_epoch_entry *e, *t;
403 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
405 spin_lock_irq(&mdev->req_lock);
406 reclaim_net_ee(mdev, &reclaimed);
407 list_splice_init(&mdev->done_ee, &work_list);
408 spin_unlock_irq(&mdev->req_lock);
410 list_for_each_entry_safe(e, t, &reclaimed, w.list)
411 drbd_free_ee(mdev, e);
413 /* possible callbacks here:
414 * e_end_block, and e_end_resync_block, e_send_discard_ack.
415 * all ignore the last argument.
417 list_for_each_entry_safe(e, t, &work_list, w.list) {
418 /* list_del not necessary, next/prev members not touched */
419 ok = e->w.cb(mdev, &e->w, !ok) && ok;
420 drbd_free_ee(mdev, e);
422 wake_up(&mdev->ee_wait);
427 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
431 /* avoids spin_lock/unlock
432 * and calling prepare_to_wait in the fast path */
433 while (!list_empty(head)) {
434 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
435 spin_unlock_irq(&mdev->req_lock);
438 finish_wait(&mdev->ee_wait, &wait);
439 spin_lock_irq(&mdev->req_lock);
443 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
445 spin_lock_irq(&mdev->req_lock);
446 _drbd_wait_ee_list_empty(mdev, head);
447 spin_unlock_irq(&mdev->req_lock);
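/*
 * Editor's note: sketch of the locking rule stated further above. The
 * underscore variant expects req_lock to be held by the caller; the plain
 * variant (just above) takes and releases the lock itself.
 */
#if 0 /* illustration only */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);	/* lock held */
	spin_unlock_irq(&mdev->req_lock);

	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);	/* takes the lock */
#endif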
450 /* see also kernel_accept, which is only present since 2.6.18.
451 * we also want to log exactly which part of it failed */
452 static int drbd_accept(struct drbd_conf *mdev, const char **what,
453 struct socket *sock, struct socket **newsock)
455 struct sock *sk = sock->sk;
459 err = sock->ops->listen(sock, 5);
463 *what = "sock_create_lite";
464 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
470 err = sock->ops->accept(sock, *newsock, 0);
472 sock_release(*newsock);
476 (*newsock)->ops = sock->ops;
482 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
483 void *buf, size_t size, int flags)
490 struct msghdr msg = {
492 .msg_iov = (struct iovec *)&iov,
493 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
499 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
505 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
512 struct msghdr msg = {
514 .msg_iov = (struct iovec *)&iov,
515 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
523 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
528 * ECONNRESET other side closed the connection
529 * ERESTARTSYS (on sock) we got a signal
533 if (rv == -ECONNRESET)
534 dev_info(DEV, "sock was reset by peer\n");
535 else if (rv != -ERESTARTSYS)
536 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
538 } else if (rv == 0) {
539 dev_info(DEV, "sock was shut down by peer\n");
542 /* signal came in, or peer/link went down,
543 * after we read a partial message
545 /* D_ASSERT(signal_pending(current)); */
553 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
558 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
562 struct sockaddr_in6 src_in6;
564 int disconnect_on_error = 1;
566 if (!get_net_conf(mdev))
569 what = "sock_create_kern";
570 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
571 SOCK_STREAM, IPPROTO_TCP, &sock);
577 sock->sk->sk_rcvtimeo =
578 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
580 /* explicitly bind to the configured IP as source IP
581 * for the outgoing connections.
582 * This is needed for multihomed hosts and to be
583 * able to use lo: interfaces for drbd.
584 * Make sure to use 0 as port number, so linux selects
585 * a free one dynamically.
587 memcpy(&src_in6, mdev->net_conf->my_addr,
588 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
589 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
590 src_in6.sin6_port = 0;
592 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
594 what = "bind before connect";
595 err = sock->ops->bind(sock,
596 (struct sockaddr *) &src_in6,
597 mdev->net_conf->my_addr_len);
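/*
 * Editor's note: self-contained userspace analogue (not DRBD code) of the
 * "bind before connect" pattern above: fixing the source IP while leaving
 * sin_port as 0 lets the kernel pick a free ephemeral port.
 */
#if 0 /* illustration only */
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_from(const char *src_ip, const struct sockaddr_in *peer)
{
	struct sockaddr_in src = { .sin_family = AF_INET, .sin_port = 0 };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	inet_pton(AF_INET, src_ip, &src.sin_addr);
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0 ||
	    connect(fd, (const struct sockaddr *)peer, sizeof(*peer)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif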
601 /* connect may fail, peer not yet available.
602 * stay C_WF_CONNECTION, don't go Disconnecting! */
603 disconnect_on_error = 0;
605 err = sock->ops->connect(sock,
606 (struct sockaddr *)mdev->net_conf->peer_addr,
607 mdev->net_conf->peer_addr_len, 0);
616 /* timeout, busy, signal pending */
617 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
618 case EINTR: case ERESTARTSYS:
619 /* peer not (yet) available, network problem */
620 case ECONNREFUSED: case ENETUNREACH:
621 case EHOSTDOWN: case EHOSTUNREACH:
622 disconnect_on_error = 0;
625 dev_err(DEV, "%s failed, err = %d\n", what, err);
627 if (disconnect_on_error)
628 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
634 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
637 struct socket *s_estab = NULL, *s_listen;
640 if (!get_net_conf(mdev))
643 what = "sock_create_kern";
644 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
645 SOCK_STREAM, IPPROTO_TCP, &s_listen);
651 timeo = mdev->net_conf->try_connect_int * HZ;
652 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* +- timeo/7, i.e. a ~28.5% total spread of random jitter */
654 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
655 s_listen->sk->sk_rcvtimeo = timeo;
656 s_listen->sk->sk_sndtimeo = timeo;
658 what = "bind before listen";
659 err = s_listen->ops->bind(s_listen,
660 (struct sockaddr *) mdev->net_conf->my_addr,
661 mdev->net_conf->my_addr_len);
665 err = drbd_accept(mdev, &what, s_listen, &s_estab);
669 sock_release(s_listen);
671 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
672 dev_err(DEV, "%s failed, err = %d\n", what, err);
673 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
681 static int drbd_send_fp(struct drbd_conf *mdev,
682 struct socket *sock, enum drbd_packets cmd)
684 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
686 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
689 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
691 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
694 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
696 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
697 return be16_to_cpu(h->command);
703 * drbd_socket_okay() - Free the socket if its connection is not okay
704 * @mdev: DRBD device.
705 * @sock: pointer to the pointer to the socket.
707 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
715 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
717 if (rr > 0 || rr == -EAGAIN) {
728 * 1 yes, we have a valid connection
729 * 0 oops, did not work out, please try again
730 * -1 peer talks different language,
731 * no point in trying again, please go standalone.
732 * -2 We do not have a network config...
734 static int drbd_connect(struct drbd_conf *mdev)
736 struct socket *s, *sock, *msock;
739 D_ASSERT(!mdev->data.socket);
741 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
742 dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");
744 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
747 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
754 /* 3 tries, this should take less than a second! */
755 s = drbd_try_connect(mdev);
758 /* give the other side time to call bind() & listen() */
759 __set_current_state(TASK_INTERRUPTIBLE);
760 schedule_timeout(HZ / 10);
765 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
769 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
773 dev_err(DEV, "Logic error in drbd_connect()\n");
774 goto out_release_sockets;
779 __set_current_state(TASK_INTERRUPTIBLE);
780 schedule_timeout(HZ / 10);
781 ok = drbd_socket_okay(mdev, &sock);
782 ok = drbd_socket_okay(mdev, &msock) && ok;
788 s = drbd_wait_for_connect(mdev);
790 try = drbd_recv_fp(mdev, s);
791 drbd_socket_okay(mdev, &sock);
792 drbd_socket_okay(mdev, &msock);
796 dev_warn(DEV, "initial packet S crossed\n");
803 dev_warn(DEV, "initial packet M crossed\n");
807 set_bit(DISCARD_CONCURRENT, &mdev->flags);
810 dev_warn(DEV, "Error receiving initial packet\n");
817 if (mdev->state.conn <= C_DISCONNECTING)
818 goto out_release_sockets;
819 if (signal_pending(current)) {
820 flush_signals(current);
822 if (get_t_state(&mdev->receiver) == Exiting)
823 goto out_release_sockets;
827 ok = drbd_socket_okay(mdev, &sock);
828 ok = drbd_socket_okay(mdev, &msock) && ok;
834 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
835 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
837 sock->sk->sk_allocation = GFP_NOIO;
838 msock->sk->sk_allocation = GFP_NOIO;
840 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
841 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
843 if (mdev->net_conf->sndbuf_size) {
844 sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
845 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
848 if (mdev->net_conf->rcvbuf_size) {
849 sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
850 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
854 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
855 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
856 * first set it to the P_HAND_SHAKE timeout,
857 * which we set to 4x the configured ping_timeout. */
858 sock->sk->sk_sndtimeo =
859 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
861 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
862 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
864 /* we don't want delays.
865 * we use TCP_CORK where appropriate, though */
866 drbd_tcp_nodelay(sock);
867 drbd_tcp_nodelay(msock);
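/*
 * Editor's note: hedged sketch of what drbd_tcp_nodelay() presumably boils
 * down to (assumption: a thin wrapper around the TCP_NODELAY socket option
 * on a kernel socket; the real helper is defined elsewhere in DRBD).
 */
#if 0 /* illustration only */
	int one = 1;
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			  (char *)&one, sizeof(one));
#endif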
869 mdev->data.socket = sock;
870 mdev->meta.socket = msock;
871 mdev->last_received = jiffies;
873 D_ASSERT(mdev->asender.task == NULL);
875 h = drbd_do_handshake(mdev);
879 if (mdev->cram_hmac_tfm) {
880 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
881 if (!drbd_do_auth(mdev)) {
882 dev_err(DEV, "Authentication of peer failed\n");
887 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
890 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
891 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
893 atomic_set(&mdev->packet_seq, 0);
896 drbd_thread_start(&mdev->asender);
898 drbd_send_protocol(mdev);
899 drbd_send_sync_param(mdev, &mdev->sync_conf);
900 drbd_send_sizes(mdev, 0);
901 drbd_send_uuids(mdev);
902 drbd_send_state(mdev);
903 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
904 clear_bit(RESIZE_PENDING, &mdev->flags);
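/*
 * Editor's note: hedged sketch of a hypothetical caller driving the return
 * codes documented above drbd_connect(): 1 = connected, 0 = retry,
 * negative = give up (incompatible peer or no net config).
 */
#if 0 /* illustration only */
	int h;

	do {
		h = drbd_connect(mdev);
		if (h == 0)	/* e.g. crossed initial packets: retry */
			schedule_timeout_interruptible(HZ);
	} while (h == 0);
	if (h < 0) {
		/* go standalone */
	}
#endif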
916 static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
920 r = drbd_recv(mdev, h, sizeof(*h));
922 if (unlikely(r != sizeof(*h))) {
923 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
926 h->command = be16_to_cpu(h->command);
927 h->length = be16_to_cpu(h->length);
928 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
929 dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
930 (long)be32_to_cpu(h->magic),
931 h->command, h->length);
934 mdev->last_received = jiffies;
939 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
943 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
944 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
946 dev_err(DEV, "local disk flush failed with status %d\n", rv);
947 /* would rather check on EOPNOTSUPP, but that is not reliable.
948 * don't try again for ANY return value != 0
949 * if (rv == -EOPNOTSUPP) */
950 drbd_bump_write_ordering(mdev, WO_drain_io);
955 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
958 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
960 struct flush_work *fw = (struct flush_work *)w;
961 struct drbd_epoch *epoch = fw->epoch;
965 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
966 drbd_flush_after_epoch(mdev, epoch);
968 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
969 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
975 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
976 * @mdev: DRBD device.
977 * @epoch: Epoch object.
980 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
981 struct drbd_epoch *epoch,
984 int finish, epoch_size;
985 struct drbd_epoch *next_epoch;
986 int schedule_flush = 0;
987 enum finish_epoch rv = FE_STILL_LIVE;
989 spin_lock(&mdev->epoch_lock);
994 epoch_size = atomic_read(&epoch->epoch_size);
996 switch (ev & ~EV_CLEANUP) {
998 atomic_dec(&epoch->active);
1000 case EV_GOT_BARRIER_NR:
1001 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1003 /* Special case: If we just switched from WO_bio_barrier to
1004 WO_bdev_flush we should not finish the current epoch */
1005 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1006 mdev->write_ordering != WO_bio_barrier &&
1007 epoch == mdev->current_epoch)
1008 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1010 case EV_BARRIER_DONE:
1011 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1013 case EV_BECAME_LAST:
1018 if (epoch_size != 0 &&
1019 atomic_read(&epoch->active) == 0 &&
1020 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1021 epoch->list.prev == &mdev->current_epoch->list &&
1022 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1023 /* Nearly all conditions are met to finish that epoch... */
1024 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1025 mdev->write_ordering == WO_none ||
1026 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1029 set_bit(DE_IS_FINISHING, &epoch->flags);
1030 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1031 mdev->write_ordering == WO_bio_barrier) {
1032 atomic_inc(&epoch->active);
1037 if (!(ev & EV_CLEANUP)) {
1038 spin_unlock(&mdev->epoch_lock);
1039 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1040 spin_lock(&mdev->epoch_lock);
1044 if (mdev->current_epoch != epoch) {
1045 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1046 list_del(&epoch->list);
1047 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1051 if (rv == FE_STILL_LIVE)
1055 atomic_set(&epoch->epoch_size, 0);
1056 /* atomic_set(&epoch->active, 0); is already zero */
1057 if (rv == FE_STILL_LIVE)
1068 spin_unlock(&mdev->epoch_lock);
1070 if (schedule_flush) {
1071 struct flush_work *fw;
1072 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1076 drbd_queue_work(&mdev->data.work, &fw->w);
1078 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1079 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1080 /* This is not recursion, it goes only one level deep */
1081 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1082 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1090 * drbd_bump_write_ordering() - Fall back to another write ordering method
1091 * @mdev: DRBD device.
1092 * @wo: Write ordering method to try.
1094 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1096 enum write_ordering_e pwo;
1097 static char *write_ordering_str[] = {
1099 [WO_drain_io] = "drain",
1100 [WO_bdev_flush] = "flush",
1101 [WO_bio_barrier] = "barrier",
1104 pwo = mdev->write_ordering;
1106 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1107 wo = WO_bdev_flush;
1108 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1109 wo = WO_drain_io;
1110 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1111 wo = WO_none;
1112 mdev->write_ordering = wo;
1113 if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1114 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
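/*
 * Editor's note: hedged usage sketch. Each disabled method degrades the
 * request one step down the chain barrier -> flush -> drain -> none; e.g.
 * requesting WO_bio_barrier with no_disk_barrier set yields WO_bdev_flush,
 * and with no_disk_flush also set, WO_drain_io.
 */
#if 0 /* illustration only */
	drbd_bump_write_ordering(mdev, WO_bio_barrier);
	/* mdev->write_ordering now holds the best still-allowed method */
#endif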
1118 * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
1119 * @mdev: DRBD device.
1121 * @cancel: The connection will be closed anyways (unused in this callback)
1123 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1125 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1126 struct bio *bio = e->private_bio;
1128 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1129 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1130 so that we can finish that epoch in drbd_may_finish_epoch().
1131 That is necessary if we already have a long chain of Epochs, before
1132 we realize that BIO_RW_BARRIER is actually not supported */
1134 /* As long as the -ENOTSUPP on the barrier is reported immediately,
1135 that will never trigger. If it is reported late, we will just
1136 print that warning and continue correctly for all future requests
1137 with WO_bdev_flush */
1138 if (previous_epoch(mdev, e->epoch))
1139 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1141 /* prepare bio for re-submit,
1142 * re-init volatile members */
1143 /* we still have a local reference,
1144 * get_ldev was done in receive_Data. */
1145 bio->bi_bdev = mdev->ldev->backing_bdev;
1146 bio->bi_sector = e->sector;
1147 bio->bi_size = e->size;
1150 bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1151 bio->bi_flags |= 1 << BIO_UPTODATE;
1153 /* don't know whether this is necessary: */
1154 bio->bi_phys_segments = 0;
1155 bio->bi_next = NULL;
1157 /* these should be unchanged: */
1158 /* bio->bi_end_io = drbd_endio_write_sec; */
1159 /* bio->bi_vcnt = whatever; */
1161 e->w.cb = e_end_block;
1163 /* This is no longer a barrier request. */
1164 bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);
1166 drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);
1171 static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1173 int rv, issue_flush;
1174 struct p_barrier *p = (struct p_barrier *)h;
1175 struct drbd_epoch *epoch;
1177 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
1179 rv = drbd_recv(mdev, h->payload, h->length);
1180 ERR_IF(rv != h->length) return FALSE;
1184 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1187 mdev->current_epoch->barrier_nr = p->barrier;
1188 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1190 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1191 * the activity log, which means it would not be resynced in case the
1192 * R_PRIMARY crashes now.
1193 * Therefore we must send the barrier_ack after the barrier request was
1195 switch (mdev->write_ordering) {
1196 case WO_bio_barrier:
1198 if (rv == FE_RECYCLED)
1204 if (rv == FE_STILL_LIVE) {
1205 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1206 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1207 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1209 if (rv == FE_RECYCLED)
1212 /* The asender will send all the ACKs and barrier ACKs out, since
1213 all EEs moved from the active_ee to the done_ee. We need to
1214 provide a new epoch object for the EEs that come in soon */
1218 /* receiver context, in the writeout path of the other node.
1219 * avoid potential distributed deadlock */
1220 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1222 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1223 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1224 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1226 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1227 if (rv == FE_RECYCLED)
1231 drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1237 atomic_set(&epoch->epoch_size, 0);
1238 atomic_set(&epoch->active, 0);
1240 spin_lock(&mdev->epoch_lock);
1241 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1242 list_add(&epoch->list, &mdev->current_epoch->list);
1243 mdev->current_epoch = epoch;
1246 /* The current_epoch got recycled while we allocated this one... */
1249 spin_unlock(&mdev->epoch_lock);
1254 /* used from receive_RSDataReply (recv_resync_read)
1255 * and from receive_Data */
1256 static struct drbd_epoch_entry *
1257 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1259 struct drbd_epoch_entry *e;
1260 struct bio_vec *bvec;
1264 void *dig_in = mdev->int_dig_in;
1265 void *dig_vv = mdev->int_dig_vv;
1267 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1268 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1271 rr = drbd_recv(mdev, dig_in, dgs);
1273 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1281 ERR_IF(data_size & 0x1ff) return NULL;
1282 ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
1284 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1285 * "criss-cross" setup, that might cause write-out on some other DRBD,
1286 * which in turn might block on the other node at this very place. */
1287 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1290 bio = e->private_bio;
1292 bio_for_each_segment(bvec, bio, i) {
1293 page = bvec->bv_page;
1294 rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
1296 if (rr != min_t(int, ds, PAGE_SIZE)) {
1297 drbd_free_ee(mdev, e);
1298 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1299 rr, min_t(int, ds, PAGE_SIZE));
1306 drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1307 if (memcmp(dig_in, dig_vv, dgs)) {
1308 dev_err(DEV, "Digest integrity check FAILED.\n");
1309 drbd_bcast_ee(mdev, "digest failed",
1310 dgs, dig_in, dig_vv, e);
1311 drbd_free_ee(mdev, e);
1315 mdev->recv_cnt += data_size>>9;
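/* Editor's note: data_size>>9 converts a byte count into 512-byte sectors
 * (1 sector == 1 << 9 bytes), the unit recv_cnt is kept in. */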
1319 /* drbd_drain_block() just takes a data block
1320 * out of the socket input buffer, and discards it.
1322 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1328 page = drbd_pp_alloc(mdev, 1);
1332 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1333 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1335 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1336 rr, min_t(int, data_size, PAGE_SIZE));
1342 drbd_pp_free(mdev, page);
1346 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1347 sector_t sector, int data_size)
1349 struct bio_vec *bvec;
1351 int dgs, rr, i, expect;
1352 void *dig_in = mdev->int_dig_in;
1353 void *dig_vv = mdev->int_dig_vv;
1355 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1356 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1359 rr = drbd_recv(mdev, dig_in, dgs);
1361 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1369 /* optimistically update recv_cnt. if receiving fails below,
1370 * we disconnect anyways, and counters will be reset. */
1371 mdev->recv_cnt += data_size>>9;
1373 bio = req->master_bio;
1374 D_ASSERT(sector == bio->bi_sector);
1376 bio_for_each_segment(bvec, bio, i) {
1377 expect = min_t(int, data_size, bvec->bv_len);
1378 rr = drbd_recv(mdev,
1379 kmap(bvec->bv_page)+bvec->bv_offset,
1381 kunmap(bvec->bv_page);
1383 dev_warn(DEV, "short read receiving data reply: "
1384 "read %d expected %d\n",
1392 drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1393 if (memcmp(dig_in, dig_vv, dgs)) {
1394 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1399 D_ASSERT(data_size == 0);
1403 /* e_end_resync_block() is called via
1404 * drbd_process_done_ee() by asender only */
1405 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1407 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1408 sector_t sector = e->sector;
1411 D_ASSERT(hlist_unhashed(&e->colision));
1413 if (likely(drbd_bio_uptodate(e->private_bio))) {
1414 drbd_set_in_sync(mdev, sector, e->size);
1415 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1417 /* Record failure to sync */
1418 drbd_rs_failed_io(mdev, sector, e->size);
1420 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1427 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1429 struct drbd_epoch_entry *e;
1431 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1437 dec_rs_pending(mdev);
1439 e->private_bio->bi_end_io = drbd_endio_write_sec;
1440 e->private_bio->bi_rw = WRITE;
1441 e->w.cb = e_end_resync_block;
1444 /* corresponding dec_unacked() in e_end_resync_block()
1445 * or respectively in _drbd_clear_done_ee */
1447 spin_lock_irq(&mdev->req_lock);
1448 list_add(&e->w.list, &mdev->sync_ee);
1449 spin_unlock_irq(&mdev->req_lock);
1451 drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
1452 /* accounting done in endio */
1454 maybe_kick_lo(mdev);
1458 static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
1460 struct drbd_request *req;
1462 unsigned int header_size, data_size;
1464 struct p_data *p = (struct p_data *)h;
1466 header_size = sizeof(*p) - sizeof(*h);
1467 data_size = h->length - header_size;
1469 ERR_IF(data_size == 0) return FALSE;
1471 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1474 sector = be64_to_cpu(p->sector);
1476 spin_lock_irq(&mdev->req_lock);
1477 req = _ar_id_to_req(mdev, p->block_id, sector);
1478 spin_unlock_irq(&mdev->req_lock);
1479 if (unlikely(!req)) {
1480 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1484 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1485 * special casing it there for the various failure cases.
1486 * still no race with drbd_fail_pending_reads */
1487 ok = recv_dless_read(mdev, req, sector, data_size);
1490 req_mod(req, data_received);
1491 /* else: nothing. handled from drbd_disconnect...
1492 * I don't think we may complete this just yet
1493 * in case we are "on-disconnect: freeze" */
1498 static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
1501 unsigned int header_size, data_size;
1503 struct p_data *p = (struct p_data *)h;
1505 header_size = sizeof(*p) - sizeof(*h);
1506 data_size = h->length - header_size;
1508 ERR_IF(data_size == 0) return FALSE;
1510 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1513 sector = be64_to_cpu(p->sector);
1514 D_ASSERT(p->block_id == ID_SYNCER);
1516 if (get_ldev(mdev)) {
1517 /* data is submitted to disk within recv_resync_read.
1518 * corresponding put_ldev done below on error,
1519 * or in drbd_endio_write_sec. */
1520 ok = recv_resync_read(mdev, sector, data_size);
1522 if (__ratelimit(&drbd_ratelimit_state))
1523 dev_err(DEV, "Can not write resync data to local disk.\n");
1525 ok = drbd_drain_block(mdev, data_size);
1527 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1533 /* e_end_block() is called via drbd_process_done_ee().
1534 * this means this function only runs in the asender thread
1536 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1538 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1539 sector_t sector = e->sector;
1540 struct drbd_epoch *epoch;
1543 if (e->flags & EE_IS_BARRIER) {
1544 epoch = previous_epoch(mdev, e->epoch);
1546 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1549 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1550 if (likely(drbd_bio_uptodate(e->private_bio))) {
1551 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1552 mdev->state.conn <= C_PAUSED_SYNC_T &&
1553 e->flags & EE_MAY_SET_IN_SYNC) ?
1554 P_RS_WRITE_ACK : P_WRITE_ACK;
1555 ok &= drbd_send_ack(mdev, pcmd, e);
1556 if (pcmd == P_RS_WRITE_ACK)
1557 drbd_set_in_sync(mdev, sector, e->size);
1559 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1560 /* we expect it to be marked out of sync anyways...
1561 * maybe assert this? */
1565 /* we delete from the conflict detection hash _after_ we sent out the
1566 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1567 if (mdev->net_conf->two_primaries) {
1568 spin_lock_irq(&mdev->req_lock);
1569 D_ASSERT(!hlist_unhashed(&e->colision));
1570 hlist_del_init(&e->colision);
1571 spin_unlock_irq(&mdev->req_lock);
1573 D_ASSERT(hlist_unhashed(&e->colision));
1576 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1581 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1583 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1586 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1587 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1589 spin_lock_irq(&mdev->req_lock);
1590 D_ASSERT(!hlist_unhashed(&e->colision));
1591 hlist_del_init(&e->colision);
1592 spin_unlock_irq(&mdev->req_lock);
1599 /* Called from receive_Data.
1600 * Synchronize packets on sock with packets on msock.
1602 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1603 * packet traveling on msock, they are still processed in the order they have
1604 * been sent.
1606 * Note: we don't care for Ack packets overtaking P_DATA packets.
1608 * In case packet_seq is larger than mdev->peer_seq, there are
1609 * outstanding packets on the msock. We wait for them to arrive.
1610 * In case we are the logically next packet, we update mdev->peer_seq
1611 * ourselves. Correctly handles 32bit wrap around.
1613 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1614 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1615 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1616 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1618 * returns 0 if we may process the packet,
1619 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1620 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1626 spin_lock(&mdev->peer_seq_lock);
1628 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1629 if (seq_le(packet_seq, mdev->peer_seq+1))
1631 if (signal_pending(current)) {
1635 p_seq = mdev->peer_seq;
1636 spin_unlock(&mdev->peer_seq_lock);
1637 timeout = schedule_timeout(30*HZ);
1638 spin_lock(&mdev->peer_seq_lock);
1639 if (timeout == 0 && p_seq == mdev->peer_seq) {
1641 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1645 finish_wait(&mdev->seq_wait, &wait);
1646 if (mdev->peer_seq+1 == packet_seq)
1648 spin_unlock(&mdev->peer_seq_lock);
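/*
 * Editor's note: self-contained userspace demo (not DRBD code) of the
 * wrap-safe comparison that seq_le() above is assumed to perform: taking
 * the difference in unsigned arithmetic and testing its sign keeps the
 * ordering correct across the 32bit wrap.
 */
#if 0 /* illustration only */
#include <stdint.h>
#include <stdio.h>

static int seq_le_demo(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	/* 2 is logically "after" 0xfffffffe: the counter wrapped in between */
	printf("%d\n", seq_le_demo(0xfffffffeU, 2U)); /* prints 1 */
	printf("%d\n", seq_le_demo(2U, 0xfffffffeU)); /* prints 0 */
	return 0;
}
#endif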
1652 /* mirrored write */
1653 static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1656 struct drbd_epoch_entry *e;
1657 struct p_data *p = (struct p_data *)h;
1658 int header_size, data_size;
1662 header_size = sizeof(*p) - sizeof(*h);
1663 data_size = h->length - header_size;
1665 ERR_IF(data_size == 0) return FALSE;
1667 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1670 if (!get_ldev(mdev)) {
1671 if (__ratelimit(&drbd_ratelimit_state))
1672 dev_err(DEV, "Can not write mirrored data block "
1673 "to local disk.\n");
1674 spin_lock(&mdev->peer_seq_lock);
1675 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1677 spin_unlock(&mdev->peer_seq_lock);
1679 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1680 atomic_inc(&mdev->current_epoch->epoch_size);
1681 return drbd_drain_block(mdev, data_size);
1684 /* get_ldev(mdev) successful.
1685 * Corresponding put_ldev done either below (on various errors),
1686 * or in drbd_endio_write_sec, if we successfully submit the data at
1687 * the end of this function. */
1689 sector = be64_to_cpu(p->sector);
1690 e = read_in_block(mdev, p->block_id, sector, data_size);
1696 e->private_bio->bi_end_io = drbd_endio_write_sec;
1697 e->w.cb = e_end_block;
1699 spin_lock(&mdev->epoch_lock);
1700 e->epoch = mdev->current_epoch;
1701 atomic_inc(&e->epoch->epoch_size);
1702 atomic_inc(&e->epoch->active);
1704 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1705 struct drbd_epoch *epoch;
1706 /* Issue a barrier if we start a new epoch, and the previous epoch
1707 was not an epoch containing a single request which already was
1708 a barrier. */
1709 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1710 if (epoch == e->epoch) {
1711 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1712 rw |= (1<<BIO_RW_BARRIER);
1713 e->flags |= EE_IS_BARRIER;
1715 if (atomic_read(&epoch->epoch_size) > 1 ||
1716 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1717 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1718 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1719 rw |= (1<<BIO_RW_BARRIER);
1720 e->flags |= EE_IS_BARRIER;
1724 spin_unlock(&mdev->epoch_lock);
1726 dp_flags = be32_to_cpu(p->dp_flags);
1727 if (dp_flags & DP_HARDBARRIER) {
1728 dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
1729 /* rw |= (1<<BIO_RW_BARRIER); */
1731 if (dp_flags & DP_RW_SYNC)
1732 rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
1733 if (dp_flags & DP_MAY_SET_IN_SYNC)
1734 e->flags |= EE_MAY_SET_IN_SYNC;
1736 /* I'm the receiver, I do hold a net_cnt reference. */
1737 if (!mdev->net_conf->two_primaries) {
1738 spin_lock_irq(&mdev->req_lock);
1740 /* don't get the req_lock yet,
1741 * we may sleep in drbd_wait_peer_seq */
1742 const int size = e->size;
1743 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1745 struct drbd_request *i;
1746 struct hlist_node *n;
1747 struct hlist_head *slot;
1750 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1751 BUG_ON(mdev->ee_hash == NULL);
1752 BUG_ON(mdev->tl_hash == NULL);
1754 /* conflict detection and handling:
1755 * 1. wait on the sequence number,
1756 * in case this data packet overtook ACK packets.
1757 * 2. check our hash tables for conflicting requests.
1758 * we only need to walk the tl_hash, since an ee can not
1759 * have a conflict with another ee: on the submitting
1760 * node, the corresponding req had already been conflicting,
1761 * and a conflicting req is never sent.
1763 * Note: for two_primaries, we are protocol C,
1764 * so there cannot be any request that is DONE
1765 * but still on the transfer log.
1767 * unconditionally add to the ee_hash.
1769 * if no conflicting request is found:
1772 * if any conflicting request is found
1773 * that has not yet been acked,
1774 * AND I have the "discard concurrent writes" flag:
1775 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1777 * if any conflicting request is found:
1778 * block the receiver, waiting on misc_wait
1779 * until no more conflicting requests are there,
1780 * or we get interrupted (disconnect).
1782 * we do not just write after local io completion of those
1783 * requests, but only after req is done completely, i.e.
1784 * we wait for the P_DISCARD_ACK to arrive!
1786 * then proceed normally, i.e. submit.
1788 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1789 goto out_interrupted;
1791 spin_lock_irq(&mdev->req_lock);
1793 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1795 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
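/*
 * Editor's note: self-contained demo (userspace C, not DRBD code) of the
 * extent-overlap test the OVERLAPS macro above is assumed to expand to:
 * sizes are in bytes, sectors in 512-byte units, and two half-open extents
 * overlap unless one ends at or before the start of the other.
 */
#if 0 /* illustration only */
typedef unsigned long long sector_t;

static int overlaps_demo(sector_t s1, int size1, sector_t s2, int size2)
{
	return !(s1 + (size1 >> 9) <= s2 || s2 + (size2 >> 9) <= s1);
}
#endif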
1796 slot = tl_hash_slot(mdev, sector);
1799 int have_unacked = 0;
1800 int have_conflict = 0;
1801 prepare_to_wait(&mdev->misc_wait, &wait,
1802 TASK_INTERRUPTIBLE);
1803 hlist_for_each_entry(i, n, slot, colision) {
1805 /* only ALERT on first iteration,
1806 * we may be woken up early... */
1808 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1809 " new: %llus +%u; pending: %llus +%u\n",
1810 current->comm, current->pid,
1811 (unsigned long long)sector, size,
1812 (unsigned long long)i->sector, i->size);
1813 if (i->rq_state & RQ_NET_PENDING)
1822 /* Discard Ack only for the _first_ iteration */
1823 if (first && discard && have_unacked) {
1824 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1825 (unsigned long long)sector);
1827 e->w.cb = e_send_discard_ack;
1828 list_add_tail(&e->w.list, &mdev->done_ee);
1830 spin_unlock_irq(&mdev->req_lock);
1832 /* we could probably send that P_DISCARD_ACK ourselves,
1833 * but I don't like the receiver using the msock */
1837 finish_wait(&mdev->misc_wait, &wait);
1841 if (signal_pending(current)) {
1842 hlist_del_init(&e->colision);
1844 spin_unlock_irq(&mdev->req_lock);
1846 finish_wait(&mdev->misc_wait, &wait);
1847 goto out_interrupted;
1850 spin_unlock_irq(&mdev->req_lock);
1853 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1854 "sec=%llus\n", (unsigned long long)sector);
1855 } else if (discard) {
1856 /* we had none on the first iteration.
1857 * there must be none now. */
1858 D_ASSERT(have_unacked == 0);
1861 spin_lock_irq(&mdev->req_lock);
1863 finish_wait(&mdev->misc_wait, &wait);
1866 list_add(&e->w.list, &mdev->active_ee);
1867 spin_unlock_irq(&mdev->req_lock);
1869 switch (mdev->net_conf->wire_protocol) {
1872 /* corresponding dec_unacked() in e_end_block()
1873 * or respectively in _drbd_clear_done_ee */
1876 /* I really don't like it that the receiver thread
1877 * sends on the msock, but anyways */
1878 drbd_send_ack(mdev, P_RECV_ACK, e);
1885 if (mdev->state.pdsk == D_DISKLESS) {
1886 /* In case we have the only disk of the cluster, */
1887 drbd_set_out_of_sync(mdev, e->sector, e->size);
1888 e->flags |= EE_CALL_AL_COMPLETE_IO;
1889 drbd_al_begin_io(mdev, e->sector);
1892 e->private_bio->bi_rw = rw;
1893 drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
1894 /* accounting done in endio */
1896 maybe_kick_lo(mdev);
1900 /* yes, the epoch_size now is imbalanced.
1901 * but we drop the connection anyways, so we don't have a chance to
1902 * receive a barrier... atomic_inc(&mdev->epoch_size); */
1904 drbd_free_ee(mdev, e);
1908 static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
1911 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1912 struct drbd_epoch_entry *e;
1913 struct digest_info *di = NULL;
1914 int size, digest_size;
1915 unsigned int fault_type;
1916 struct p_block_req *p =
1917 (struct p_block_req *)h;
1918 const int brps = sizeof(*p)-sizeof(*h);
1920 if (drbd_recv(mdev, h->payload, brps) != brps)
1923 sector = be64_to_cpu(p->sector);
1924 size = be32_to_cpu(p->blksize);
1926 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
1927 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1928 (unsigned long long)sector, size);
1931 if (sector + (size>>9) > capacity) {
1932 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
1933 (unsigned long long)sector, size);
1937 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
1938 if (__ratelimit(&drbd_ratelimit_state))
1939 dev_err(DEV, "Can not satisfy peer's read request, "
1940 "no local data.\n");
1941 drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
1942 P_NEG_RS_DREPLY , p);
1946 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1947 * "criss-cross" setup, that might cause write-out on some other DRBD,
1948 * which in turn might block on the other node at this very place. */
1949 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
1955 e->private_bio->bi_rw = READ;
1956 e->private_bio->bi_end_io = drbd_endio_read_sec;
1958 switch (h->command) {
1959 case P_DATA_REQUEST:
1960 e->w.cb = w_e_end_data_req;
1961 fault_type = DRBD_FAULT_DT_RD;
1963 case P_RS_DATA_REQUEST:
1964 e->w.cb = w_e_end_rsdata_req;
1965 fault_type = DRBD_FAULT_RS_RD;
1966 /* Eventually this should become asynchronous. Currently it
1967 * blocks the whole receiver just to delay the reading of a
1968 * resync data block.
1969 * the drbd_work_queue mechanism is made for this...
1971 if (!drbd_rs_begin_io(mdev, sector)) {
1972 /* we have been interrupted,
1973 * probably connection lost! */
1974 D_ASSERT(signal_pending(current));
1980 case P_CSUM_RS_REQUEST:
1981 fault_type = DRBD_FAULT_RS_RD;
1982 digest_size = h->length - brps;
1983 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
1987 di->digest_size = digest_size;
1988 di->digest = (((char *)di)+sizeof(struct digest_info));
1990 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
1993 e->block_id = (u64)(unsigned long)di;
1994 if (h->command == P_CSUM_RS_REQUEST) {
1995 D_ASSERT(mdev->agreed_pro_version >= 89);
1996 e->w.cb = w_e_end_csum_rs_req;
1997 } else if (h->command == P_OV_REPLY) {
1998 e->w.cb = w_e_end_ov_reply;
1999 dec_rs_pending(mdev);
2003 if (!drbd_rs_begin_io(mdev, sector)) {
2004 /* we have been interrupted, probably connection lost! */
2005 D_ASSERT(signal_pending(current));
2011 if (mdev->state.conn >= C_CONNECTED &&
2012 mdev->state.conn != C_VERIFY_T)
2013 dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2014 drbd_conn_str(mdev->state.conn));
2015 if (mdev->ov_start_sector == ~(sector_t)0 &&
2016 mdev->agreed_pro_version >= 90) {
2017 mdev->ov_start_sector = sector;
2018 mdev->ov_position = sector;
2019 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2020 dev_info(DEV, "Online Verify start sector: %llu\n",
2021 (unsigned long long)sector);
2023 e->w.cb = w_e_end_ov_req;
2024 fault_type = DRBD_FAULT_RS_RD;
2025 /* Eventually this should become asynchronous. Currently it
2026 * blocks the whole receiver just to delay the reading of a
2027 * resync data block.
2028 * the drbd_work_queue mechanism is made for this...
2030 if (!drbd_rs_begin_io(mdev, sector)) {
2031 /* we have been interrupted,
2032 * probably connection lost! */
2033 D_ASSERT(signal_pending(current));
2040 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2041 cmdname(h->command));
2042 fault_type = DRBD_FAULT_MAX;
2045 spin_lock_irq(&mdev->req_lock);
2046 list_add(&e->w.list, &mdev->read_ee);
2047 spin_unlock_irq(&mdev->req_lock);
2051 drbd_generic_make_request(mdev, fault_type, e->private_bio);
2052 maybe_kick_lo(mdev);
2059 drbd_free_ee(mdev, e);
2063 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2065 int self, peer, rv = -100;
2066 unsigned long ch_self, ch_peer;
2068 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2069 peer = mdev->p_uuid[UI_BITMAP] & 1;
2071 ch_peer = mdev->p_uuid[UI_SIZE];
2072 ch_self = mdev->comm_bm_set;
2074 switch (mdev->net_conf->after_sb_0p) {
2076 case ASB_DISCARD_SECONDARY:
2077 case ASB_CALL_HELPER:
2078 dev_err(DEV, "Configuration error.\n");
2080 case ASB_DISCONNECT:
2082 case ASB_DISCARD_YOUNGER_PRI:
2083 if (self == 0 && peer == 1) {
2087 if (self == 1 && peer == 0) {
2091 /* Else fall through to one of the other strategies... */
2092 case ASB_DISCARD_OLDER_PRI:
2093 if (self == 0 && peer == 1) {
2097 if (self == 1 && peer == 0) {
2101 /* Else fall through to one of the other strategies... */
2102 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2103 "Using discard-least-changes instead\n");
2104 case ASB_DISCARD_ZERO_CHG:
2105 if (ch_peer == 0 && ch_self == 0) {
2106 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2110 if (ch_peer == 0) { rv = 1; break; }
2111 if (ch_self == 0) { rv = -1; break; }
2113 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2115 case ASB_DISCARD_LEAST_CHG:
2116 if (ch_self < ch_peer)
2118 else if (ch_self > ch_peer)
2120 else /* ( ch_self == ch_peer ) */
2121 /* Well, then use something else. */
2122 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2125 case ASB_DISCARD_LOCAL:
2128 case ASB_DISCARD_REMOTE:
2135 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2137 int self, peer, hg, rv = -100;
2139 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2140 peer = mdev->p_uuid[UI_BITMAP] & 1;
2142 switch (mdev->net_conf->after_sb_1p) {
2143 case ASB_DISCARD_YOUNGER_PRI:
2144 case ASB_DISCARD_OLDER_PRI:
2145 case ASB_DISCARD_LEAST_CHG:
2146 case ASB_DISCARD_LOCAL:
2147 case ASB_DISCARD_REMOTE:
2148 dev_err(DEV, "Configuration error.\n");
2150 case ASB_DISCONNECT:
2153 hg = drbd_asb_recover_0p(mdev);
2154 if (hg == -1 && mdev->state.role == R_SECONDARY)
2156 if (hg == 1 && mdev->state.role == R_PRIMARY)
2160 rv = drbd_asb_recover_0p(mdev);
2162 case ASB_DISCARD_SECONDARY:
2163 return mdev->state.role == R_PRIMARY ? 1 : -1;
2164 case ASB_CALL_HELPER:
2165 hg = drbd_asb_recover_0p(mdev);
2166 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2167 self = drbd_set_role(mdev, R_SECONDARY, 0);
2168 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2169 * we might be here in C_WF_REPORT_PARAMS which is transient.
2170 * we do not need to wait for the after state change work either. */
2171 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2172 if (self != SS_SUCCESS) {
2173 drbd_khelper(mdev, "pri-lost-after-sb");
2175 dev_warn(DEV, "Successfully gave up primary role.\n");
2185 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2187 int self, peer, hg, rv = -100;
2189 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2190 peer = mdev->p_uuid[UI_BITMAP] & 1;
2192 switch (mdev->net_conf->after_sb_2p) {
2193 case ASB_DISCARD_YOUNGER_PRI:
2194 case ASB_DISCARD_OLDER_PRI:
2195 case ASB_DISCARD_LEAST_CHG:
2196 case ASB_DISCARD_LOCAL:
2197 case ASB_DISCARD_REMOTE:
2199 case ASB_DISCARD_SECONDARY:
2200 dev_err(DEV, "Configuration error.\n");
2203 rv = drbd_asb_recover_0p(mdev);
2205 case ASB_DISCONNECT:
2207 case ASB_CALL_HELPER:
2208 hg = drbd_asb_recover_0p(mdev);
2210 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2211 * we might be here in C_WF_REPORT_PARAMS which is transient.
2212 * we do not need to wait for the after state change work either. */
2213 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2214 if (self != SS_SUCCESS) {
2215 drbd_khelper(mdev, "pri-lost-after-sb");
2217 dev_warn(DEV, "Successfully gave up primary role.\n");
2227 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2228 u64 bits, u64 flags)
2231 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2234 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2236 (unsigned long long)uuid[UI_CURRENT],
2237 (unsigned long long)uuid[UI_BITMAP],
2238 (unsigned long long)uuid[UI_HISTORY_START],
2239 (unsigned long long)uuid[UI_HISTORY_END],
2240 (unsigned long long)bits,
2241 (unsigned long long)flags);
2245 100 after split brain try auto recover
2246 2 C_SYNC_SOURCE set BitMap
2247 1 C_SYNC_SOURCE use BitMap
2248 0 no Sync
2249 -1 C_SYNC_TARGET use BitMap
2250 -2 C_SYNC_TARGET set BitMap
2251 -100 after split brain, disconnect
2252 -1000 unrelated data
2254 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2259 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2260 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2263 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2267 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2268 peer != UUID_JUST_CREATED)
2272 if (self != UUID_JUST_CREATED &&
2273 (peer == UUID_JUST_CREATED || peer == (u64)0))
2277 int rct, dc; /* roles at crash time */
2279 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2281 if (mdev->agreed_pro_version < 91)
2284 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2285 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2286 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2287 drbd_uuid_set_bm(mdev, 0UL);
2289 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2290 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2293 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2300 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2302 if (mdev->agreed_pro_version < 91)
2305 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2306 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2307 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2309 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2310 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2311 mdev->p_uuid[UI_BITMAP] = 0UL;
2313 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2316 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2323 /* Common power [off|failure] */
2324 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2325 (mdev->p_uuid[UI_FLAGS] & 2);
2326 /* lowest bit is set when we were primary,
2327 * next bit (weight 2) is set when peer was primary */
2331 case 0: /* !self_pri && !peer_pri */ return 0;
2332 case 1: /* self_pri && !peer_pri */ return 1;
2333 case 2: /* !self_pri && peer_pri */ return -1;
2334 case 3: /* self_pri && peer_pri */
2335 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2341 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2346 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2348 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2349 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2351 /* The last P_SYNC_UUID did not get through. Undo the modifications that
2352 the peer's last start of resync as sync source made to its UUIDs. */
2354 if (mdev->agreed_pro_version < 91)
2357 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2358 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2364 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2365 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2366 peer = mdev->p_uuid[i] & ~((u64)1);
2372 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2373 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2378 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2380 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2381 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2383 /* The last P_SYNC_UUID did not get through. Undo the modifications that
2384 our last start of resync as sync source made to our own UUIDs. */
2386 if (mdev->agreed_pro_version < 91)
2389 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2390 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2392 dev_info(DEV, "Undid last start of resync:\n");
2394 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2395 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2403 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2404 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2405 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2411 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2412 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2413 if (self == peer && self != ((u64)0))
2417 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2418 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2419 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2420 peer = mdev->p_uuid[j] & ~((u64)1);
2429 /* drbd_sync_handshake() returns the new conn state on success, or
2430 C_MASK (-1) on failure.
2432 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2433 enum drbd_disk_state peer_disk) __must_hold(local)
2436 enum drbd_conns rv = C_MASK;
2437 enum drbd_disk_state mydisk;
2439 mydisk = mdev->state.disk;
2440 if (mydisk == D_NEGOTIATING)
2441 mydisk = mdev->new_state_tmp.disk;
2443 dev_info(DEV, "drbd_sync_handshake:\n");
2444 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2445 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2446 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2448 hg = drbd_uuid_compare(mdev, &rule_nr);
2450 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2453 dev_alert(DEV, "Unrelated data, aborting!\n");
2457 dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
2461 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2462 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2463 int f = (hg == -100) || abs(hg) == 2;
2464 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2467 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2468 hg > 0 ? "source" : "target");
2471 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2472 int pcount = (mdev->state.role == R_PRIMARY)
2473 + (peer_role == R_PRIMARY);
2474 int forced = (hg == -100);
2478 hg = drbd_asb_recover_0p(mdev);
2481 hg = drbd_asb_recover_1p(mdev);
2484 hg = drbd_asb_recover_2p(mdev);
2487 if (abs(hg) < 100) {
2488 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2489 "automatically solved. Sync from %s node\n",
2490 pcount, (hg < 0) ? "peer" : "this");
2492 dev_warn(DEV, "Doing a full sync, since"
2493 " UUIDs were ambiguous.\n");
2500 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2502 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2506 dev_warn(DEV, "Split-Brain detected, manually solved. "
2507 "Sync from %s node\n",
2508 (hg < 0) ? "peer" : "this");
2512 dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
2513 drbd_khelper(mdev, "split-brain");
2517 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2518 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2522 if (hg < 0 && /* by intention we do not use mydisk here. */
2523 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2524 switch (mdev->net_conf->rr_conflict) {
2525 case ASB_CALL_HELPER:
2526 drbd_khelper(mdev, "pri-lost");
2528 case ASB_DISCONNECT:
2529 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2532 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2538 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2539 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2543 if (hg > 0) { /* become sync source. */
2545 } else if (hg < 0) { /* become sync target */
2549 if (drbd_bm_total_weight(mdev)) {
2550 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2551 drbd_bm_total_weight(mdev));
2558 /* returns 1 if invalid */
2559 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2561 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2562 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2563 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2566 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2567 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2568 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2571 /* everything else is valid if they are equal on both sides. */
2575 /* everything else is invalid. */
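/*
 * receive_protocol() - check that our net_conf matches the peer's.
 * Protocol number, the after-split-brain policies, the want_lose flag,
 * the two-primaries setting, and (for agreed protocol >= 87) the
 * data-integrity-alg all have to agree, otherwise we disconnect.
 */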
2579 static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2581 struct p_protocol *p = (struct p_protocol *)h;
2582 int header_size, data_size;
2583 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2584 int p_want_lose, p_two_primaries;
2585 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2587 header_size = sizeof(*p) - sizeof(*h);
2588 data_size = h->length - header_size;
2590 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2593 p_proto = be32_to_cpu(p->protocol);
2594 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2595 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2596 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2597 p_want_lose = be32_to_cpu(p->want_lose);
2598 p_two_primaries = be32_to_cpu(p->two_primaries);
2600 if (p_proto != mdev->net_conf->wire_protocol) {
2601 dev_err(DEV, "incompatible communication protocols\n");
2605 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2606 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2610 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2611 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2615 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2616 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2620 if (p_want_lose && mdev->net_conf->want_lose) {
2621 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2625 if (p_two_primaries != mdev->net_conf->two_primaries) {
2626 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2630 if (mdev->agreed_pro_version >= 87) {
2631 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2633 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2636 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2637 if (strcmp(p_integrity_alg, my_alg)) {
2638 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2641 dev_info(DEV, "data-integrity-alg: %s\n",
2642 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2648 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2653 * input: alg name, feature name
2654 * return: NULL (alg name was "")
2655 * ERR_PTR(error) if something goes wrong
2656 * or the crypto hash ptr, if it worked out ok. */
2657 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2658 const char *alg, const char *name)
2660 struct crypto_hash *tfm;
2665 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2667 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2668 alg, name, PTR_ERR(tfm));
2671 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2672 crypto_free_hash(tfm);
2673 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2674 return ERR_PTR(-EINVAL);
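/*
 * Typical use of the helper above, as receive_SyncParam() does below
 * (a sketch of the calling convention, not additional API):
 *
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, p->verify_alg, "verify-alg");
 *	if (IS_ERR(tfm))
 *		goto disconnect;   (the helper has already logged the reason)
 *
 * A NULL return simply means the algorithm name was the empty string.
 */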
2679 static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2682 struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
2683 unsigned int header_size, data_size, exp_max_sz;
2684 struct crypto_hash *verify_tfm = NULL;
2685 struct crypto_hash *csums_tfm = NULL;
2686 const int apv = mdev->agreed_pro_version;
2688 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2689 : apv == 88 ? sizeof(struct p_rs_param)
2691 : /* 89 */ sizeof(struct p_rs_param_89);
2693 if (h->length > exp_max_sz) {
2694 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2695 h->length, exp_max_sz);
2700 header_size = sizeof(struct p_rs_param) - sizeof(*h);
2701 data_size = h->length - header_size;
2702 } else /* apv >= 89 */ {
2703 header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
2704 data_size = h->length - header_size;
2705 D_ASSERT(data_size == 0);
2708 /* initialize verify_alg and csums_alg */
2709 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2711 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2714 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2718 if (data_size > SHARED_SECRET_MAX) {
2719 dev_err(DEV, "verify-alg too long, "
2720 "peer wants %u, accepting only %u byte\n",
2721 data_size, SHARED_SECRET_MAX);
2725 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2728 /* we expect NUL terminated string */
2729 /* but just in case someone tries to be evil */
2730 D_ASSERT(p->verify_alg[data_size-1] == 0);
2731 p->verify_alg[data_size-1] = 0;
2733 } else /* apv >= 89 */ {
2734 /* we still expect NUL terminated strings */
2735 /* but just in case someone tries to be evil */
2736 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2737 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2738 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2739 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2742 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2743 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2744 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2745 mdev->sync_conf.verify_alg, p->verify_alg);
2748 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2749 p->verify_alg, "verify-alg");
2750 if (IS_ERR(verify_tfm)) {
2756 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2757 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2758 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2759 mdev->sync_conf.csums_alg, p->csums_alg);
2762 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2763 p->csums_alg, "csums-alg");
2764 if (IS_ERR(csums_tfm)) {
2771 spin_lock(&mdev->peer_seq_lock);
2772 /* lock against drbd_nl_syncer_conf() */
2774 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2775 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2776 crypto_free_hash(mdev->verify_tfm);
2777 mdev->verify_tfm = verify_tfm;
2778 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2781 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2782 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2783 crypto_free_hash(mdev->csums_tfm);
2784 mdev->csums_tfm = csums_tfm;
2785 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2787 spin_unlock(&mdev->peer_seq_lock);
2792 /* just for completeness: actually not needed,
2793 * as this is not reached if csums_tfm was ok. */
2794 crypto_free_hash(csums_tfm);
2795 /* but free the verify_tfm again, if csums_tfm did not work out */
2796 crypto_free_hash(verify_tfm);
2797 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2801 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2803 /* sorry, we currently have no working implementation
2804 * of distributed TCQ */
2807 /* warn if the arguments differ by more than 12.5% */
2808 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2809 const char *s, sector_t a, sector_t b)
2812 if (a == 0 || b == 0)
2814 d = (a > b) ? (a - b) : (b - a);
2815 if (d > (a>>3) || d > (b>>3))
2816 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2817 (unsigned long long)a, (unsigned long long)b);
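/*
 * receive_sizes() - process the peer's P_SIZES packet.
 * Compare the peer's disk and requested sizes with ours, refuse to
 * shrink a device with usable data while connecting, and adopt the
 * peer's size if we are diskless.
 */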
2820 static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2822 struct p_sizes *p = (struct p_sizes *)h;
2823 enum determine_dev_size dd = unchanged;
2824 unsigned int max_seg_s;
2825 sector_t p_size, p_usize, my_usize;
2826 int ldsc = 0; /* local disk size changed */
2827 enum drbd_conns nconn;
2829 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2830 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2833 p_size = be64_to_cpu(p->d_size);
2834 p_usize = be64_to_cpu(p->u_size);
2836 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2837 dev_err(DEV, "some backing storage is needed\n");
2838 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2842 /* just store the peer's disk size for now.
2843 * we still need to figure out whether we accept that. */
2844 mdev->p_size = p_size;
2846 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r)))) /* fully parenthesized, it is a macro */
2847 if (get_ldev(mdev)) {
2848 warn_if_differ_considerably(mdev, "lower level device sizes",
2849 p_size, drbd_get_max_capacity(mdev->ldev));
2850 warn_if_differ_considerably(mdev, "user requested size",
2851 p_usize, mdev->ldev->dc.disk_size);
2853 /* if this is the first connect, or an otherwise expected
2854 * param exchange, choose the minimum */
2855 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2856 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2859 my_usize = mdev->ldev->dc.disk_size;
2861 if (mdev->ldev->dc.disk_size != p_usize) {
2862 mdev->ldev->dc.disk_size = p_usize;
2863 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2864 (unsigned long)mdev->ldev->dc.disk_size);
2867 /* Never shrink a device with usable data during connect.
2868 But allow online shrinking if we are connected. */
2869 if (drbd_new_dev_size(mdev, mdev->ldev) <
2870 drbd_get_capacity(mdev->this_bdev) &&
2871 mdev->state.disk >= D_OUTDATED &&
2872 mdev->state.conn < C_CONNECTED) {
2873 dev_err(DEV, "The peer's disk size is too small!\n");
2874 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2875 mdev->ldev->dc.disk_size = my_usize;
2883 if (get_ldev(mdev)) {
2884 dd = drbd_determin_dev_size(mdev);
2886 if (dd == dev_size_error)
2890 /* I am diskless, need to accept the peer's size. */
2891 drbd_set_my_capacity(mdev, p_size);
2894 if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
2895 nconn = drbd_sync_handshake(mdev,
2896 mdev->state.peer, mdev->state.pdsk);
2899 if (nconn == C_MASK) {
2900 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2904 if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
2905 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2910 if (get_ldev(mdev)) {
2911 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2912 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2916 max_seg_s = be32_to_cpu(p->max_segment_size);
2917 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
2918 drbd_setup_queue_param(mdev, max_seg_s);
2920 drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
2924 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
2925 if (be64_to_cpu(p->c_size) !=
2926 drbd_get_capacity(mdev->this_bdev) || ldsc) {
2927 /* we have different sizes, probably peer
2928 * needs to know my new size... */
2929 drbd_send_sizes(mdev, 0);
2931 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
2932 (dd == grew && mdev->state.conn == C_CONNECTED)) {
2933 if (mdev->state.pdsk >= D_INCONSISTENT &&
2934 mdev->state.disk >= D_INCONSISTENT)
2935 resync_after_online_grow(mdev);
2937 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
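/*
 * receive_uuids() - store the peer's UUIDs in mdev->p_uuid.
 * As a special case, a fresh current UUID on two just-created devices
 * lets us skip the initial full sync.
 */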
2944 static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
2946 struct p_uuids *p = (struct p_uuids *)h;
2950 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2951 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2954 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
2956 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
2957 p_uuid[i] = be64_to_cpu(p->uuid[i]);
2959 kfree(mdev->p_uuid);
2960 mdev->p_uuid = p_uuid;
2962 if (mdev->state.conn < C_CONNECTED &&
2963 mdev->state.disk < D_INCONSISTENT &&
2964 mdev->state.role == R_PRIMARY &&
2965 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
2966 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
2967 (unsigned long long)mdev->ed_uuid);
2968 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2972 if (get_ldev(mdev)) {
2973 int skip_initial_sync =
2974 mdev->state.conn == C_CONNECTED &&
2975 mdev->agreed_pro_version >= 90 &&
2976 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
2977 (p_uuid[UI_FLAGS] & 8);
2978 if (skip_initial_sync) {
2979 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
2980 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2981 "clear_n_write from receive_uuids");
2982 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
2983 _drbd_uuid_set(mdev, UI_BITMAP, 0);
2984 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2991 /* Before we test the disk state, we should wait until any ongoing
2992 cluster-wide state change has finished. That is important if
2993 we are primary and are detaching from our disk. We need to see the
2994 new disk state... */
2995 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
2996 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
2997 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3003 * convert_state() - Converts the peer's view of the cluster state to our point of view
3004 * @ps: The state as seen by the peer.
3006 static union drbd_state convert_state(union drbd_state ps)
3008 union drbd_state ms;
3010 static enum drbd_conns c_tab[] = {
3011 [C_CONNECTED] = C_CONNECTED,
3013 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3014 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3015 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3016 [C_VERIFY_S] = C_VERIFY_T,
3022 ms.conn = c_tab[ps.conn];
3027 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
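/*
 * receive_req_state() - act on a state change requested by the peer.
 * The request is expressed in the peer's point of view, so convert it
 * with convert_state() before applying it, and send back the result.
 */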
3032 static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3034 struct p_req_state *p = (struct p_req_state *)h;
3035 union drbd_state mask, val;
3038 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3039 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3042 mask.i = be32_to_cpu(p->mask);
3043 val.i = be32_to_cpu(p->val);
3045 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3046 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3047 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3051 mask = convert_state(mask);
3052 val = convert_state(val);
3054 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3056 drbd_send_sr_reply(mdev, rv);
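/*
 * receive_state() - process the peer's P_STATE packet, possibly
 * triggering a resync handshake if a new connection was established
 * or one of the nodes newly attached a disk.
 */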
3062 static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3064 struct p_state *p = (struct p_state *)h;
3065 enum drbd_conns nconn, oconn;
3066 union drbd_state ns, peer_state;
3067 enum drbd_disk_state real_peer_disk;
3070 ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3073 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3076 peer_state.i = be32_to_cpu(p->state);
3078 real_peer_disk = peer_state.disk;
3079 if (peer_state.disk == D_NEGOTIATING) {
3080 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3081 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3084 spin_lock_irq(&mdev->req_lock);
3086 oconn = nconn = mdev->state.conn;
3087 spin_unlock_irq(&mdev->req_lock);
3089 if (nconn == C_WF_REPORT_PARAMS)
3090 nconn = C_CONNECTED;
3092 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3093 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3094 int cr; /* consider resync */
3096 /* if we established a new connection */
3097 cr = (oconn < C_CONNECTED);
3098 /* if we had an established connection
3099 * and one of the nodes newly attaches a disk */
3100 cr |= (oconn == C_CONNECTED &&
3101 (peer_state.disk == D_NEGOTIATING ||
3102 mdev->state.disk == D_NEGOTIATING));
3103 /* if we have both been inconsistent, and the peer has been
3104 * forced to be UpToDate with --overwrite-data */
3105 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3106 /* if we had been plain connected, and the admin requested to
3107 * start a sync by "invalidate" or "invalidate-remote" */
3108 cr |= (oconn == C_CONNECTED &&
3109 (peer_state.conn >= C_STARTING_SYNC_S &&
3110 peer_state.conn <= C_WF_BITMAP_T));
3113 nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3116 if (nconn == C_MASK) {
3117 if (mdev->state.disk == D_NEGOTIATING) {
3118 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3119 nconn = C_CONNECTED;
3120 } else if (peer_state.disk == D_NEGOTIATING) {
3121 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3122 peer_state.disk = D_DISKLESS;
3124 D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3125 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3131 spin_lock_irq(&mdev->req_lock);
3132 if (mdev->state.conn != oconn)
3134 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3135 ns.i = mdev->state.i;
3137 ns.peer = peer_state.role;
3138 ns.pdsk = real_peer_disk;
3139 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3140 if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3141 ns.disk = mdev->new_state_tmp.disk;
3143 rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
3145 spin_unlock_irq(&mdev->req_lock);
3147 if (rv < SS_SUCCESS) {
3148 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3152 if (oconn > C_WF_REPORT_PARAMS) {
3153 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3154 peer_state.disk != D_NEGOTIATING) {
3155 /* we want resync, peer has not yet decided to sync... */
3156 /* Nowadays this is only reached when a node is forced into the
3157 primary role and its disk is set UpToDate along with that. */
3158 drbd_send_uuids(mdev);
3159 drbd_send_state(mdev);
3163 mdev->net_conf->want_lose = 0;
3165 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3170 static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3172 struct p_rs_uuid *p = (struct p_rs_uuid *)h;
3174 wait_event(mdev->misc_wait,
3175 mdev->state.conn == C_WF_SYNC_UUID ||
3176 mdev->state.conn < C_CONNECTED ||
3177 mdev->state.disk < D_NEGOTIATING);
3179 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3181 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3182 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3185 /* Here the _drbd_uuid_ functions are right, current should
3186 _not_ be rotated into the history */
3187 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3188 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3189 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3191 drbd_start_resync(mdev, C_SYNC_TARGET);
3195 dev_err(DEV, "Ignoring SyncUUID packet!\n");
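/* The bitmap is transferred either plain (P_BITMAP) or RLE-compressed
 * (P_COMPRESSED_BITMAP); the helpers below handle one packet each, and
 * report via this tristate whether more packets are expected. */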
3200 enum receive_bitmap_ret { OK, DONE, FAILED };
3202 static enum receive_bitmap_ret
3203 receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
3204 unsigned long *buffer, struct bm_xfer_ctx *c)
3206 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3207 unsigned want = num_words * sizeof(long);
3209 if (want != h->length) {
3210 dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
3215 if (drbd_recv(mdev, buffer, want) != want)
3218 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3220 c->word_offset += num_words;
3221 c->bit_offset = c->word_offset * BITS_PER_LONG;
3222 if (c->bit_offset > c->bm_bits)
3223 c->bit_offset = c->bm_bits;
3228 static enum receive_bitmap_ret
3229 recv_bm_rle_bits(struct drbd_conf *mdev,
3230 struct p_compressed_bm *p,
3231 struct bm_xfer_ctx *c)
3233 struct bitstream bs;
3237 unsigned long s = c->bit_offset;
3239 int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3240 int toggle = DCBP_get_start(p);
3244 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3246 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3250 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3251 bits = vli_decode_bits(&rl, look_ahead);
3257 if (e >= c->bm_bits) {
3258 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3261 _drbd_bm_set_bits(mdev, s, e);
3265 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3266 have, bits, look_ahead,
3267 (unsigned int)(bs.cur.b - p->code),
3268 (unsigned int)bs.buf_len);
3271 look_ahead >>= bits;
3274 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3277 look_ahead |= tmp << have;
3282 bm_xfer_ctx_bit_to_word_offset(c);
3284 return (s == c->bm_bits) ? DONE : OK;
3287 static enum receive_bitmap_ret
3288 decode_bitmap_c(struct drbd_conf *mdev,
3289 struct p_compressed_bm *p,
3290 struct bm_xfer_ctx *c)
3292 if (DCBP_get_code(p) == RLE_VLI_Bits)
3293 return recv_bm_rle_bits(mdev, p, c);
3295 /* other variants had been implemented for evaluation,
3296 * but have been dropped as this one turned out to be "best"
3297 * during all our tests. */
3299 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3300 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
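/* Log how well the RLE encoding did, compared to sending the bitmap
 * as plain packets; the ratio is reported in tenths of a percent. */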
3304 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3305 const char *direction, struct bm_xfer_ctx *c)
3307 /* what would it take to transfer it "plaintext" */
3308 unsigned plain = sizeof(struct p_header) *
3309 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3310 + c->bm_words * sizeof(long);
3311 unsigned total = c->bytes[0] + c->bytes[1];
3314 /* total can not be zero. but just in case: */
3318 /* don't report if not compressed */
3322 /* total < plain. check for overflow, still */
3323 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3324 : (1000 * total / plain);
3330 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3331 "total %u; compression: %u.%u%%\n",
3333 c->bytes[1], c->packets[1],
3334 c->bytes[0], c->packets[0],
3335 total, r/10, r % 10);
3338 /* Since we are processing the bitfield from lower addresses to higher,
3339 it does not matter whether we process it in 32 bit or 64 bit
3340 chunks, as long as it is little endian. (Understand it as a byte
3341 stream, beginning with the lowest byte...) If we used big endian,
3342 we would need to process it from the highest address to the lowest
3343 in order to be agnostic to the 32 vs 64 bit issue.
3345 returns 0 on failure, 1 if we successfully received it. */
3346 static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3348 struct bm_xfer_ctx c;
3350 enum receive_bitmap_ret ret;
3353 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3355 drbd_bm_lock(mdev, "receive bitmap");
3357 /* maybe we should use some per thread scratch page,
3358 * and allocate that during initial device creation? */
3359 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3361 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3365 c = (struct bm_xfer_ctx) {
3366 .bm_bits = drbd_bm_bits(mdev),
3367 .bm_words = drbd_bm_words(mdev),
3371 if (h->command == P_BITMAP) {
3372 ret = receive_bitmap_plain(mdev, h, buffer, &c);
3373 } else if (h->command == P_COMPRESSED_BITMAP) {
3374 /* MAYBE: sanity check that we speak proto >= 90,
3375 * and the feature is enabled! */
3376 struct p_compressed_bm *p;
3378 if (h->length > BM_PACKET_PAYLOAD_BYTES) {
3379 dev_err(DEV, "ReportCBitmap packet too large\n");
3382 /* use the page buff */
3384 memcpy(p, h, sizeof(*h));
3385 if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
3387 if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3388 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3391 ret = decode_bitmap_c(mdev, p, &c);
3393 dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", h->command);
3397 c.packets[h->command == P_BITMAP]++;
3398 c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
3403 if (!drbd_recv_header(mdev, h))
3405 } while (ret == OK);
3409 INFO_bm_xfer_stats(mdev, "receive", &c);
3411 if (mdev->state.conn == C_WF_BITMAP_T) {
3412 ok = !drbd_send_bitmap(mdev);
3415 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3416 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3417 D_ASSERT(ok == SS_SUCCESS);
3418 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3419 /* admin may have requested C_DISCONNECTING,
3420 * other threads may have noticed network errors */
3421 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3422 drbd_conn_str(mdev->state.conn));
3427 drbd_bm_unlock(mdev);
3428 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3429 drbd_start_resync(mdev, C_SYNC_SOURCE);
3430 free_page((unsigned long) buffer);
3434 static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
3436 /* TODO zero copy sink :) */
3437 static char sink[128];
3440 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3441 h->command, h->length);
3445 want = min_t(int, size, sizeof(sink));
3446 r = drbd_recv(mdev, sink, want);
3447 ERR_IF(r <= 0) break;
3453 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3455 if (mdev->state.disk >= D_INCONSISTENT)
3458 /* Make sure we've acked all the TCP data associated
3459 * with the data requests being unplugged */
3460 drbd_tcp_quickack(mdev->data.socket);
3465 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
3467 static drbd_cmd_handler_f drbd_default_handler[] = {
3468 [P_DATA] = receive_Data,
3469 [P_DATA_REPLY] = receive_DataReply,
3470 [P_RS_DATA_REPLY] = receive_RSDataReply,
3471 [P_BARRIER] = receive_Barrier,
3472 [P_BITMAP] = receive_bitmap,
3473 [P_COMPRESSED_BITMAP] = receive_bitmap,
3474 [P_UNPLUG_REMOTE] = receive_UnplugRemote,
3475 [P_DATA_REQUEST] = receive_DataRequest,
3476 [P_RS_DATA_REQUEST] = receive_DataRequest,
3477 [P_SYNC_PARAM] = receive_SyncParam,
3478 [P_SYNC_PARAM89] = receive_SyncParam,
3479 [P_PROTOCOL] = receive_protocol,
3480 [P_UUIDS] = receive_uuids,
3481 [P_SIZES] = receive_sizes,
3482 [P_STATE] = receive_state,
3483 [P_STATE_CHG_REQ] = receive_req_state,
3484 [P_SYNC_UUID] = receive_sync_uuid,
3485 [P_OV_REQUEST] = receive_DataRequest,
3486 [P_OV_REPLY] = receive_DataRequest,
3487 [P_CSUM_RS_REQUEST] = receive_DataRequest,
3488 /* anything missing from this table is in
3489 * the asender_tbl, see get_asender_cmd */
3493 static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
3494 static drbd_cmd_handler_f *drbd_opt_cmd_handler;
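/* drbdd() - the receiver main loop: read a packet header, look up the
 * matching handler in the tables above, and let it consume the payload.
 * Any unknown or failing packet forces the connection into
 * C_PROTOCOL_ERROR. */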
3496 static void drbdd(struct drbd_conf *mdev)
3498 drbd_cmd_handler_f handler;
3499 struct p_header *header = &mdev->data.rbuf.header;
3501 while (get_t_state(&mdev->receiver) == Running) {
3502 drbd_thread_current_set_cpu(mdev);
3503 if (!drbd_recv_header(mdev, header)) {
3504 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3508 if (header->command < P_MAX_CMD)
3509 handler = drbd_cmd_handler[header->command];
3510 else if (P_MAY_IGNORE < header->command
3511 && header->command < P_MAX_OPT_CMD)
3512 handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3513 else if (header->command > P_MAX_OPT_CMD)
3514 handler = receive_skip;
3518 if (unlikely(!handler)) {
3519 dev_err(DEV, "unknown packet type %d, l: %d!\n",
3520 header->command, header->length);
3521 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3524 if (unlikely(!handler(mdev, header))) {
3525 dev_err(DEV, "error receiving %s, l: %d!\n",
3526 cmdname(header->command), header->length);
3527 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3533 static void drbd_fail_pending_reads(struct drbd_conf *mdev)
3535 struct hlist_head *slot;
3536 struct hlist_node *pos;
3537 struct hlist_node *tmp;
3538 struct drbd_request *req;
3542 * Application READ requests
3544 spin_lock_irq(&mdev->req_lock);
3545 for (i = 0; i < APP_R_HSIZE; i++) {
3546 slot = mdev->app_reads_hash+i;
3547 hlist_for_each_entry_safe(req, pos, tmp, slot, colision) {
3548 /* it may (but should not any longer!)
3549 * be on the work queue; if that assert triggers,
3550 * we need to also grab the
3551 * spin_lock_irq(&mdev->data.work.q_lock);
3552 * and list_del_init here. */
3553 D_ASSERT(list_empty(&req->w.list));
3554 /* It would be nice to complete outside of spinlock.
3555 * But this is easier for now. */
3556 _req_mod(req, connection_lost_while_pending);
3559 for (i = 0; i < APP_R_HSIZE; i++)
3560 if (!hlist_empty(mdev->app_reads_hash+i))
3561 dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: "
3562 "%p, should be NULL\n", i, mdev->app_reads_hash[i].first);
3564 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
3565 spin_unlock_irq(&mdev->req_lock);
3568 void drbd_flush_workqueue(struct drbd_conf *mdev)
3570 struct drbd_wq_barrier barr;
3572 barr.w.cb = w_prev_work_done;
3573 init_completion(&barr.done);
3574 drbd_queue_work(&mdev->data.work, &barr.w);
3575 wait_for_completion(&barr.done);
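/* drbd_disconnect() - tear down the connection: stop the asender,
 * release the sockets, wait for the epoch entry lists to drain, cancel
 * resync bookkeeping, and finally move to C_UNCONNECTED (or all the
 * way to C_STANDALONE if the admin requested C_DISCONNECTING). */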
3578 static void drbd_disconnect(struct drbd_conf *mdev)
3580 enum drbd_fencing_p fp;
3581 union drbd_state os, ns;
3582 int rv = SS_UNKNOWN_ERROR;
3585 if (mdev->state.conn == C_STANDALONE)
3587 if (mdev->state.conn >= C_WF_CONNECTION)
3588 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3589 drbd_conn_str(mdev->state.conn));
3591 /* asender does not clean up anything. it must not interfere, either */
3592 drbd_thread_stop(&mdev->asender);
3594 mutex_lock(&mdev->data.mutex);
3595 drbd_free_sock(mdev);
3596 mutex_unlock(&mdev->data.mutex);
3598 spin_lock_irq(&mdev->req_lock);
3599 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3600 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3601 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3602 spin_unlock_irq(&mdev->req_lock);
3604 /* We do not have data structures that would allow us to
3605 * get the rs_pending_cnt down to 0 again.
3606 * * On C_SYNC_TARGET we do not have any data structures describing
3607 * the pending RSDataRequest's we have sent.
3608 * * On C_SYNC_SOURCE there is no data structure that tracks
3609 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3610 * And no, it is not the sum of the reference counts in the
3611 * resync_LRU. The resync_LRU tracks the whole operation including
3612 the disk-IO, while the rs_pending_cnt only tracks the blocks on the fly. */
3614 drbd_rs_cancel_all(mdev);
3616 mdev->rs_failed = 0;
3617 atomic_set(&mdev->rs_pending_cnt, 0);
3618 wake_up(&mdev->misc_wait);
3620 /* make sure syncer is stopped and w_resume_next_sg queued */
3621 del_timer_sync(&mdev->resync_timer);
3622 set_bit(STOP_SYNC_TIMER, &mdev->flags);
3623 resync_timer_fn((unsigned long)mdev);
3625 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3626 * w_make_resync_request etc. which may still be on the worker queue
3627 * to be "canceled" */
3628 drbd_flush_workqueue(mdev);
3630 /* This also does reclaim_net_ee(). If we do this too early, we might
3631 * miss some resync ee and pages.*/
3632 drbd_process_done_ee(mdev);
3634 kfree(mdev->p_uuid);
3635 mdev->p_uuid = NULL;
3637 if (!mdev->state.susp)
3640 drbd_fail_pending_reads(mdev);
3642 dev_info(DEV, "Connection closed\n");
3647 if (get_ldev(mdev)) {
3648 fp = mdev->ldev->dc.fencing;
3652 if (mdev->state.role == R_PRIMARY) {
3653 if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
3654 enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
3655 drbd_request_state(mdev, NS(pdsk, nps));
3659 spin_lock_irq(&mdev->req_lock);
3661 if (os.conn >= C_UNCONNECTED) {
3662 /* Do not restart in case we are C_DISCONNECTING */
3664 ns.conn = C_UNCONNECTED;
3665 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3667 spin_unlock_irq(&mdev->req_lock);
3669 if (os.conn == C_DISCONNECTING) {
3670 struct hlist_head *h;
3671 wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
3673 /* we must not free the tl_hash
3674 * while application io is still on the fly */
3675 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0);
3677 spin_lock_irq(&mdev->req_lock);
3679 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3681 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3682 (int)(h - mdev->ee_hash), h->first);
3683 kfree(mdev->ee_hash);
3684 mdev->ee_hash = NULL;
3685 mdev->ee_hash_s = 0;
3688 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3690 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3691 (int)(h - mdev->tl_hash), h->first);
3692 kfree(mdev->tl_hash);
3693 mdev->tl_hash = NULL;
3694 mdev->tl_hash_s = 0;
3695 spin_unlock_irq(&mdev->req_lock);
3697 crypto_free_hash(mdev->cram_hmac_tfm);
3698 mdev->cram_hmac_tfm = NULL;
3700 kfree(mdev->net_conf);
3701 mdev->net_conf = NULL;
3702 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3705 /* tcp_close and release of sendpage pages can be deferred. I don't
3706 * want to use SO_LINGER, because apparently it can be deferred for
3707 * more than 20 seconds (longest time I checked).
3709 * Actually we don't care for exactly when the network stack does its
3710 * put_page(), but release our reference on these pages right here.
3712 i = drbd_release_ee(mdev, &mdev->net_ee);
3714 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3715 i = atomic_read(&mdev->pp_in_use);
3717 dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
3719 D_ASSERT(list_empty(&mdev->read_ee));
3720 D_ASSERT(list_empty(&mdev->active_ee));
3721 D_ASSERT(list_empty(&mdev->sync_ee));
3722 D_ASSERT(list_empty(&mdev->done_ee));
3724 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3725 atomic_set(&mdev->current_epoch->epoch_size, 0);
3726 D_ASSERT(list_empty(&mdev->current_epoch->list));
3730 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3731 * we can agree on is stored in agreed_pro_version.
3733 * feature flags and the reserved array should be enough room for future
3734 * enhancements of the handshake protocol, and possible plugins...
3736 * for now, they are expected to be zero, but ignored.
3738 static int drbd_send_handshake(struct drbd_conf *mdev)
3740 /* ASSERT current == mdev->receiver ... */
3741 struct p_handshake *p = &mdev->data.sbuf.handshake;
3744 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3745 dev_err(DEV, "interrupted during initial handshake\n");
3746 return 0; /* interrupted. not ok. */
3749 if (mdev->data.socket == NULL) {
3750 mutex_unlock(&mdev->data.mutex);
3754 memset(p, 0, sizeof(*p));
3755 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3756 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3757 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
3758 (struct p_header *)p, sizeof(*p), 0);
3759 mutex_unlock(&mdev->data.mutex);
3765 * 1 yes, we have a valid connection
3766 * 0 oops, did not work out, please try again
3767 * -1 peer talks different language,
3768 * no point in trying again, please go standalone.
3770 static int drbd_do_handshake(struct drbd_conf *mdev)
3772 /* ASSERT current == mdev->receiver ... */
3773 struct p_handshake *p = &mdev->data.rbuf.handshake;
3774 const int expect = sizeof(struct p_handshake)
3775 -sizeof(struct p_header);
3778 rv = drbd_send_handshake(mdev);
3782 rv = drbd_recv_header(mdev, &p->head);
3786 if (p->head.command != P_HAND_SHAKE) {
3787 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3788 cmdname(p->head.command), p->head.command);
3792 if (p->head.length != expect) {
3793 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3794 expect, p->head.length);
3798 rv = drbd_recv(mdev, &p->head.payload, expect);
3801 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3805 p->protocol_min = be32_to_cpu(p->protocol_min);
3806 p->protocol_max = be32_to_cpu(p->protocol_max);
3807 if (p->protocol_max == 0)
3808 p->protocol_max = p->protocol_min;
3810 if (PRO_VERSION_MAX < p->protocol_min ||
3811 PRO_VERSION_MIN > p->protocol_max)
3814 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3816 dev_info(DEV, "Handshake successful: "
3817 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3822 dev_err(DEV, "incompatible DRBD dialects: "
3823 "I support %d-%d, peer supports %d-%d\n",
3824 PRO_VERSION_MIN, PRO_VERSION_MAX,
3825 p->protocol_min, p->protocol_max);
3829 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3830 static int drbd_do_auth(struct drbd_conf *mdev)
3832 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3833 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3837 #define CHALLENGE_LEN 64
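/* drbd_do_auth() - challenge/response authentication over the data
 * socket, keyed with the shared secret via the cram-hmac-alg: we send
 * a random challenge, the peer answers with an HMAC over it, and we
 * compare that against the digest we compute ourselves. Returns
 * nonzero on success. */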
3838 static int drbd_do_auth(struct drbd_conf *mdev)
3840 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3841 struct scatterlist sg;
3842 char *response = NULL;
3843 char *right_response = NULL;
3844 char *peers_ch = NULL;
3846 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3847 unsigned int resp_size;
3848 struct hash_desc desc;
3851 desc.tfm = mdev->cram_hmac_tfm;
3854 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
3855 (u8 *)mdev->net_conf->shared_secret, key_len);
3857 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
3862 get_random_bytes(my_challenge, CHALLENGE_LEN);
3864 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
3868 rv = drbd_recv_header(mdev, &p);
3872 if (p.command != P_AUTH_CHALLENGE) {
3873 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
3874 cmdname(p.command), p.command);
3879 if (p.length > CHALLENGE_LEN*2) {
3880 dev_err(DEV, "AuthChallenge payload too big.\n");
3885 peers_ch = kmalloc(p.length, GFP_NOIO);
3886 if (peers_ch == NULL) {
3887 dev_err(DEV, "kmalloc of peers_ch failed\n");
3892 rv = drbd_recv(mdev, peers_ch, p.length);
3894 if (rv != p.length) {
3895 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
3900 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
3901 response = kmalloc(resp_size, GFP_NOIO);
3902 if (response == NULL) {
3903 dev_err(DEV, "kmalloc of response failed\n");
3908 sg_init_table(&sg, 1);
3909 sg_set_buf(&sg, peers_ch, p.length);
3911 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
3913 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3918 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
3922 rv = drbd_recv_header(mdev, &p);
3926 if (p.command != P_AUTH_RESPONSE) {
3927 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
3928 cmdname(p.command), p.command);
3933 if (p.length != resp_size) {
3934 dev_err(DEV, "AuthResponse payload of wrong size\n");
3939 rv = drbd_recv(mdev, response, resp_size);
3941 if (rv != resp_size) {
3942 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
3947 right_response = kmalloc(resp_size, GFP_NOIO);
3948 if (right_response == NULL) {
3949 dev_err(DEV, "kmalloc of right_response failed\n");
3954 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
3956 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
3958 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3963 rv = !memcmp(response, right_response, resp_size);
3966 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
3967 resp_size, mdev->net_conf->cram_hmac_alg);
3972 kfree(right_response);
3978 int drbdd_init(struct drbd_thread *thi)
3980 struct drbd_conf *mdev = thi->mdev;
3981 unsigned int minor = mdev_to_minor(mdev);
3984 sprintf(current->comm, "drbd%d_receiver", minor);
3986 dev_info(DEV, "receiver (re)started\n");
3989 h = drbd_connect(mdev);
3991 drbd_disconnect(mdev);
3992 __set_current_state(TASK_INTERRUPTIBLE);
3993 schedule_timeout(HZ);
3996 dev_warn(DEV, "Discarding network configuration.\n");
3997 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4002 if (get_net_conf(mdev)) {
4008 drbd_disconnect(mdev);
4010 dev_info(DEV, "receiver terminated\n");
4014 /* ********* acknowledge sender ******** */
4016 static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4018 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4020 int retcode = be32_to_cpu(p->retcode);
4022 if (retcode >= SS_SUCCESS) {
4023 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4025 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4026 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4027 drbd_set_st_err_str(retcode), retcode);
4029 wake_up(&mdev->state_wait);
4034 static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
4036 return drbd_send_ping_ack(mdev);
4040 static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4042 /* restore idle timeout */
4043 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4048 static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4050 struct p_block_ack *p = (struct p_block_ack *)h;
4051 sector_t sector = be64_to_cpu(p->sector);
4052 int blksize = be32_to_cpu(p->blksize);
4054 D_ASSERT(mdev->agreed_pro_version >= 89);
4056 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4058 drbd_rs_complete_io(mdev, sector);
4059 drbd_set_in_sync(mdev, sector, blksize);
4060 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4061 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4062 dec_rs_pending(mdev);
4067 /* when we receive the ACK for a write request,
4068 * verify that we actually know about it */
4069 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4070 u64 id, sector_t sector)
4072 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4073 struct hlist_node *n;
4074 struct drbd_request *req;
4076 hlist_for_each_entry(req, n, slot, colision) {
4077 if ((unsigned long)req == (unsigned long)id) {
4078 if (req->sector != sector) {
4079 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4080 "wrong sector (%llus versus %llus)\n", req,
4081 (unsigned long long)req->sector,
4082 (unsigned long long)sector);
4088 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4089 (void *)(unsigned long)id, (unsigned long long)sector);
4093 typedef struct drbd_request *(req_validator_fn)
4094 (struct drbd_conf *mdev, u64 id, sector_t sector);
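/* Look up the request an ACK refers to (under req_lock, via the given
 * validator), apply the state transition 'what' to it, and complete the
 * master bio outside the spinlock if the transition finished it. */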
4096 static int validate_req_change_req_state(struct drbd_conf *mdev,
4097 u64 id, sector_t sector, req_validator_fn validator,
4098 const char *func, enum drbd_req_event what)
4100 struct drbd_request *req;
4101 struct bio_and_error m;
4103 spin_lock_irq(&mdev->req_lock);
4104 req = validator(mdev, id, sector);
4105 if (unlikely(!req)) {
4106 spin_unlock_irq(&mdev->req_lock);
4107 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4110 __req_mod(req, what, &m);
4111 spin_unlock_irq(&mdev->req_lock);
4114 complete_master_bio(mdev, &m);
4118 static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4120 struct p_block_ack *p = (struct p_block_ack *)h;
4121 sector_t sector = be64_to_cpu(p->sector);
4122 int blksize = be32_to_cpu(p->blksize);
4123 enum drbd_req_event what;
4125 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4127 if (is_syncer_block_id(p->block_id)) {
4128 drbd_set_in_sync(mdev, sector, blksize);
4129 dec_rs_pending(mdev);
4132 switch (be16_to_cpu(h->command)) {
4133 case P_RS_WRITE_ACK:
4134 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4135 what = write_acked_by_peer_and_sis;
4138 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4139 what = write_acked_by_peer;
4142 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4143 what = recv_acked_by_peer;
4146 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4147 what = conflict_discarded_by_peer;
4154 return validate_req_change_req_state(mdev, p->block_id, sector,
4155 _ack_id_to_req, __func__, what);
4158 static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4160 struct p_block_ack *p = (struct p_block_ack *)h;
4161 sector_t sector = be64_to_cpu(p->sector);
4163 if (__ratelimit(&drbd_ratelimit_state))
4164 dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");
4166 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4168 if (is_syncer_block_id(p->block_id)) {
4169 int size = be32_to_cpu(p->blksize);
4170 dec_rs_pending(mdev);
4171 drbd_rs_failed_io(mdev, sector, size);
4174 return validate_req_change_req_state(mdev, p->block_id, sector,
4175 _ack_id_to_req, __func__, neg_acked);
4178 static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4180 struct p_block_ack *p = (struct p_block_ack *)h;
4181 sector_t sector = be64_to_cpu(p->sector);
4183 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4184 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4185 (unsigned long long)sector, be32_to_cpu(p->blksize));
4187 return validate_req_change_req_state(mdev, p->block_id, sector,
4188 _ar_id_to_req, __func__, neg_acked);
4191 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4195 struct p_block_ack *p = (struct p_block_ack *)h;
4197 sector = be64_to_cpu(p->sector);
4198 size = be32_to_cpu(p->blksize);
4199 D_ASSERT(p->block_id == ID_SYNCER);
4201 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4203 dec_rs_pending(mdev);
4205 if (get_ldev_if_state(mdev, D_FAILED)) {
4206 drbd_rs_complete_io(mdev, sector);
4207 drbd_rs_failed_io(mdev, sector, size);
4214 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4216 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4218 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4223 static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4225 struct p_block_ack *p = (struct p_block_ack *)h;
4226 struct drbd_work *w;
4230 sector = be64_to_cpu(p->sector);
4231 size = be32_to_cpu(p->blksize);
4233 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4235 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4236 drbd_ov_oos_found(mdev, sector, size);
4240 drbd_rs_complete_io(mdev, sector);
4241 dec_rs_pending(mdev);
4243 if (--mdev->ov_left == 0) {
4244 w = kmalloc(sizeof(*w), GFP_NOIO);
4246 w->cb = w_ov_finished;
4247 drbd_queue_work_front(&mdev->data.work, w);
4249 dev_err(DEV, "kmalloc(w) failed.");
4251 drbd_resync_finished(mdev);
4257 struct asender_cmd {
4259 int (*process)(struct drbd_conf *mdev, struct p_header *h);
4262 static struct asender_cmd *get_asender_cmd(int cmd)
4264 static struct asender_cmd asender_tbl[] = {
4265 /* anything missing from this table is in
4266 * the drbd_cmd_handler (drbd_default_handler) table,
4267 * see the beginning of drbdd() */
4268 [P_PING] = { sizeof(struct p_header), got_Ping },
4269 [P_PING_ACK] = { sizeof(struct p_header), got_PingAck },
4270 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4271 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4272 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4273 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4274 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4275 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4276 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4277 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4278 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4279 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4280 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
4281 [P_MAX_CMD] = { 0, NULL },
4283 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4285 return &asender_tbl[cmd];
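/* drbd_asender() - the "acknowledge sender" thread: it answers pings,
 * flushes done_ee so ACKs go out, and processes the small fixed-size
 * packets arriving on the meta socket according to asender_tbl. */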
4288 int drbd_asender(struct drbd_thread *thi)
4290 struct drbd_conf *mdev = thi->mdev;
4291 struct p_header *h = &mdev->meta.rbuf.header;
4292 struct asender_cmd *cmd = NULL;
4297 int expect = sizeof(struct p_header);
4300 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4302 current->policy = SCHED_RR; /* Make this a realtime task! */
4303 current->rt_priority = 2; /* more important than all other tasks */
4305 while (get_t_state(thi) == Running) {
4306 drbd_thread_current_set_cpu(mdev);
4307 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4308 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4309 mdev->meta.socket->sk->sk_rcvtimeo =
4310 mdev->net_conf->ping_timeo*HZ/10;
4313 /* conditionally cork;
4314 * it may hurt latency if we cork without much to send */
4315 if (!mdev->net_conf->no_cork &&
4316 3 < atomic_read(&mdev->unacked_cnt))
4317 drbd_tcp_cork(mdev->meta.socket);
4319 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4320 flush_signals(current);
4321 if (!drbd_process_done_ee(mdev)) {
4322 dev_err(DEV, "process_done_ee() = NOT_OK\n");
4325 /* to avoid race with newly queued ACKs */
4326 set_bit(SIGNAL_ASENDER, &mdev->flags);
4327 spin_lock_irq(&mdev->req_lock);
4328 empty = list_empty(&mdev->done_ee);
4329 spin_unlock_irq(&mdev->req_lock);
4330 /* new ack may have been queued right here,
4331 * but then there is also a signal pending,
4332 * and we start over... */
4336 /* but unconditionally uncork unless disabled */
4337 if (!mdev->net_conf->no_cork)
4338 drbd_tcp_uncork(mdev->meta.socket);
4340 /* short circuit, recv_msg would return EINTR anyways. */
4341 if (signal_pending(current))
4344 rv = drbd_recv_short(mdev, mdev->meta.socket,
4345 buf, expect-received, 0);
4346 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4348 flush_signals(current);
4351 * -EINTR (on meta) we got a signal
4352 * -EAGAIN (on meta) rcvtimeo expired
4353 * -ECONNRESET other side closed the connection
4354 * -ERESTARTSYS (on data) we got a signal
4355 * rv < 0 other than above: unexpected error!
4356 * rv == expected: full header or command
4357 * rv < expected: "woken" by signal during receive
4358 * rv == 0 : "connection shut down by peer"
4360 if (likely(rv > 0)) {
4363 } else if (rv == 0) {
4364 dev_err(DEV, "meta connection shut down by peer.\n");
4366 } else if (rv == -EAGAIN) {
4367 if (mdev->meta.socket->sk->sk_rcvtimeo ==
4368 mdev->net_conf->ping_timeo*HZ/10) {
4369 dev_err(DEV, "PingAck did not arrive in time.\n");
4372 set_bit(SEND_PING, &mdev->flags);
4374 } else if (rv == -EINTR) {
4377 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4381 if (received == expect && cmd == NULL) {
4382 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4383 dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
4384 (long)be32_to_cpu(h->magic),
4385 h->command, h->length);
4388 cmd = get_asender_cmd(be16_to_cpu(h->command));
4389 len = be16_to_cpu(h->length);
4390 if (unlikely(cmd == NULL)) {
4391 dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
4392 (long)be32_to_cpu(h->magic),
4393 h->command, h->length);
4396 expect = cmd->pkt_size;
4397 ERR_IF(len != expect-sizeof(struct p_header))
4400 if (received == expect) {
4401 D_ASSERT(cmd != NULL);
4402 if (!cmd->process(mdev, h))
4407 expect = sizeof(struct p_header);
4414 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4418 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4420 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4422 D_ASSERT(mdev->state.conn < C_CONNECTED);
4423 dev_info(DEV, "asender terminated\n");