/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
struct flush_work {
	struct drbd_work w;
	struct drbd_epoch *epoch;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	struct drbd_epoch *prev;
	spin_lock(&mdev->epoch_lock);
	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
	if (prev == epoch || prev == mdev->current_epoch)
		prev = NULL;
	spin_unlock(&mdev->epoch_lock);
	return prev;
}
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

static struct page *drbd_pp_first_page_or_try_alloc(struct drbd_conf *mdev)
{
	struct page *page = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant > 0) {
		spin_lock(&drbd_pp_lock);
		page = drbd_pp_pool;
		if (page) {
			drbd_pp_pool = (struct page *)page_private(page);
			set_page_private(page, 0); /* just to be polite */
			drbd_pp_vacant--;
		}
		spin_unlock(&drbd_pp_lock);
	}
	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	if (!page)
		page = alloc_page(GFP_TRY);
	if (page)
		atomic_inc(&mdev->pp_in_use);

	return page;
}
/* kick lower level device, if we have more than (arbitrary number)
 * reference counts on it, which typically are locally submitted io
 * requests.  Don't use unacked_cnt, so we speed up proto A and B, too. */
static void maybe_kick_lo(struct drbd_conf *mdev)
{
	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
		drbd_kick_lo(mdev);
}
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list.  Since
	   they are sent in order over the wire, they have to finish
	   in order.  As soon as we see the first one that has not
	   finished, we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_bio_has_active_page(e->private_bio))
			break;
		list_move(le, to_be_freed);
	}
}
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	maybe_kick_lo(mdev);
	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_ee(mdev, e);
}
/**
 * drbd_pp_alloc() - Returns a page, fails only if a signal comes in
 * @mdev:	DRBD device.
 * @retry:	whether or not to retry allocation forever (or until signalled)
 *
 * Tries to allocate a page, first from our own page pool, then from the
 * kernel, unless this allocation would exceed the max_buffers setting.
 * If @retry is non-zero, retry until DRBD frees a page somewhere else.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, int retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
		page = drbd_pp_first_page_or_try_alloc(mdev);
		if (page)
			return page;
	}

	for (;;) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		/* try the reclaim stuff */
		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_page_or_try_alloc(mdev);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	return page;
}
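
/* Usage sketch (not part of the original file): callers derive @retry from
 * their gfp mask, so GFP_NOIO callers block until a page is freed elsewhere,
 * while opportunistic callers simply give up.  The helper name below is
 * hypothetical, for illustration only. */
static inline struct page *example_pp_alloc(struct drbd_conf *mdev, gfp_t gfp_mask)
{
	/* __GFP_WAIT in the mask means "retry until signalled" */
	return drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT) != 0);
}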
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock) */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
{
	int free_it;

	spin_lock(&drbd_pp_lock);
	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
		free_it = 1;
	} else {
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
		drbd_pp_vacant++;
		free_it = 0;
	}
	spin_unlock(&drbd_pp_lock);

	atomic_dec(&mdev->pp_in_use);

	if (free_it)
		__free_page(page);

	wake_up(&drbd_pp_wait);
}
static void drbd_pp_free_bio_pages(struct drbd_conf *mdev, struct bio *bio)
{
	struct page *p_to_be_freed = NULL;
	struct page *page;
	struct bio_vec *bvec;
	int i;

	spin_lock(&drbd_pp_lock);
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) {
			set_page_private(bvec->bv_page, (unsigned long)p_to_be_freed);
			p_to_be_freed = bvec->bv_page;
		} else {
			set_page_private(bvec->bv_page, (unsigned long)drbd_pp_pool);
			drbd_pp_pool = bvec->bv_page;
			drbd_pp_vacant++;
		}
	}
	spin_unlock(&drbd_pp_lock);
	atomic_sub(bio->bi_vcnt, &mdev->pp_in_use);

	while (p_to_be_freed) {
		page = p_to_be_freed;
		p_to_be_freed = (struct page *)page_private(page);
		set_page_private(page, 0); /* just to be polite */
		put_page(page);
	}

	wake_up(&drbd_pp_wait);
}
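
/* The pool used above is a plain singly linked list of struct page, chained
 * through the otherwise unused page_private() field.  A minimal sketch of
 * that idiom (helper names are hypothetical, not part of DRBD): */
static inline void example_pp_push(struct page **pool, struct page *page)
{
	set_page_private(page, (unsigned long)*pool);	/* page->next = head */
	*pool = page;					/* head = page */
}

static inline struct page *example_pp_pop(struct page **pool)
{
	struct page *page = *pool;

	if (page) {
		*pool = (struct page *)page_private(page);	/* head = head->next */
		set_page_private(page, 0);	/* just to be polite */
	}
	return page;
}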
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_process_done_ee()
 drbd_wait_ee_list_empty()
*/
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct request_queue *q;
	struct drbd_epoch_entry *e;
	struct page *page;
	struct bio *bio;
	unsigned int ds;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	bio = bio_alloc(gfp_mask & ~__GFP_HIGHMEM, div_ceil(data_size, PAGE_SIZE));
	if (!bio) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of a bio failed\n");
		goto fail1;
	}

	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_sector = sector;

	ds = data_size;
	while (ds) {
		page = drbd_pp_alloc(mdev, (gfp_mask & __GFP_WAIT));
		if (!page) {
			if (!(gfp_mask & __GFP_NOWARN))
				dev_err(DEV, "alloc_ee: Allocation of a page failed\n");
			goto fail2;
		}
		if (!bio_add_page(bio, page, min_t(int, ds, PAGE_SIZE), 0)) {
			drbd_pp_free(mdev, page);
			dev_err(DEV, "alloc_ee: bio_add_page(s=%llu,"
				"data_size=%u,ds=%u) failed\n",
				(unsigned long long)sector, data_size, ds);

			q = bdev_get_queue(bio->bi_bdev);
			if (q->merge_bvec_fn) {
				struct bvec_merge_data bvm = {
					.bi_bdev = bio->bi_bdev,
					.bi_sector = bio->bi_sector,
					.bi_size = bio->bi_size,
					.bi_rw = bio->bi_rw,
				};
				int l = q->merge_bvec_fn(q, &bvm,
						&bio->bi_io_vec[bio->bi_vcnt]);
				dev_err(DEV, "merge_bvec_fn() = %d\n", l);
			}

			/* dump more of the bio. */
			dev_err(DEV, "bio->bi_max_vecs = %d\n", bio->bi_max_vecs);
			dev_err(DEV, "bio->bi_vcnt = %d\n", bio->bi_vcnt);
			dev_err(DEV, "bio->bi_size = %d\n", bio->bi_size);
			dev_err(DEV, "bio->bi_phys_segments = %d\n", bio->bi_phys_segments);

			goto fail2;
		}
		ds -= min_t(int, ds, PAGE_SIZE);
	}

	D_ASSERT(data_size == bio->bi_size);

	bio->bi_private = e;
	e->mdev = mdev;
	e->sector = sector;
	e->size = bio->bi_size;

	e->private_bio = bio;
	e->block_id = id;
	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->flags = 0;

	return e;

 fail2:
	drbd_pp_free_bio_pages(mdev, bio);
	bio_put(bio);
 fail1:
	mempool_free(e, drbd_ee_mempool);

	return NULL;
}
void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	struct bio *bio = e->private_bio;
	drbd_pp_free_bio_pages(mdev, bio);
	bio_put(bio);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_ee(mdev, e);
		count++;
	}
	return count;
}
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}
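
/* All entries processed above share one work callback signature; chaining
 * "ok = cb(...) && ok" keeps sending the remaining ACKs even after one
 * callback has failed, while still reporting overall failure.  A sketch of
 * a conforming callback (hypothetical, for illustration only): */
static int example_e_end(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;

	/* on cancel the connection is going down anyway: skip the ACK */
	return cancel ? 1 : drbd_send_ack(mdev, P_WRITE_ACK, e);
}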
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		drbd_kick_lo(mdev);
		schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}
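
/* The list comment further up spells out the locking contract: the
 * underscore variant expects req_lock to be held (and drops/retakes it
 * while sleeping), the plain variant acquires the lock itself.  A sketch
 * of a typical caller (compare receive_Barrier() below; the helper name
 * is hypothetical): */
static inline void example_drain_active_ee(struct drbd_conf *mdev)
{
	/* no req_lock held here; the wrapper takes and releases it */
	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
}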
/* see also kernel_accept, which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */
		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}
static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return FALSE;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return TRUE;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return FALSE;
	}
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
		dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);
	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);
	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	if (mdev->net_conf->sndbuf_size) {
		sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}

	if (mdev->net_conf->rcvbuf_size) {
		sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	drbd_send_protocol(mdev);
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
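
/* How the receiver consumes those return codes -- a simplified sketch of
 * the connect retry loop, not DRBD's actual one; for illustration only: */
static void example_connect_loop(struct drbd_conf *mdev)
{
	int h;

	do {
		h = drbd_connect(mdev);
		if (h == 0)	/* 0: crossed/broken sockets, try again */
			schedule_timeout_interruptible(HZ);
		if (h == -1)	/* -1: incompatible peer, go standalone */
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	} while (h == 0);
}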
static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
{
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));

	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return FALSE;
	}
	h->command = be16_to_cpu(h->command);
	h->length  = be16_to_cpu(h->length);
	if (unlikely(h->magic != BE_DRBD_MAGIC)) {
		dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
		    (long)be32_to_cpu(h->magic),
		    h->command, h->length);
		return FALSE;
	}
	mdev->last_received = jiffies;

	return TRUE;
}
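
/* Every packet on the wire starts with this fixed header; magic, command
 * and length travel in big endian byte order.  Shown here only as an
 * illustration -- the authoritative definition lives in DRBD's headers: */
#if 0
struct p_header {
	u32	  magic;	/* cpu_to_be32(DRBD_MAGIC) */
	u16	  command;	/* enum drbd_packets */
	u16	  length;	/* bytes of payload following this header */
	u8	  payload[0];
} __packed;
#endif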
static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}

	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}
static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct flush_work *fw = (struct flush_work *)w;
	struct drbd_epoch *epoch = fw->epoch;

	kfree(w);

	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
		drbd_flush_after_epoch(mdev, epoch);

	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));

	return 1;
}
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int finish, epoch_size;
	struct drbd_epoch *next_epoch;
	int schedule_flush = 0;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;
		finish = 0;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);

			/* Special case: If we just switched from WO_bio_barrier to
			   WO_bdev_flush we should not finish the current epoch */
			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
			    mdev->write_ordering != WO_bio_barrier &&
			    epoch == mdev->current_epoch)
				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
			break;
		case EV_BARRIER_DONE:
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
		    epoch->list.prev == &mdev->current_epoch->list &&
		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
			/* Nearly all conditions are met to finish that epoch... */
			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
			    mdev->write_ordering == WO_none ||
			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
			    ev & EV_CLEANUP) {
				finish = 1;
				set_bit(DE_IS_FINISHING, &epoch->flags);
			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
				 mdev->write_ordering == WO_bio_barrier) {
				atomic_inc(&epoch->active);
				schedule_flush = 1;
			}
		}
		if (finish) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	if (schedule_flush) {
		struct flush_work *fw;
		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
		if (fw) {
			fw->w.cb = w_flush;
			fw->epoch = epoch;
			drbd_queue_work(&mdev->data.work, &fw->w);
		} else {
			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
			/* That is not a recursion, only one level */
			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
		}
	}

	return rv;
}
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
		[WO_bio_barrier] = "barrier",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
		wo = WO_bdev_flush;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
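
/* The requested method is degraded until it satisfies the per-device
 * constraints above: barrier -> flush -> drain -> none.  Sketch of a
 * hypothetical caller, compare drbd_flush_after_epoch() above: */
static inline void example_barrier_not_supported(struct drbd_conf *mdev)
{
	/* a BIO_RW_BARRIER failed at runtime: never try barriers again,
	 * use the best method the configuration still allows */
	drbd_bump_write_ordering(mdev, WO_bdev_flush);
}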
/**
 * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways (unused in this callback)
 */
int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	struct bio *bio = e->private_bio;

	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
	   so that we can finish that epoch in drbd_may_finish_epoch().
	   That is necessary if we already have a long chain of Epochs, before
	   we realize that BIO_RW_BARRIER is actually not supported */

	/* As long as the -ENOTSUPP on the barrier is reported immediately
	   that will never trigger. If it is reported late, we will just
	   print that warning and continue correctly for all future requests
	   with WO_bdev_flush */
	if (previous_epoch(mdev, e->epoch))
		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");

	/* prepare bio for re-submit,
	 * re-init volatile members */
	/* we still have a local reference,
	 * get_ldev was done in receive_Data. */
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_sector = e->sector;
	bio->bi_size = e->size;
	bio->bi_idx = 0;

	bio->bi_flags &= ~(BIO_POOL_MASK - 1);
	bio->bi_flags |= 1 << BIO_UPTODATE;

	/* don't know whether this is necessary: */
	bio->bi_phys_segments = 0;
	bio->bi_next = NULL;

	/* these should be unchanged: */
	/* bio->bi_end_io = drbd_endio_write_sec; */
	/* bio->bi_vcnt = whatever; */

	e->w.cb = e_end_block;

	/* This is no longer a barrier request. */
	bio->bi_rw &= ~(1UL << BIO_RW_BARRIER);

	drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, bio);

	return 1;
}
static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
{
	int rv, issue_flush;
	struct p_barrier *p = (struct p_barrier *)h;
	struct drbd_epoch *epoch;

	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;

	rv = drbd_recv(mdev, h->payload, h->length);
	ERR_IF(rv != h->length) return FALSE;

	inc_unacked(mdev);

	if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
		drbd_kick_lo(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_bio_barrier:
	case WO_none:
		if (rv == FE_RECYCLED)
			return TRUE;
		break;

	case WO_bdev_flush:
	case WO_drain_io:
		if (rv == FE_STILL_LIVE) {
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
		}
		if (rv == FE_RECYCLED)
			return TRUE;

		/* The asender will send all the ACKs and barrier ACKs out, since
		   all EEs moved from the active_ee to the done_ee. We need to
		   provide a new epoch object for the EEs that come in soon */
		break;
	}

	/* receiver context, in the writeout path of the other node.
	 * avoid potential distributed deadlock */
	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
	if (!epoch) {
		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		if (issue_flush) {
			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
			if (rv == FE_RECYCLED)
				return TRUE;
		}

		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);

		return TRUE;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return TRUE;
}
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int dgs, ds, i, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;
	bio = e->private_bio;
	ds = data_size;
	bio_for_each_segment(bvec, bio, i) {
		page = bvec->bv_page;
		rr = drbd_recv(mdev, kmap(page), min_t(int, ds, PAGE_SIZE));
		kunmap(page);
		if (rr != min_t(int, ds, PAGE_SIZE)) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, ds, PAGE_SIZE));
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED.\n");
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	page = drbd_pp_alloc(mdev, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page);
	return rv;
}
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely(drbd_bio_uptodate(e->private_bio))) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return FALSE;
	}

	dec_rs_pending(mdev);

	e->private_bio->bi_end_io = drbd_endio_write_sec;
	e->private_bio->bi_rw = WRITE;
	e->w.cb = e_end_resync_block;

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
	/* accounting done in endio */

	maybe_kick_lo(mdev);
	return TRUE;
}
static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
{
	struct drbd_request *req;
	sector_t sector;
	unsigned int header_size, data_size;
	int ok;
	struct p_data *p = (struct p_data *)h;

	header_size = sizeof(*p) - sizeof(*h);
	data_size   = h->length  - header_size;

	ERR_IF(data_size == 0) return FALSE;

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return FALSE;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}
static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	unsigned int header_size, data_size;
	int ok;
	struct p_data *p = (struct p_data *)h;

	header_size = sizeof(*p) - sizeof(*h);
	data_size   = h->length  - header_size;

	ERR_IF(data_size == 0) return FALSE;

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
	}

	return ok;
}
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	struct drbd_epoch *epoch;
	int ok = 1, pcmd;

	if (e->flags & EE_IS_BARRIER) {
		epoch = previous_epoch(mdev, e->epoch);
		if (epoch)
			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
	}

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely(drbd_bio_uptodate(e->private_bio))) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;

	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}
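
/* The wrap-safe comparison used above is the classic serial number idiom:
 * a <= b iff the unsigned difference, reinterpreted as signed, is not
 * positive.  A sketch, shown for illustration only (the real helper is
 * defined elsewhere in DRBD):
 */
#if 0
static inline int seq_le(u32 a, u32 b)
{
	return (s32)(a - b) <= 0;
}
#endif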
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = (struct p_data *)h;
	int header_size, data_size;
	int rw = WRITE;
	u32 dp_flags;

	header_size = sizeof(*p) - sizeof(*h);
	data_size   = h->length  - header_size;

	ERR_IF(data_size == 0) return FALSE;

	if (drbd_recv(mdev, h->payload, header_size) != header_size)
		return FALSE;

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write mirrored data block "
			    "to local disk.\n");
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return FALSE;
	}
	e->private_bio->bi_end_io = drbd_endio_write_sec;
	e->w.cb = e_end_block;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);

	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
		struct drbd_epoch *epoch;
		/* Issue a barrier if we start a new epoch, and the previous epoch
		   was not an epoch containing a single request which already was
		   a barrier. */
		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
		if (epoch == e->epoch) {
			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
			rw |= (1<<BIO_RW_BARRIER);
			e->flags |= EE_IS_BARRIER;
		} else {
			if (atomic_read(&epoch->epoch_size) > 1 ||
			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
				rw |= (1<<BIO_RW_BARRIER);
				e->flags |= EE_IS_BARRIER;
			}
		}
	}
	spin_unlock(&mdev->epoch_lock);

	dp_flags = be32_to_cpu(p->dp_flags);
	if (dp_flags & DP_HARDBARRIER) {
		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
		/* rw |= (1<<BIO_RW_BARRIER); */
	}
	if (dp_flags & DP_RW_SYNC)
		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;
	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return TRUE;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}
1871 spin_unlock_irq(&mdev->req_lock);
1873 switch (mdev->net_conf->wire_protocol) {
1876 /* corresponding dec_unacked() in e_end_block()
1877 * respective _drbd_clear_done_ee */
1880 /* I really don't like it that the receiver thread
1881 * sends on the msock, but anyways */
1882 drbd_send_ack(mdev, P_RECV_ACK, e);
1889 if (mdev->state.pdsk == D_DISKLESS) {
1890 /* In case we have the only disk of the cluster, */
1891 drbd_set_out_of_sync(mdev, e->sector, e->size);
1892 e->flags |= EE_CALL_AL_COMPLETE_IO;
1893 drbd_al_begin_io(mdev, e->sector);
1896 e->private_bio->bi_rw = rw;
1897 drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
1898 /* accounting done in endio */
1900 maybe_kick_lo(mdev);
1904 /* yes, the epoch_size now is imbalanced.
1905 * but we drop the connection anyways, so we don't have a chance to
1906 * receive a barrier... atomic_inc(&mdev->epoch_size); */
1908 drbd_free_ee(mdev, e);
static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, digest_size;
	unsigned int fault_type;
	struct p_block_req *p =
		(struct p_block_req *)h;
	const int brps = sizeof(*p)-sizeof(*h);

	if (drbd_recv(mdev, h->payload, brps) != brps)
		return FALSE;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return FALSE;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return FALSE;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");
		drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
				 P_NEG_RS_DREPLY , p);
		return TRUE;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return FALSE;
	}

	e->private_bio->bi_rw = READ;
	e->private_bio->bi_end_io = drbd_endio_read_sec;
	switch (h->command) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		break;
	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* Eventually this should become asynchronous. Currently it
		 * blocks the whole receiver just to delay the reading of a
		 * resync data block.
		 * the drbd_work_queue mechanism is made for this...
		 */
		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted,
			 * probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		digest_size = h->length - brps;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		e->block_id = (u64)(unsigned long)di;
		if (h->command == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
		} else if (h->command == P_OV_REPLY) {
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
		}

		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted, probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;
	case P_OV_REQUEST:
		if (mdev->state.conn >= C_CONNECTED &&
		    mdev->state.conn != C_VERIFY_T)
			dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
				drbd_conn_str(mdev->state.conn));
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* Eventually this should become asynchronous. Currently it
		 * blocks the whole receiver just to delay the reading of a
		 * resync data block.
		 * the drbd_work_queue mechanism is made for this...
		 */
		if (!drbd_rs_begin_io(mdev, sector)) {
			/* we have been interrupted,
			 * probably connection lost! */
			D_ASSERT(signal_pending(current));
			goto out_free_e;
		}
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(h->command));
		fault_type = DRBD_FAULT_MAX;
	}

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	inc_unacked(mdev);

	drbd_generic_make_request(mdev, fault_type, e->private_bio);
	maybe_kick_lo(mdev);

	return TRUE;

out_free_e:
	kfree(di);
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return FALSE;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			self = drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
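
/* All three recover helpers share one convention: rv > 0 means the local
 * node wins the split brain (and becomes sync source), rv < 0 means the
 * peer wins (local node becomes sync target), -100 means no automatic
 * decision was possible.  A hypothetical helper, for illustration only: */
static const char *example_asb_verdict(int rv)
{
	if (rv == -100)
		return "split brain not auto-resolved";
	return rv > 0 ? "local node becomes sync source"
		      : "local node becomes sync target";
}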
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
 */
2258 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2263 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2264 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2267 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2271 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2272 peer != UUID_JUST_CREATED)
2276 if (self != UUID_JUST_CREATED &&
2277 (peer == UUID_JUST_CREATED || peer == (u64)0))
2281 int rct, dc; /* roles at crash time */
2283 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2285 if (mdev->agreed_pro_version < 91)
2288 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2289 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2290 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2291 drbd_uuid_set_bm(mdev, 0UL);
2293 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2294 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2297 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2304 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2306 if (mdev->agreed_pro_version < 91)
2309 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2310 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2311 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2313 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2314 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2315 mdev->p_uuid[UI_BITMAP] = 0UL;
2317 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2320 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2327 /* Common power [off|failure] */
2328 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2329 (mdev->p_uuid[UI_FLAGS] & 2);
2330 /* lowest bit is set when we were primary,
2331 * next bit (weight 2) is set when peer was primary */
2335 case 0: /* !self_pri && !peer_pri */ return 0;
2336 case 1: /* self_pri && !peer_pri */ return 1;
2337 case 2: /* !self_pri && peer_pri */ return -1;
2338 case 3: /* self_pri && peer_pri */
2339 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2345 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2350 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2352 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2353 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2355 /* The last P_SYNC_UUID did not get through. Undo the modifications
2356    the last start of resync as sync source made to the peer's UUIDs. */
2358 if (mdev->agreed_pro_version < 91)
2361 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2362 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2368 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2369 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2370 peer = mdev->p_uuid[i] & ~((u64)1);
2376 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2377 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2382 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2384 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2385 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2387 /* The last P_SYNC_UUID did not get through. Undo the modifications
2388    the last start of resync as sync source made to our UUIDs. */
2390 if (mdev->agreed_pro_version < 91)
2393 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2394 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2396 dev_info(DEV, "Undid last start of resync:\n");
2398 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2399 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2407 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2408 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2409 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2415 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2416 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2417 if (self == peer && self != ((u64)0))
2421 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2422 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2423 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2424 peer = mdev->p_uuid[j] & ~((u64)1);
2433 /* drbd_sync_handshake() returns the new conn state on success, or
2434 C_MASK (-1) on failure.
2436 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2437 enum drbd_disk_state peer_disk) __must_hold(local)
2440 enum drbd_conns rv = C_MASK;
2441 enum drbd_disk_state mydisk;
2443 mydisk = mdev->state.disk;
2444 if (mydisk == D_NEGOTIATING)
2445 mydisk = mdev->new_state_tmp.disk;
2447 dev_info(DEV, "drbd_sync_handshake:\n");
2448 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2449 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2450 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2452 hg = drbd_uuid_compare(mdev, &rule_nr);
2454 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2457 dev_alert(DEV, "Unrelated data, aborting!\n");
2461 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2465 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2466 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2467 int f = (hg == -100) || abs(hg) == 2;
2468 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2471 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2472 hg > 0 ? "source" : "target");
2475 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2476 int pcount = (mdev->state.role == R_PRIMARY)
2477 + (peer_role == R_PRIMARY);
2478 int forced = (hg == -100);
2482 hg = drbd_asb_recover_0p(mdev);
2485 hg = drbd_asb_recover_1p(mdev);
2488 hg = drbd_asb_recover_2p(mdev);
2491 if (abs(hg) < 100) {
2492 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2493 "automatically solved. Sync from %s node\n",
2494 pcount, (hg < 0) ? "peer" : "this");
2496 dev_warn(DEV, "Doing a full sync, since"
2497 " UUIDs where ambiguous.\n");
2504 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2506 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2510 dev_warn(DEV, "Split-Brain detected, manually solved. "
2511 "Sync from %s node\n",
2512 (hg < 0) ? "peer" : "this");
2516 /* FIXME this log message is not correct if we end up here
2517 * after an attempted attach on a diskless node.
2518 * We just refuse to attach -- well, we drop the "connection"
2519 * to that disk, in a way... */
2520 dev_alert(DEV, "Split-Brain detected, dropping connection!\n");
2521 drbd_khelper(mdev, "split-brain");
2525 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2526 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2530 if (hg < 0 && /* by intention we do not use mydisk here. */
2531 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2532 switch (mdev->net_conf->rr_conflict) {
2533 case ASB_CALL_HELPER:
2534 drbd_khelper(mdev, "pri-lost");
2536 case ASB_DISCONNECT:
2537 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2540 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2545 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2547 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2549 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2550 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2551 abs(hg) >= 2 ? "full" : "bit-map based");
2556 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2557 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2561 if (hg > 0) { /* become sync source. */
2563 } else if (hg < 0) { /* become sync target */
2567 if (drbd_bm_total_weight(mdev)) {
2568 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2569 drbd_bm_total_weight(mdev));
2576 /* returns 1 if invalid */
2577 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2579 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2580 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2581 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2584 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2585 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2586 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2589 /* everything else is valid if they are equal on both sides. */
2593 /* everything else is invalid. */
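/* Examples (illustrative): discard-remote here combined with discard-local
 * on the peer is consistent, since both pick the same victim; discard-remote
 * on both sides would make each node discard the other, so it is rejected;
 * any other policy, e.g. disconnect, is accepted when it matches on both
 * sides. */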
2597 static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2599 struct p_protocol *p = (struct p_protocol *)h;
2600 int header_size, data_size;
2601 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2602 int p_want_lose, p_two_primaries, cf;
2603 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2605 header_size = sizeof(*p) - sizeof(*h);
2606 data_size = h->length - header_size;
2608 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2611 p_proto = be32_to_cpu(p->protocol);
2612 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2613 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2614 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2615 p_two_primaries = be32_to_cpu(p->two_primaries);
2616 cf = be32_to_cpu(p->conn_flags);
2617 p_want_lose = cf & CF_WANT_LOSE;
2619 clear_bit(CONN_DRY_RUN, &mdev->flags);
2621 if (cf & CF_DRY_RUN)
2622 set_bit(CONN_DRY_RUN, &mdev->flags);
2624 if (p_proto != mdev->net_conf->wire_protocol) {
2625 dev_err(DEV, "incompatible communication protocols\n");
2629 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2630 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2634 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2635 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2639 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2640 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2644 if (p_want_lose && mdev->net_conf->want_lose) {
2645 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2649 if (p_two_primaries != mdev->net_conf->two_primaries) {
2650 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2654 if (mdev->agreed_pro_version >= 87) {
2655 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2657 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2660 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2661 if (strcmp(p_integrity_alg, my_alg)) {
2662 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2665 dev_info(DEV, "data-integrity-alg: %s\n",
2666 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2672 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2677 * input: alg name, feature name
2678 * return: NULL (alg name was "")
2679 * ERR_PTR(error) if something goes wrong
2680 * or the crypto hash ptr, if it worked out ok. */
2681 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2682 const char *alg, const char *name)
2684 struct crypto_hash *tfm;
2689 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2691 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2692 alg, name, PTR_ERR(tfm));
2695 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2696 crypto_free_hash(tfm);
2697 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2698 return ERR_PTR(-EINVAL);
2703 static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2706 struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
2707 unsigned int header_size, data_size, exp_max_sz;
2708 struct crypto_hash *verify_tfm = NULL;
2709 struct crypto_hash *csums_tfm = NULL;
2710 const int apv = mdev->agreed_pro_version;
2712 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2713 : apv == 88 ? sizeof(struct p_rs_param)
2715 : /* 89 */ sizeof(struct p_rs_param_89);
2717 if (h->length > exp_max_sz) {
2718 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2719 h->length, exp_max_sz);
2724 header_size = sizeof(struct p_rs_param) - sizeof(*h);
2725 data_size = h->length - header_size;
2726 } else /* apv >= 89 */ {
2727 header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
2728 data_size = h->length - header_size;
2729 D_ASSERT(data_size == 0);
2732 /* initialize verify_alg and csums_alg */
2733 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2735 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2738 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2742 if (data_size > SHARED_SECRET_MAX) {
2743 dev_err(DEV, "verify-alg too long, "
2744 "peer wants %u, accepting only %u byte\n",
2745 data_size, SHARED_SECRET_MAX);
2749 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2752 /* we expect NUL terminated string */
2753 /* but just in case someone tries to be evil */
2754 D_ASSERT(p->verify_alg[data_size-1] == 0);
2755 p->verify_alg[data_size-1] = 0;
2757 } else /* apv >= 89 */ {
2758 /* we still expect NUL terminated strings */
2759 /* but just in case someone tries to be evil */
2760 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2761 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2762 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2763 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2766 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2767 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2768 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2769 mdev->sync_conf.verify_alg, p->verify_alg);
2772 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2773 p->verify_alg, "verify-alg");
2774 if (IS_ERR(verify_tfm)) {
2780 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2781 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2782 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2783 mdev->sync_conf.csums_alg, p->csums_alg);
2786 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2787 p->csums_alg, "csums-alg");
2788 if (IS_ERR(csums_tfm)) {
2795 spin_lock(&mdev->peer_seq_lock);
2796 /* lock against drbd_nl_syncer_conf() */
2798 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2799 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2800 crypto_free_hash(mdev->verify_tfm);
2801 mdev->verify_tfm = verify_tfm;
2802 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2805 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2806 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2807 crypto_free_hash(mdev->csums_tfm);
2808 mdev->csums_tfm = csums_tfm;
2809 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2811 spin_unlock(&mdev->peer_seq_lock);
2816 /* just for completeness: actually not needed,
2817 * as this is not reached if csums_tfm was ok. */
2818 crypto_free_hash(csums_tfm);
2819 /* but free the verify_tfm again, if csums_tfm did not work out */
2820 crypto_free_hash(verify_tfm);
2821 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2825 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2827 /* sorry, we currently have no working implementation
2828 * of distributed TCQ */
2831 /* warn if the arguments differ by more than 12.5% */
2832 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2833 const char *s, sector_t a, sector_t b)
2836 if (a == 0 || b == 0)
2838 d = (a > b) ? (a - b) : (b - a);
2839 if (d > (a>>3) || d > (b>>3))
2840 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2841 (unsigned long long)a, (unsigned long long)b);
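/* Worked example: a>>3 is a/8, i.e. 12.5%. With a = 1000 sectors and
 * b = 800 sectors, d = 200 exceeds a>>3 = 125, so we warn; with b = 900,
 * d = 100 stays below both a>>3 = 125 and b>>3 = 112, and we stay quiet. */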
2844 static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2846 struct p_sizes *p = (struct p_sizes *)h;
2847 enum determine_dev_size dd = unchanged;
2848 unsigned int max_seg_s;
2849 sector_t p_size, p_usize, my_usize;
2850 int ldsc = 0; /* local disk size changed */
2851 enum drbd_conns nconn;
2853 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2854 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2857 p_size = be64_to_cpu(p->d_size);
2858 p_usize = be64_to_cpu(p->u_size);
2860 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2861 dev_err(DEV, "some backing storage is needed\n");
2862 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2866 /* just store the peer's disk size for now.
2867 * we still need to figure out whether we accept that. */
2868 mdev->p_size = p_size;
2870 #define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
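/* Examples: min_not_zero(0, 5) == 5, min_not_zero(3, 0) == 3,
 * min_not_zero(3, 5) == 3. A zero means "no size configured" here, so it
 * must lose against any configured value instead of clamping the result. */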
2871 if (get_ldev(mdev)) {
2872 warn_if_differ_considerably(mdev, "lower level device sizes",
2873 p_size, drbd_get_max_capacity(mdev->ldev));
2874 warn_if_differ_considerably(mdev, "user requested size",
2875 p_usize, mdev->ldev->dc.disk_size);
2877 /* if this is the first connect, or an otherwise expected
2878 * param exchange, choose the minimum */
2879 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2880 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2883 my_usize = mdev->ldev->dc.disk_size;
2885 if (mdev->ldev->dc.disk_size != p_usize) {
2886 mdev->ldev->dc.disk_size = p_usize;
2887 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2888 (unsigned long)mdev->ldev->dc.disk_size);
2891 /* Never shrink a device with usable data during connect.
2892 But allow online shrinking if we are connected. */
2893 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2894 drbd_get_capacity(mdev->this_bdev) &&
2895 mdev->state.disk >= D_OUTDATED &&
2896 mdev->state.conn < C_CONNECTED) {
2897 dev_err(DEV, "The peer's disk size is too small!\n");
2898 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2899 mdev->ldev->dc.disk_size = my_usize;
2907 if (get_ldev(mdev)) {
2908 dd = drbd_determin_dev_size(mdev, 0);
2910 if (dd == dev_size_error)
2914 /* I am diskless, need to accept the peer's size. */
2915 drbd_set_my_capacity(mdev, p_size);
2918 if (mdev->p_uuid && mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
2919 nconn = drbd_sync_handshake(mdev,
2920 mdev->state.peer, mdev->state.pdsk);
2923 if (nconn == C_MASK) {
2924 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2928 if (drbd_request_state(mdev, NS(conn, nconn)) < SS_SUCCESS) {
2929 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2934 if (get_ldev(mdev)) {
2935 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
2936 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2940 max_seg_s = be32_to_cpu(p->max_segment_size);
2941 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
2942 drbd_setup_queue_param(mdev, max_seg_s);
2944 drbd_setup_order_type(mdev, be32_to_cpu(p->queue_order_type));
2948 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
2949 if (be64_to_cpu(p->c_size) !=
2950 drbd_get_capacity(mdev->this_bdev) || ldsc) {
2951 /* we have different sizes, probably peer
2952 * needs to know my new size... */
2953 drbd_send_sizes(mdev, 0);
2955 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
2956 (dd == grew && mdev->state.conn == C_CONNECTED)) {
2957 if (mdev->state.pdsk >= D_INCONSISTENT &&
2958 mdev->state.disk >= D_INCONSISTENT)
2959 resync_after_online_grow(mdev);
2961 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
2968 static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
2970 struct p_uuids *p = (struct p_uuids *)h;
2974 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2975 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2978 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
2980 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
2981 p_uuid[i] = be64_to_cpu(p->uuid[i]);
2983 kfree(mdev->p_uuid);
2984 mdev->p_uuid = p_uuid;
2986 if (mdev->state.conn < C_CONNECTED &&
2987 mdev->state.disk < D_INCONSISTENT &&
2988 mdev->state.role == R_PRIMARY &&
2989 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
2990 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
2991 (unsigned long long)mdev->ed_uuid);
2992 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2996 if (get_ldev(mdev)) {
2997 int skip_initial_sync =
2998 mdev->state.conn == C_CONNECTED &&
2999 mdev->agreed_pro_version >= 90 &&
3000 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3001 (p_uuid[UI_FLAGS] & 8);
3002 if (skip_initial_sync) {
3003 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3004 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3005 "clear_n_write from receive_uuids");
3006 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3007 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3008 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3015 /* Before we test for the disk state, we should wait until a possibly
3016    ongoing cluster-wide state change is finished. That is important if
3017    we are primary and are detaching from our disk. We need to see the
3018    new disk state... */
3019 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3020 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3021 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3027 * convert_state() - Converts the peer's view of the cluster state to our point of view
3028 * @ps: The state as seen by the peer.
3030 static union drbd_state convert_state(union drbd_state ps)
3032 union drbd_state ms;
3034 static enum drbd_conns c_tab[] = {
3035 [C_CONNECTED] = C_CONNECTED,
3037 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3038 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3039 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3040 [C_VERIFY_S] = C_VERIFY_T,
3046 ms.conn = c_tab[ps.conn];
3051 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3056 static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3058 struct p_req_state *p = (struct p_req_state *)h;
3059 union drbd_state mask, val;
3062 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3063 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3066 mask.i = be32_to_cpu(p->mask);
3067 val.i = be32_to_cpu(p->val);
3069 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3070 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3071 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3075 mask = convert_state(mask);
3076 val = convert_state(val);
3078 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3080 drbd_send_sr_reply(mdev, rv);
3086 static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3088 struct p_state *p = (struct p_state *)h;
3089 enum drbd_conns nconn, oconn;
3090 union drbd_state ns, peer_state;
3091 enum drbd_disk_state real_peer_disk;
3094 ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3097 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3100 peer_state.i = be32_to_cpu(p->state);
3102 real_peer_disk = peer_state.disk;
3103 if (peer_state.disk == D_NEGOTIATING) {
3104 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3105 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3108 spin_lock_irq(&mdev->req_lock);
3110 oconn = nconn = mdev->state.conn;
3111 spin_unlock_irq(&mdev->req_lock);
3113 if (nconn == C_WF_REPORT_PARAMS)
3114 nconn = C_CONNECTED;
3116 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3117 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3118 int cr; /* consider resync */
3120 /* if we established a new connection */
3121 cr = (oconn < C_CONNECTED);
3122 /* if we had an established connection
3123 * and one of the nodes newly attaches a disk */
3124 cr |= (oconn == C_CONNECTED &&
3125 (peer_state.disk == D_NEGOTIATING ||
3126 mdev->state.disk == D_NEGOTIATING));
3127 /* if we have both been inconsistent, and the peer has been
3128 * forced to be UpToDate with --overwrite-data */
3129 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3130 /* if we had been plain connected, and the admin requested to
3131 * start a sync by "invalidate" or "invalidate-remote" */
3132 cr |= (oconn == C_CONNECTED &&
3133 (peer_state.conn >= C_STARTING_SYNC_S &&
3134 peer_state.conn <= C_WF_BITMAP_T));
3137 nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3140 if (nconn == C_MASK) {
3141 nconn = C_CONNECTED;
3142 if (mdev->state.disk == D_NEGOTIATING) {
3143 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3144 } else if (peer_state.disk == D_NEGOTIATING) {
3145 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3146 peer_state.disk = D_DISKLESS;
3147 real_peer_disk = D_DISKLESS;
3149 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3151 D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3152 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3158 spin_lock_irq(&mdev->req_lock);
3159 if (mdev->state.conn != oconn)
3161 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3162 ns.i = mdev->state.i;
3164 ns.peer = peer_state.role;
3165 ns.pdsk = real_peer_disk;
3166 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3167 if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3168 ns.disk = mdev->new_state_tmp.disk;
3170 rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
3172 spin_unlock_irq(&mdev->req_lock);
3174 if (rv < SS_SUCCESS) {
3175 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3179 if (oconn > C_WF_REPORT_PARAMS) {
3180 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3181 peer_state.disk != D_NEGOTIATING) {
3182 /* we want resync, peer has not yet decided to sync... */
3183 /* Nowadays only used when forcing a node into primary role and
3184 setting its disk to UpToDate with that */
3185 drbd_send_uuids(mdev);
3186 drbd_send_state(mdev);
3190 mdev->net_conf->want_lose = 0;
3192 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3197 static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3199 struct p_rs_uuid *p = (struct p_rs_uuid *)h;
3201 wait_event(mdev->misc_wait,
3202 mdev->state.conn == C_WF_SYNC_UUID ||
3203 mdev->state.conn < C_CONNECTED ||
3204 mdev->state.disk < D_NEGOTIATING);
3206 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3208 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3209 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3212 /* Here the _drbd_uuid_ functions are right, current should
3213 _not_ be rotated into the history */
3214 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3215 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3216 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3218 drbd_start_resync(mdev, C_SYNC_TARGET);
3222 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3227 enum receive_bitmap_ret { OK, DONE, FAILED };
3229 static enum receive_bitmap_ret
3230 receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
3231 unsigned long *buffer, struct bm_xfer_ctx *c)
3233 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3234 unsigned want = num_words * sizeof(long);
3236 if (want != h->length) {
3237 dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
3242 if (drbd_recv(mdev, buffer, want) != want)
3245 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3247 c->word_offset += num_words;
3248 c->bit_offset = c->word_offset * BITS_PER_LONG;
3249 if (c->bit_offset > c->bm_bits)
3250 c->bit_offset = c->bm_bits;
3255 static enum receive_bitmap_ret
3256 recv_bm_rle_bits(struct drbd_conf *mdev,
3257 struct p_compressed_bm *p,
3258 struct bm_xfer_ctx *c)
3260 struct bitstream bs;
3264 unsigned long s = c->bit_offset;
3266 int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3267 int toggle = DCBP_get_start(p);
3271 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3273 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3277 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3278 bits = vli_decode_bits(&rl, look_ahead);
3284 if (e >= c->bm_bits) {
3285 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3288 _drbd_bm_set_bits(mdev, s, e);
3292 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3293 have, bits, look_ahead,
3294 (unsigned int)(bs.cur.b - p->code),
3295 (unsigned int)bs.buf_len);
3298 look_ahead >>= bits;
3301 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3304 look_ahead |= tmp << have;
3309 bm_xfer_ctx_bit_to_word_offset(c);
3311 return (s == c->bm_bits) ? DONE : OK;
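/* A minimal sketch (illustrative; it uses plain run lengths instead of the
 * VLI bitstream decoded above) of the alternating clear/set run decode.
 * E.g. with start toggle 0 and runs 5, 3, 7, 2, bits 0..4 stay clear,
 * 5..7 are set, 8..14 stay clear, and 15..16 are set. */
static void rle_toggle_decode_sketch(const unsigned long *rl, int n, int toggle,
				     void (*set_bits)(unsigned long s, unsigned long e))
{
	unsigned long s = 0;
	int i;

	for (i = 0; i < n; i++, toggle = !toggle) {
		if (toggle && rl[i])
			set_bits(s, s + rl[i] - 1);	/* inclusive bit range */
		s += rl[i];
	}
}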
3314 static enum receive_bitmap_ret
3315 decode_bitmap_c(struct drbd_conf *mdev,
3316 struct p_compressed_bm *p,
3317 struct bm_xfer_ctx *c)
3319 if (DCBP_get_code(p) == RLE_VLI_Bits)
3320 return recv_bm_rle_bits(mdev, p, c);
3322 /* other variants had been implemented for evaluation,
3323 * but have been dropped as this one turned out to be "best"
3324 * during all our tests. */
3326 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3327 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3331 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3332 const char *direction, struct bm_xfer_ctx *c)
3334 /* what would it take to transfer it "plaintext" */
3335 unsigned plain = sizeof(struct p_header) *
3336 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3337 + c->bm_words * sizeof(long);
3338 unsigned total = c->bytes[0] + c->bytes[1];
3341 /* total cannot be zero, but just in case: */
3345 /* don't report if not compressed */
3349 /* total < plain. check for overflow, still */
3350 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3351 : (1000 * total / plain);
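/* Both branches compute the same per-mille ratio; the distinction only
 * avoids 32 bit overflow: 1000 * total would wrap once total exceeds
 * UINT_MAX/1000 (about 4.29 million bytes), so for large transfers we
 * divide plain by 1000 first instead. */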
3357 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3358 "total %u; compression: %u.%u%%\n",
3360 c->bytes[1], c->packets[1],
3361 c->bytes[0], c->packets[0],
3362 total, r/10, r % 10);
3365 /* Since we are processing the bitfield from lower addresses to higher,
3366    it does not matter whether we process it in 32 bit or 64 bit chunks,
3367    as long as it is little endian. (Understand it as a byte stream,
3368    beginning with the lowest byte...) If we used big endian, we would
3369    need to process it from the highest address to the lowest in order
3370    to be agnostic to the 32 vs 64 bit issue.
3372 returns 0 on failure, 1 if we successfully received it. */
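/* Worked example: the byte sequence 01 00 00 00 00 00 00 00, read little
 * endian, has bit 0 set no matter whether it is interpreted as two 32 bit
 * words or as one 64 bit word; with a big endian interpretation the
 * position of that bit would depend on the chosen word size. */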
3373 static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3375 struct bm_xfer_ctx c;
3377 enum receive_bitmap_ret ret;
3380 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3382 drbd_bm_lock(mdev, "receive bitmap");
3384 /* maybe we should use some per thread scratch page,
3385 * and allocate that during initial device creation? */
3386 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3388 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3392 c = (struct bm_xfer_ctx) {
3393 .bm_bits = drbd_bm_bits(mdev),
3394 .bm_words = drbd_bm_words(mdev),
3398 if (h->command == P_BITMAP) {
3399 ret = receive_bitmap_plain(mdev, h, buffer, &c);
3400 } else if (h->command == P_COMPRESSED_BITMAP) {
3401 /* MAYBE: sanity check that we speak proto >= 90,
3402 * and the feature is enabled! */
3403 struct p_compressed_bm *p;
3405 if (h->length > BM_PACKET_PAYLOAD_BYTES) {
3406 dev_err(DEV, "ReportCBitmap packet too large\n");
3409 /* use the page buffer */
3411 memcpy(p, h, sizeof(*h));
3412 if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
3414 if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3415 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
3418 ret = decode_bitmap_c(mdev, p, &c);
3420 dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command);
3424 c.packets[h->command == P_BITMAP]++;
3425 c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
3430 if (!drbd_recv_header(mdev, h))
3432 } while (ret == OK);
3436 INFO_bm_xfer_stats(mdev, "receive", &c);
3438 if (mdev->state.conn == C_WF_BITMAP_T) {
3439 ok = !drbd_send_bitmap(mdev);
3442 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3443 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3444 D_ASSERT(ok == SS_SUCCESS);
3445 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3446 /* admin may have requested C_DISCONNECTING,
3447 * other threads may have noticed network errors */
3448 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3449 drbd_conn_str(mdev->state.conn));
3454 drbd_bm_unlock(mdev);
3455 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3456 drbd_start_resync(mdev, C_SYNC_SOURCE);
3457 free_page((unsigned long) buffer);
3461 static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
3463 /* TODO zero copy sink :) */
3464 static char sink[128];
3467 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3468 h->command, h->length);
3472 want = min_t(int, size, sizeof(sink));
3473 r = drbd_recv(mdev, sink, want);
3474 ERR_IF(r <= 0) break;
3480 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3482 if (mdev->state.disk >= D_INCONSISTENT)
3485 /* Make sure we've acked all the TCP data associated
3486 * with the data requests being unplugged */
3487 drbd_tcp_quickack(mdev->data.socket);
3492 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
3494 static drbd_cmd_handler_f drbd_default_handler[] = {
3495 [P_DATA] = receive_Data,
3496 [P_DATA_REPLY] = receive_DataReply,
3497 [P_RS_DATA_REPLY] = receive_RSDataReply,
3498 [P_BARRIER] = receive_Barrier,
3499 [P_BITMAP] = receive_bitmap,
3500 [P_COMPRESSED_BITMAP] = receive_bitmap,
3501 [P_UNPLUG_REMOTE] = receive_UnplugRemote,
3502 [P_DATA_REQUEST] = receive_DataRequest,
3503 [P_RS_DATA_REQUEST] = receive_DataRequest,
3504 [P_SYNC_PARAM] = receive_SyncParam,
3505 [P_SYNC_PARAM89] = receive_SyncParam,
3506 [P_PROTOCOL] = receive_protocol,
3507 [P_UUIDS] = receive_uuids,
3508 [P_SIZES] = receive_sizes,
3509 [P_STATE] = receive_state,
3510 [P_STATE_CHG_REQ] = receive_req_state,
3511 [P_SYNC_UUID] = receive_sync_uuid,
3512 [P_OV_REQUEST] = receive_DataRequest,
3513 [P_OV_REPLY] = receive_DataRequest,
3514 [P_CSUM_RS_REQUEST] = receive_DataRequest,
3515 /* anything missing from this table is in
3516 * the asender_tbl, see get_asender_cmd */
3520 static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
3521 static drbd_cmd_handler_f *drbd_opt_cmd_handler;
3523 static void drbdd(struct drbd_conf *mdev)
3525 drbd_cmd_handler_f handler;
3526 struct p_header *header = &mdev->data.rbuf.header;
3528 while (get_t_state(&mdev->receiver) == Running) {
3529 drbd_thread_current_set_cpu(mdev);
3530 if (!drbd_recv_header(mdev, header)) {
3531 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3535 if (header->command < P_MAX_CMD)
3536 handler = drbd_cmd_handler[header->command];
3537 else if (P_MAY_IGNORE < header->command
3538 && header->command < P_MAX_OPT_CMD)
3539 handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3540 else if (header->command > P_MAX_OPT_CMD)
3541 handler = receive_skip;
3545 if (unlikely(!handler)) {
3546 dev_err(DEV, "unknown packet type %d, l: %d!\n",
3547 header->command, header->length);
3548 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3551 if (unlikely(!handler(mdev, header))) {
3552 dev_err(DEV, "error receiving %s, l: %d!\n",
3553 cmdname(header->command), header->length);
3554 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3560 static void drbd_fail_pending_reads(struct drbd_conf *mdev)
3562 struct hlist_head *slot;
3563 struct hlist_node *pos;
3564 struct hlist_node *tmp;
3565 struct drbd_request *req;
3569 * Application READ requests
3571 spin_lock_irq(&mdev->req_lock);
3572 for (i = 0; i < APP_R_HSIZE; i++) {
3573 slot = mdev->app_reads_hash+i;
3574 hlist_for_each_entry_safe(req, pos, tmp, slot, colision) {
3575 /* it may (but should not any longer!)
3576 * be on the work queue; if that assert triggers,
3577 * we need to also grab the
3578 * spin_lock_irq(&mdev->data.work.q_lock);
3579 * and list_del_init here. */
3580 D_ASSERT(list_empty(&req->w.list));
3581 /* It would be nice to complete outside of spinlock.
3582 * But this is easier for now. */
3583 _req_mod(req, connection_lost_while_pending);
3586 for (i = 0; i < APP_R_HSIZE; i++)
3587 if (!hlist_empty(mdev->app_reads_hash+i))
3588 dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: "
3589 "%p, should be NULL\n", i, mdev->app_reads_hash[i].first);
3591 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
3592 spin_unlock_irq(&mdev->req_lock);
3595 void drbd_flush_workqueue(struct drbd_conf *mdev)
3597 struct drbd_wq_barrier barr;
3599 barr.w.cb = w_prev_work_done;
3600 init_completion(&barr.done);
3601 drbd_queue_work(&mdev->data.work, &barr.w);
3602 wait_for_completion(&barr.done);
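/* The barrier work item above does nothing by itself; its completion is the
 * point. Because the worker drains its queue in order, returning from
 * wait_for_completion() guarantees that every work item queued before this
 * call has been processed. */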
3605 static void drbd_disconnect(struct drbd_conf *mdev)
3607 enum drbd_fencing_p fp;
3608 union drbd_state os, ns;
3609 int rv = SS_UNKNOWN_ERROR;
3612 if (mdev->state.conn == C_STANDALONE)
3614 if (mdev->state.conn >= C_WF_CONNECTION)
3615 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3616 drbd_conn_str(mdev->state.conn));
3618 /* asender does not clean up anything. it must not interfere, either */
3619 drbd_thread_stop(&mdev->asender);
3620 drbd_free_sock(mdev);
3622 spin_lock_irq(&mdev->req_lock);
3623 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3624 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3625 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3626 spin_unlock_irq(&mdev->req_lock);
3628 /* We do not have data structures that would allow us to
3629 * get the rs_pending_cnt down to 0 again.
3630 * * On C_SYNC_TARGET we do not have any data structures describing
3631 * the pending RSDataRequest's we have sent.
3632 * * On C_SYNC_SOURCE there is no data structure that tracks
3633 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3634 * And no, it is not the sum of the reference counts in the
3635 * resync_LRU. The resync_LRU tracks the whole operation including
3636 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3638 drbd_rs_cancel_all(mdev);
3640 mdev->rs_failed = 0;
3641 atomic_set(&mdev->rs_pending_cnt, 0);
3642 wake_up(&mdev->misc_wait);
3644 /* make sure syncer is stopped and w_resume_next_sg queued */
3645 del_timer_sync(&mdev->resync_timer);
3646 set_bit(STOP_SYNC_TIMER, &mdev->flags);
3647 resync_timer_fn((unsigned long)mdev);
3649 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3650 * w_make_resync_request etc. which may still be on the worker queue
3651 * to be "canceled" */
3652 drbd_flush_workqueue(mdev);
3654 /* This also does reclaim_net_ee(). If we do this too early, we might
3655 * miss some resync ee and pages.*/
3656 drbd_process_done_ee(mdev);
3658 kfree(mdev->p_uuid);
3659 mdev->p_uuid = NULL;
3661 if (!mdev->state.susp)
3664 drbd_fail_pending_reads(mdev);
3666 dev_info(DEV, "Connection closed\n");
3671 if (get_ldev(mdev)) {
3672 fp = mdev->ldev->dc.fencing;
3676 if (mdev->state.role == R_PRIMARY) {
3677 if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
3678 enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
3679 drbd_request_state(mdev, NS(pdsk, nps));
3683 spin_lock_irq(&mdev->req_lock);
3685 if (os.conn >= C_UNCONNECTED) {
3686 /* Do not restart in case we are C_DISCONNECTING */
3688 ns.conn = C_UNCONNECTED;
3689 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3691 spin_unlock_irq(&mdev->req_lock);
3693 if (os.conn == C_DISCONNECTING) {
3694 struct hlist_head *h;
3695 wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
3697 /* we must not free the tl_hash
3698 * while application io is still on the fly */
3699 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0);
3701 spin_lock_irq(&mdev->req_lock);
3703 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3705 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3706 (int)(h - mdev->ee_hash), h->first);
3707 kfree(mdev->ee_hash);
3708 mdev->ee_hash = NULL;
3709 mdev->ee_hash_s = 0;
3712 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3714 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3715 (int)(h - mdev->tl_hash), h->first);
3716 kfree(mdev->tl_hash);
3717 mdev->tl_hash = NULL;
3718 mdev->tl_hash_s = 0;
3719 spin_unlock_irq(&mdev->req_lock);
3721 crypto_free_hash(mdev->cram_hmac_tfm);
3722 mdev->cram_hmac_tfm = NULL;
3724 kfree(mdev->net_conf);
3725 mdev->net_conf = NULL;
3726 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3729 /* tcp_close and release of sendpage pages can be deferred. I don't
3730 * want to use SO_LINGER, because apparently it can be deferred for
3731 * more than 20 seconds (longest time I checked).
3733 * Actually we don't care for exactly when the network stack does its
3734 * put_page(), but release our reference on these pages right here.
3736 i = drbd_release_ee(mdev, &mdev->net_ee);
3738 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3739 i = atomic_read(&mdev->pp_in_use);
3741 dev_info(DEV, "pp_in_use = %u, expected 0\n", i);
3743 D_ASSERT(list_empty(&mdev->read_ee));
3744 D_ASSERT(list_empty(&mdev->active_ee));
3745 D_ASSERT(list_empty(&mdev->sync_ee));
3746 D_ASSERT(list_empty(&mdev->done_ee));
3748 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3749 atomic_set(&mdev->current_epoch->epoch_size, 0);
3750 D_ASSERT(list_empty(&mdev->current_epoch->list));
3754 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3755 * we can agree on is stored in agreed_pro_version.
3757 * feature flags and the reserved array should be enough room for future
3758 * enhancements of the handshake protocol, and possible plugins...
3760 * for now, they are expected to be zero, but ignored.
3762 static int drbd_send_handshake(struct drbd_conf *mdev)
3764 /* ASSERT current == mdev->receiver ... */
3765 struct p_handshake *p = &mdev->data.sbuf.handshake;
3768 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3769 dev_err(DEV, "interrupted during initial handshake\n");
3770 return 0; /* interrupted. not ok. */
3773 if (mdev->data.socket == NULL) {
3774 mutex_unlock(&mdev->data.mutex);
3778 memset(p, 0, sizeof(*p));
3779 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3780 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3781 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
3782 (struct p_header *)p, sizeof(*p), 0);
3783 mutex_unlock(&mdev->data.mutex);
3789 * 1 yes, we have a valid connection
3790 * 0 oops, did not work out, please try again
3791 * -1 peer talks different language,
3792 * no point in trying again, please go standalone.
3794 static int drbd_do_handshake(struct drbd_conf *mdev)
3796 /* ASSERT current == mdev->receiver ... */
3797 struct p_handshake *p = &mdev->data.rbuf.handshake;
3798 const int expect = sizeof(struct p_handshake)
3799 -sizeof(struct p_header);
3802 rv = drbd_send_handshake(mdev);
3806 rv = drbd_recv_header(mdev, &p->head);
3810 if (p->head.command != P_HAND_SHAKE) {
3811 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3812 cmdname(p->head.command), p->head.command);
3816 if (p->head.length != expect) {
3817 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3818 expect, p->head.length);
3822 rv = drbd_recv(mdev, &p->head.payload, expect);
3825 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3829 p->protocol_min = be32_to_cpu(p->protocol_min);
3830 p->protocol_max = be32_to_cpu(p->protocol_max);
3831 if (p->protocol_max == 0)
3832 p->protocol_max = p->protocol_min;
3834 if (PRO_VERSION_MAX < p->protocol_min ||
3835 PRO_VERSION_MIN > p->protocol_max)
3838 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3840 dev_info(DEV, "Handshake successful: "
3841 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3846 dev_err(DEV, "incompatible DRBD dialects: "
3847 "I support %d-%d, peer supports %d-%d\n",
3848 PRO_VERSION_MIN, PRO_VERSION_MAX,
3849 p->protocol_min, p->protocol_max);
3853 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3854 static int drbd_do_auth(struct drbd_conf *mdev)
3856 dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
3857 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3861 #define CHALLENGE_LEN 64
3865 0 - failed, try again (network error),
3866 -1 - auth failed, don't try again.
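/* Outline of the exchange implemented below (both peers run it
 * symmetrically): send a random challenge, receive the peer's challenge,
 * reply with HMAC(shared secret, peer's challenge), then receive the peer's
 * response and compare it against HMAC(shared secret, my_challenge).
 * Only a peer that knows the shared secret can produce a matching response;
 * the secret itself never crosses the wire. */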
3869 static int drbd_do_auth(struct drbd_conf *mdev)
3871 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3872 struct scatterlist sg;
3873 char *response = NULL;
3874 char *right_response = NULL;
3875 char *peers_ch = NULL;
3877 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3878 unsigned int resp_size;
3879 struct hash_desc desc;
3882 desc.tfm = mdev->cram_hmac_tfm;
3885 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
3886 (u8 *)mdev->net_conf->shared_secret, key_len);
3888 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
3893 get_random_bytes(my_challenge, CHALLENGE_LEN);
3895 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
3899 rv = drbd_recv_header(mdev, &p);
3903 if (p.command != P_AUTH_CHALLENGE) {
3904 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
3905 cmdname(p.command), p.command);
3910 if (p.length > CHALLENGE_LEN*2) {
3911 dev_err(DEV, "expected AuthChallenge payload too big.\n");
3916 peers_ch = kmalloc(p.length, GFP_NOIO);
3917 if (peers_ch == NULL) {
3918 dev_err(DEV, "kmalloc of peers_ch failed\n");
3923 rv = drbd_recv(mdev, peers_ch, p.length);
3925 if (rv != p.length) {
3926 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
3931 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
3932 response = kmalloc(resp_size, GFP_NOIO);
3933 if (response == NULL) {
3934 dev_err(DEV, "kmalloc of response failed\n");
3939 sg_init_table(&sg, 1);
3940 sg_set_buf(&sg, peers_ch, p.length);
3942 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
3944 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3949 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
3953 rv = drbd_recv_header(mdev, &p);
3957 if (p.command != P_AUTH_RESPONSE) {
3958 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
3959 cmdname(p.command), p.command);
3964 if (p.length != resp_size) {
3965 dev_err(DEV, "expected AuthResponse payload of wrong size\n");
3970 rv = drbd_recv(mdev, response, resp_size);
3972 if (rv != resp_size) {
3973 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
3978 right_response = kmalloc(resp_size, GFP_NOIO);
3979 if (right_response == NULL) {
3980 dev_err(DEV, "kmalloc of right_response failed\n");
3985 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
3987 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
3989 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
3994 rv = !memcmp(response, right_response, resp_size);
3997 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
3998 resp_size, mdev->net_conf->cram_hmac_alg);
4005 kfree(right_response);
4011 int drbdd_init(struct drbd_thread *thi)
4013 struct drbd_conf *mdev = thi->mdev;
4014 unsigned int minor = mdev_to_minor(mdev);
4017 sprintf(current->comm, "drbd%d_receiver", minor);
4019 dev_info(DEV, "receiver (re)started\n");
4022 h = drbd_connect(mdev);
4024 drbd_disconnect(mdev);
4025 __set_current_state(TASK_INTERRUPTIBLE);
4026 schedule_timeout(HZ);
4029 dev_warn(DEV, "Discarding network configuration.\n");
4030 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4035 if (get_net_conf(mdev)) {
4041 drbd_disconnect(mdev);
4043 dev_info(DEV, "receiver terminated\n");
4047 /* ********* acknowledge sender ******** */
4049 static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4051 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4053 int retcode = be32_to_cpu(p->retcode);
4055 if (retcode >= SS_SUCCESS) {
4056 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4058 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4059 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4060 drbd_set_st_err_str(retcode), retcode);
4062 wake_up(&mdev->state_wait);
4067 static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
4069 return drbd_send_ping_ack(mdev);
4073 static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4075 /* restore idle timeout */
4076 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4077 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4078 wake_up(&mdev->misc_wait);
4083 static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4085 struct p_block_ack *p = (struct p_block_ack *)h;
4086 sector_t sector = be64_to_cpu(p->sector);
4087 int blksize = be32_to_cpu(p->blksize);
4089 D_ASSERT(mdev->agreed_pro_version >= 89);
4091 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4093 drbd_rs_complete_io(mdev, sector);
4094 drbd_set_in_sync(mdev, sector, blksize);
4095 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4096 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4097 dec_rs_pending(mdev);
4102 /* when we receive the ACK for a write request,
4103 * verify that we actually know about it */
4104 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4105 u64 id, sector_t sector)
4107 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4108 struct hlist_node *n;
4109 struct drbd_request *req;
4111 hlist_for_each_entry(req, n, slot, colision) {
4112 if ((unsigned long)req == (unsigned long)id) {
4113 if (req->sector != sector) {
4114 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4115 "wrong sector (%llus versus %llus)\n", req,
4116 (unsigned long long)req->sector,
4117 (unsigned long long)sector);
4123 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4124 (void *)(unsigned long)id, (unsigned long long)sector);
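/* Note: the block_id carried in the ACK is the address of our original
 * request structure, so the cast above recovers the request pointer
 * directly; the sector comparison guards against a stale or corrupted id
 * hitting an unrelated request. */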
4128 typedef struct drbd_request *(req_validator_fn)
4129 (struct drbd_conf *mdev, u64 id, sector_t sector);
4131 static int validate_req_change_req_state(struct drbd_conf *mdev,
4132 u64 id, sector_t sector, req_validator_fn validator,
4133 const char *func, enum drbd_req_event what)
4135 struct drbd_request *req;
4136 struct bio_and_error m;
4138 spin_lock_irq(&mdev->req_lock);
4139 req = validator(mdev, id, sector);
4140 if (unlikely(!req)) {
4141 spin_unlock_irq(&mdev->req_lock);
4142 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4145 __req_mod(req, what, &m);
4146 spin_unlock_irq(&mdev->req_lock);
4149 complete_master_bio(mdev, &m);
4153 static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4155 struct p_block_ack *p = (struct p_block_ack *)h;
4156 sector_t sector = be64_to_cpu(p->sector);
4157 int blksize = be32_to_cpu(p->blksize);
4158 enum drbd_req_event what;
4160 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4162 if (is_syncer_block_id(p->block_id)) {
4163 drbd_set_in_sync(mdev, sector, blksize);
4164 dec_rs_pending(mdev);
4167 switch (be16_to_cpu(h->command)) {
4168 case P_RS_WRITE_ACK:
4169 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4170 what = write_acked_by_peer_and_sis;
4173 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4174 what = write_acked_by_peer;
4177 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4178 what = recv_acked_by_peer;
4181 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4182 what = conflict_discarded_by_peer;
4189 return validate_req_change_req_state(mdev, p->block_id, sector,
4190 _ack_id_to_req, __func__, what);
4193 static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4195 struct p_block_ack *p = (struct p_block_ack *)h;
4196 sector_t sector = be64_to_cpu(p->sector);
4198 if (__ratelimit(&drbd_ratelimit_state))
4199 dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");
4201 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4203 if (is_syncer_block_id(p->block_id)) {
4204 int size = be32_to_cpu(p->blksize);
4205 dec_rs_pending(mdev);
4206 drbd_rs_failed_io(mdev, sector, size);
4209 return validate_req_change_req_state(mdev, p->block_id, sector,
4210 _ack_id_to_req, __func__, neg_acked);
4213 static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4215 struct p_block_ack *p = (struct p_block_ack *)h;
4216 sector_t sector = be64_to_cpu(p->sector);
4218 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4219 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4220 (unsigned long long)sector, be32_to_cpu(p->blksize));
4222 return validate_req_change_req_state(mdev, p->block_id, sector,
4223 _ar_id_to_req, __func__, neg_acked);
4226 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4230 struct p_block_ack *p = (struct p_block_ack *)h;
4232 sector = be64_to_cpu(p->sector);
4233 size = be32_to_cpu(p->blksize);
4234 D_ASSERT(p->block_id == ID_SYNCER);
4236 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4238 dec_rs_pending(mdev);
4240 if (get_ldev_if_state(mdev, D_FAILED)) {
4241 drbd_rs_complete_io(mdev, sector);
4242 drbd_rs_failed_io(mdev, sector, size);
4249 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4251 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4253 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4258 static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4260 struct p_block_ack *p = (struct p_block_ack *)h;
4261 struct drbd_work *w;
4265 sector = be64_to_cpu(p->sector);
4266 size = be32_to_cpu(p->blksize);
4268 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4270 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4271 drbd_ov_oos_found(mdev, sector, size);
4275 drbd_rs_complete_io(mdev, sector);
4276 dec_rs_pending(mdev);
4278 if (--mdev->ov_left == 0) {
4279 w = kmalloc(sizeof(*w), GFP_NOIO);
4281 w->cb = w_ov_finished;
4282 drbd_queue_work_front(&mdev->data.work, w);
4284 dev_err(DEV, "kmalloc(w) failed.");
4286 drbd_resync_finished(mdev);
4292 struct asender_cmd {
4294 int (*process)(struct drbd_conf *mdev, struct p_header *h);
4297 static struct asender_cmd *get_asender_cmd(int cmd)
4299 static struct asender_cmd asender_tbl[] = {
4300 /* anything missing from this table is in
4301 * the drbd_cmd_handler (drbd_default_handler) table,
4302 * see the beginning of drbdd() */
4303 [P_PING] = { sizeof(struct p_header), got_Ping },
4304 [P_PING_ACK] = { sizeof(struct p_header), got_PingAck },
4305 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4306 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4307 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4308 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4309 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4310 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4311 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4312 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4313 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4314 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4315 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
4316 [P_MAX_CMD] = { 0, NULL },
4318 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4320 return &asender_tbl[cmd];
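/* Example: for P_PING the entry above is just sizeof(struct p_header), so
 * the receive loop in drbd_asender() below reads no payload before calling
 * got_Ping(); for the various block ACKs it keeps reading until a full
 * struct p_block_ack has arrived. */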
4323 int drbd_asender(struct drbd_thread *thi)
4325 struct drbd_conf *mdev = thi->mdev;
4326 struct p_header *h = &mdev->meta.rbuf.header;
4327 struct asender_cmd *cmd = NULL;
4332 int expect = sizeof(struct p_header);
4335 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4337 current->policy = SCHED_RR; /* Make this a realtime task! */
4338 current->rt_priority = 2; /* more important than all other tasks */
4340 while (get_t_state(thi) == Running) {
4341 drbd_thread_current_set_cpu(mdev);
4342 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4343 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4344 mdev->meta.socket->sk->sk_rcvtimeo =
4345 mdev->net_conf->ping_timeo*HZ/10;
4348 /* conditionally cork;
4349 * it may hurt latency if we cork without much to send */
4350 if (!mdev->net_conf->no_cork &&
4351 3 < atomic_read(&mdev->unacked_cnt))
4352 drbd_tcp_cork(mdev->meta.socket);
4354 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4355 flush_signals(current);
4356 if (!drbd_process_done_ee(mdev)) {
4357 dev_err(DEV, "process_done_ee() = NOT_OK\n");
4360 /* to avoid race with newly queued ACKs */
4361 set_bit(SIGNAL_ASENDER, &mdev->flags);
4362 spin_lock_irq(&mdev->req_lock);
4363 empty = list_empty(&mdev->done_ee);
4364 spin_unlock_irq(&mdev->req_lock);
4365 /* new ack may have been queued right here,
4366 * but then there is also a signal pending,
4367 * and we start over... */
4371 /* but unconditionally uncork unless disabled */
4372 if (!mdev->net_conf->no_cork)
4373 drbd_tcp_uncork(mdev->meta.socket);
4375 /* short circuit, recv_msg would return EINTR anyway. */
4376 if (signal_pending(current))
4379 rv = drbd_recv_short(mdev, mdev->meta.socket,
4380 buf, expect-received, 0);
4381 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4383 flush_signals(current);
4386 * -EINTR (on meta) we got a signal
4387 * -EAGAIN (on meta) rcvtimeo expired
4388 * -ECONNRESET other side closed the connection
4389 * -ERESTARTSYS (on data) we got a signal
4390 * rv < 0 other than above: unexpected error!
4391 * rv == expected: full header or command
4392 * rv < expected: "woken" by signal during receive
4393 * rv == 0 : "connection shut down by peer"
4395 if (likely(rv > 0)) {
4398 } else if (rv == 0) {
4399 dev_err(DEV, "meta connection shut down by peer.\n");
4401 } else if (rv == -EAGAIN) {
4402 if (mdev->meta.socket->sk->sk_rcvtimeo ==
4403 mdev->net_conf->ping_timeo*HZ/10) {
4404 dev_err(DEV, "PingAck did not arrive in time.\n");
4407 set_bit(SEND_PING, &mdev->flags);
4409 } else if (rv == -EINTR) {
4412 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4416 if (received == expect && cmd == NULL) {
4417 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4418 dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
4419 (long)be32_to_cpu(h->magic),
4420 h->command, h->length);
4423 cmd = get_asender_cmd(be16_to_cpu(h->command));
4424 len = be16_to_cpu(h->length);
4425 if (unlikely(cmd == NULL)) {
4426 dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
4427 (long)be32_to_cpu(h->magic),
4428 h->command, h->length);
4431 expect = cmd->pkt_size;
4432 ERR_IF(len != expect-sizeof(struct p_header))
4435 if (received == expect) {
4436 D_ASSERT(cmd != NULL);
4437 if (!cmd->process(mdev, h))
4442 expect = sizeof(struct p_header);
4449 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4453 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4455 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4457 D_ASSERT(mdev->state.conn < C_CONNECTED);
4458 dev_info(DEV, "asender terminated\n");