This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

drbd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

drbd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
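/*
 * Illustrative sketch, not part of the original source: walking such a
 * chain, assuming page_chain_next() is (struct page *)page_private(page)
 * and that a private value of 0 terminates the list:
 *
 *	struct page *tmp;
 *	for (tmp = chain; tmp; tmp = page_chain_next(tmp))
 *		handle_one_page(tmp);	(handle_one_page is a placeholder)
 */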
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
		tmp = page_chain_next(page);
			break; /* found sufficient pages */
	/* insufficient pages, don't use any of them. */
	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	tmp = page_chain_tail(page, NULL);
	spin_lock(&drbd_pp_lock);
	page_chain_add(&drbd_pp_pool, page, tmp);
	spin_unlock(&drbd_pp_lock);
static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
		struct list_head *to_be_freed)
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */
	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);
/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
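/*
 * Hedged usage sketch (illustrative only, not from the original source):
 * a receiver needing a 16 KiB buffer would ask for four 4 KiB pages and
 * later return them through the matching free path, e.g.
 *
 *	struct page *chain = drbd_alloc_pages(mdev, 4, true);
 *	if (chain) {
 *		... fill the chain by following page->private ...
 *		drbd_free_pages(mdev, chain, 0);
 *	}
 *
 * With retry set, the call may block in drbd_pp_wait until other peer
 * requests release pages, so it must only be used where sleeping is safe.
 */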
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

	atomic_add(number, &mdev->pp_in_use);
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_finish_peer_reqs()
 drbd_wait_ee_list_empty()
*/
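/*
 * A minimal sketch of the locking rule above (illustrative, not from the
 * original source): the underscore variant is called with the req_lock
 * held, the plain variant takes it itself:
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 *
 * versus, from a context not holding the lock:
 *
 *	drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 */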
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
	struct drbd_peer_request *peer_req;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);

	page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	/*
	 * The block_id is opaque to the receiver. It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	mempool_free(peer_req, drbd_ee_mempool);
void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);
/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, e_end_resync_block, and e_send_discard_write;
	 * all of them ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		drbd_free_peer_req(mdev, peer_req);

	wake_up(&mdev->ee_wait);
static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
/* See also kernel_accept(), which is only present since 2.6.18.
 * We also want to log exactly which part of it failed. */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
	struct sock *sk = sock->sk;

	err = sock->ops->listen(sock, 5);

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,

	err = sock->ops->accept(sock, *newsock, 0);
		sock_release(*newsock);

	(*newsock)->ops = sock->ops;
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL

	rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);

	/*
	 * ECONNRESET	other side closed the connection
	 * ERESTARTSYS	(on sock) we got a signal
	 */
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		conn_info(tconn, "sock was shut down by peer\n");

		/* signal came in, or peer/link went down,
		 * after we read a partial message
		 */
		/* D_ASSERT(signal_pending(current)); */

		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv(tconn, buf, size);

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);

/*
 * On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
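/*
 * Explanatory note, not from the original source: the open-coded
 * assignments below are roughly what a userspace
 * setsockopt(fd, SOL_SOCKET, SO_SNDBUF, ...) would do, minus the
 * kernel's usual doubling of the requested value; setting the
 * SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK bits additionally keeps TCP buffer
 * auto-tuning from changing the sizes again later.
 */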
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;

static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	nc = rcu_dereference(tconn->net_conf);

	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &tconn->my_addr, my_addr_len);

	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so Linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;

	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;

		conn_err(tconn, "%s failed, err = %d\n", what, err);

		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
	int timeo, err, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	struct socket *s_estab = NULL, *s_listen;
	struct sockaddr_in6 my_addr;

	nc = rcu_dereference(tconn->net_conf);

	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &tconn->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);

	timeo = connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);

	err = drbd_accept(&what, s_listen, &s_estab);

		sock_release(s_listen);

		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
	if (!conn_prepare_command(tconn, sock))
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {

	err = decode_header(tconn, tconn->data.rbuf, &pi);

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
/* Gets called if a connection is established, or if a new minor gets created */
int drbd_connected(struct drbd_conf *mdev)
	atomic_set(&mdev->packet_seq, 0);

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	err = drbd_send_sizes(mdev, 0, 0);
	err = drbd_send_uuids(mdev);
	err = drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
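/*
 * Hedged sketch of how a caller is expected to consume this contract
 * (names are illustrative, not the actual receiver loop):
 *
 *	int h;
 *	do
 *		h = conn_connect(tconn);
 *	while (h == 0);				(0: transient, retry)
 *	if (h < 0)
 *		give_up_and_go_standalone(tconn);	(placeholder)
 */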
static int conn_connect(struct drbd_tconn *tconn)
	struct socket *sock, *msock;
	struct drbd_conf *mdev;
	int vnr, timeout, try, h, ok;
	bool discard_my_data;

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);

	/* Assume that the peer only understands protocol 80 until we know better. */
	tconn->agreed_pro_version = 80;

		/* 3 tries, this should take less than a second! */
		s = drbd_try_connect(tconn);
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);

			if (!tconn->data.socket) {
				tconn->data.socket = s;
				send_first_packet(tconn, &tconn->data, P_INITIAL_DATA);
			} else if (!tconn->meta.socket) {
				tconn->meta.socket = s;
				send_first_packet(tconn, &tconn->meta, P_INITIAL_META);
				conn_err(tconn, "Logic error in conn_connect()\n");
				goto out_release_sockets;

		if (tconn->data.socket && tconn->meta.socket) {
			schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(&tconn->data.socket);
			ok = drbd_socket_okay(&tconn->meta.socket) && ok;

		s = drbd_wait_for_connect(tconn);
			try = receive_first_packet(tconn, s);
			drbd_socket_okay(&tconn->data.socket);
			drbd_socket_okay(&tconn->meta.socket);
				if (tconn->data.socket) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(tconn->data.socket);
				tconn->data.socket = s;
				if (tconn->meta.socket) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(tconn->meta.socket);
				tconn->meta.socket = s;
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				conn_warn(tconn, "Error receiving initial packet\n");

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		if (tconn->data.socket && tconn->meta.socket) {
			ok = drbd_socket_okay(&tconn->data.socket);
			ok = drbd_socket_okay(&tconn->meta.socket) && ok;

	sock  = tconn->data.socket;
	msock = tconn->meta.socket;

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/*
	 * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */

	nc = rcu_dereference(tconn->net_conf);

	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;

	msock->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
			conn_err(tconn, "Authentication of peer failed\n");
			conn_err(tconn, "Authentication of peer failed, trying again.\n");

	sock->sk->sk_sndtimeo = timeout;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &mdev->flags);
			clear_bit(DISCARD_MY_DATA, &mdev->flags);

		drbd_connected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);

	if (conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE) < SS_SUCCESS)

	drbd_thread_start(&tconn->asender);

	mutex_lock(&tconn->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	tconn->net_conf->discard_my_data = 0;
	mutex_unlock(&tconn->conf_update);

out_release_sockets:
	if (tconn->data.socket) {
		sock_release(tconn->data.socket);
		tconn->data.socket = NULL;
	if (tconn->meta.socket) {
		sock_release(tconn->meta.socket);
		tconn->meta.socket = NULL;
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
			conn_err(tconn, "Header padding is not zero\n");
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
	pi->data = header + header_size;
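/*
 * Illustrative summary, not from the original source: decode_header()
 * above dispatches on three wire header layouts, selected by magic value
 * and agreed protocol version: p_header100 (DRBD_MAGIC_100, carries a
 * 16-bit volume number and a 32-bit length), p_header95 (DRBD_MAGIC_BIG,
 * 32-bit length, no volume number) and p_header80 (DRBD_MAGIC, 16-bit
 * length). In all cases pi->data is left pointing just behind the
 * fixed-size header.
 */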
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
	void *buffer = tconn->data.rbuf;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));

	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;

static void drbd_flush(struct drbd_conf *mdev)
	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
			atomic_dec(&epoch->active);
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
		case EV_BECAME_LAST:

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				if (rv == FE_STILL_LIVE)
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
				wake_up(&mdev->ee_wait);

	spin_unlock(&mdev->epoch_lock);
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
	struct disk_conf *dc;
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",

	pwo = mdev->write_ordering;

	dc = rcu_dereference(mdev->ldev->disk_conf);

	if (wo == WO_bdev_flush && !dc->disk_flushes)
	if (wo == WO_drain_io && !dc->disk_drain)

	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);

/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 * single page to an empty bio (which should never happen and likely indicates
 * that the lower level IO stack is in some way broken). This has been observed
 * on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
	struct bio *bios = NULL;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);

	D_ASSERT(page == NULL);

	atomic_set(&peer_req->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;
static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
		wake_up(&mdev->misc_wait);

static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct p_barrier *p = pi->data;
	struct drbd_epoch *epoch;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * actually written. */
	switch (mdev->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");

		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		/* The current_epoch got recycled while we allocated this one... */
	spin_unlock(&mdev->epoch_lock);
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		/*
		 * FIXME: Receive the incoming digest into the receive buffer
		 * here, together with its struct p_data?
		 */
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	if (!expect(data_size != 0))
	if (!expect(IS_ALIGNED(data_size, 512)))
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);

	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
			drbd_free_peer_req(mdev, peer_req);

		drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_peer_req(mdev, peer_req);

	mdev->recv_cnt += data_size>>9;
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
	page = drbd_alloc_pages(mdev, 1, 1);

		unsigned int len = min_t(int, data_size, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);

	drbd_free_pages(mdev, page, 0);

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;
	int dgs, err, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	/* optimistically update recv_cnt. if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
		expect = min_t(int, data_size, bvec->bv_len);
		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
		kunmap(bvec->bv_page);
		data_size -= expect;

	drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
	if (memcmp(dig_in, dig_vv, dgs)) {
		dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);

/*
 * e_end_resync_block() is called in asender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);

		err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * or in _drbd_clear_done_ee, respectively */

	peer_req->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_peer_req(mdev, peer_req);
static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)

	dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
		(unsigned long)id, (unsigned long long)sector);

static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_request *req;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(mdev, req, sector, pi->size);
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(mdev, sector, pi->size);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(mdev, pi->size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);

	atomic_add(pi->size >> 9, &mdev->rs_sect_in);
static int w_restart_write(struct drbd_work *w, int cancel)
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;
	unsigned long start_time;
	unsigned long flags;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	if (!expect(req->rq_state & RQ_POSTPONED)) {
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
	bio = req->master_bio;
	start_time = req->start_time;
	/* Postponed requests will not have their master_bio completed! */
	__req_mod(req, DISCARD_WRITE, NULL);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	while (__drbd_make_request(mdev, bio, start_time))

static void restart_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
		if (expect(list_empty(&req->w.list))) {
			req->w.cb = w_restart_write;
			drbd_queue_work(&mdev->tconn->data.work, &req->w);

/*
 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	if (peer_req->flags & EE_SEND_WRITE_ACK) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(mdev, sector, peer_req->i.size);
		spin_unlock_irq(&mdev->tconn->req_lock);
		D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
	struct drbd_conf *mdev = w->mdev;
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);

	err = drbd_send_ack(mdev, ack, peer_req);

static int e_send_discard_write(struct drbd_work *w, int unused)
	return e_send_ack(w, P_DISCARD_WRITE);

static int e_send_retry_write(struct drbd_work *w, int unused)
	struct drbd_tconn *tconn = w->mdev->tconn;

	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_DISCARD_WRITE);
static bool seq_greater(u32 a, u32 b)
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 *  a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
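/*
 * Worked example, not from the original source: with 32-bit sequence
 * numbers, seq_greater(1, 0xffffffff) is true, because
 * (s32)1 - (s32)0xffffffff == 1 - (-1) == 2 > 0; a sequence number that
 * just wrapped around therefore still compares as "newer".
 */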
static u32 seq_max(u32 a, u32 b)
	return seq_greater(a, b) ? a : b;

static bool need_peer_seq(struct drbd_conf *mdev)
	struct drbd_tconn *tconn = mdev->tconn;

	/*
	 * We only need to keep track of the last packet_seq number of our peer
	 * if we are in dual-primary mode and we have the discard flag set; see
	 * handle_write_conflicts().
	 */
	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;

	return tp && test_bit(DISCARD_CONCURRENT, &tconn->flags);

static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
	unsigned int newest_peer_seq;

	if (need_peer_seq(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
		mdev->peer_seq = newest_peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		/* wake up only if we actually changed mdev->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&mdev->seq_wait);
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
	if (!need_peer_seq(mdev))

	spin_lock(&mdev->peer_seq_lock);
		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
		if (signal_pending(current)) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&mdev->peer_seq_lock);
		timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
		timeout = schedule_timeout(timeout);
		spin_lock(&mdev->peer_seq_lock);
			dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
	spin_unlock(&mdev->peer_seq_lock);
	finish_wait(&mdev->seq_wait, &wait);
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
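/*
 * Illustrative example, not from the original source: a peer write sent
 * with dp_flags == (DP_RW_SYNC | DP_FUA) is resubmitted locally as
 * REQ_SYNC | REQ_FUA, so the semantics of the originating bio survive
 * the trip over the wire.
 */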
static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
	struct drbd_interval *i;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&mdev->tconn->req_lock);
			complete_master_bio(mdev, &m);
		spin_lock_irq(&mdev->tconn->req_lock);
static int handle_write_conflicts(struct drbd_conf *mdev,
				  struct drbd_peer_request *peer_req)
	struct drbd_tconn *tconn = mdev->tconn;
	bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
	drbd_insert_interval(&mdev->write_requests, &peer_req->i);

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (i == &peer_req->i)

			/*
			 * Our peer has sent a conflicting remote request; this
			 * should not happen in a two-node setup. Wait for the
			 * earlier peer request to complete.
			 */
			err = drbd_wait_misc(mdev, i);

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
			 * overlapping request, it can be discarded; otherwise,
			 * it will be retried once all overlapping requests
			 * have completed.
			 */
			bool discard = i->sector <= sector && i->sector +
				       (i->size >> 9) >= sector + (size >> 9);
				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
					  discard ? "local" : "remote");

			peer_req->w.cb = discard ? e_send_discard_write :
						   e_send_retry_write;
			list_add_tail(&peer_req->w.list, &mdev->done_ee);
			wake_asender(mdev->tconn);

			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
				 * decide if this request will be discarded or
				 * retried. Requests that are discarded will
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
				err = drbd_wait_misc(mdev, &req->i);
					_conn_request_state(mdev->tconn,
							    NS(conn, C_TIMEOUT),
					fail_postponed_requests(mdev, sector, size);

			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;

	drbd_remove_epoch_entry_interval(mdev, peer_req);
/* mirrored write */
static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_peer_request *peer_req;
	struct p_data *p = pi->data;
	u32 peer_seq = be32_to_cpu(p->seq_num);

	mdev = vnr_to_mdev(tconn, pi->vnr);

	if (!get_ldev(mdev)) {
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		err2 = drbd_drain_block(mdev, pi->size);

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */
	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(mdev, p->block_id, sector, pi->size);

	peer_req->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	peer_req->epoch = mdev->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
		peer_req->flags |= EE_IN_INTERVAL_TREE;
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
		err = handle_write_conflicts(mdev, peer_req);
			spin_unlock_irq(&mdev->tconn->req_lock);
			if (err == -ENOENT) {
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (mdev->tconn->agreed_pro_version < 100) {
		switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
			dp_flags |= DP_SEND_WRITE_ACK;
			dp_flags |= DP_SEND_RECEIVE_ACK;

	if (dp_flags & DP_SEND_WRITE_ACK) {
		peer_req->flags |= EE_SEND_WRITE_ACK;
		/* corresponding dec_unacked() in e_end_block()
		 * or in _drbd_clear_done_ee, respectively */
	if (dp_flags & DP_SEND_RECEIVE_ACK) {
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, peer_req);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, &peer_req->i);

	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(mdev, peer_req);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, &peer_req->i);

	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
	drbd_free_peer_req(mdev, peer_req);
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our
 * own resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
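/*
 * Worked example, not from the original source, assuming the usual 4 KiB
 * resync-bitmap granularity (Bit2KB(x) == x * 4): if rs_left dropped by
 * db = 3000 bits over dt = 6 seconds since the older of the two step
 * marks, then dbdt = Bit2KB(3000 / 6) = 500 * 4 = 2000 KiB/s; with e.g.
 * c_min_rate = 250 (KiB/s), dbdt > c_min_rate holds and the resync is a
 * candidate for throttling when the disk also looks busy.
 */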
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	unsigned int c_min_rate;

	c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;

	/* feature disabled? */
	if (c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > c_min_rate)
static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_peer_request *peer_req;
	struct digest_info *di = NULL;
	unsigned int fault_type;
	struct p_block_req *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	capacity = drbd_get_capacity(mdev->this_bdev);

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
			(unsigned long long)sector, size);
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
			(unsigned long long)sector, size);

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);

		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
				     "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, pi->size);
	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);

	case P_DATA_REQUEST:
		peer_req->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */

	case P_RS_DATA_REQUEST:
		peer_req->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);

	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);

		di->digest_size = pi->size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		peer_req->digest = di;
		peer_req->flags |= EE_HAS_DIGEST;

		if (drbd_recv_all(mdev->tconn, di->digest, pi->size))

		if (pi->cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
			peer_req->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (pi->cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			peer_req->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;

		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->tconn->agreed_pro_version >= 90) {
			unsigned long now = jiffies;

			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			dev_info(DEV, "Online Verify start sector: %llu\n",
				 (unsigned long long)sector);
		peer_req->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
2409 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2410 * wrt the receiver, but it is not as straightforward as it may seem.
2411 * Various places in the resync start and stop logic assume resync
2412 * requests are processed in order, requeuing this on the worker thread
2413 * introduces a bunch of new code for synchronization between threads.
2415 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2416 * "forever", throttling after drbd_rs_begin_io will lock that extent
2417 * for application writes for the same time. For now, just throttle
2418 * here, where the rest of the code expects the receiver to sleep for
2422 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2423 * this defers syncer requests for some time, before letting at least
2424 * one request through. The resync controller on the receiving side
2425 * will adapt to the incoming rate accordingly.
2427 * We cannot throttle here if remote is Primary/SyncTarget:
2428 * we would also throttle its application reads.
2429 * In that case, throttling is done on the SyncTarget only.
2431 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2432 schedule_timeout_uninterruptible(HZ/10);
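/* E.g. with HZ = 1000 (HZ is configuration dependent; the value is assumed
 * here for illustration), the HZ/10 timeout above delays each throttled
 * resync request by roughly 100 ms before drbd_rs_begin_io() is attempted. */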
2433 if (drbd_rs_begin_io(mdev, sector))
2437 atomic_add(size >> 9, &mdev->rs_sect_ev);
2441 spin_lock_irq(&mdev->tconn->req_lock);
2442 list_add_tail(&peer_req->w.list, &mdev->read_ee);
2443 spin_unlock_irq(&mdev->tconn->req_lock);
2445 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2448 /* don't care for the reason here */
2449 dev_err(DEV, "submit failed, triggering re-connect\n");
2450 spin_lock_irq(&mdev->tconn->req_lock);
2451 list_del(&peer_req->w.list);
2452 spin_unlock_irq(&mdev->tconn->req_lock);
2453 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2457 drbd_free_peer_req(mdev, peer_req);
2461 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2463 int self, peer, rv = -100;
2464 unsigned long ch_self, ch_peer;
2465 enum drbd_after_sb_p after_sb_0p;
2467 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2468 peer = mdev->p_uuid[UI_BITMAP] & 1;
2470 ch_peer = mdev->p_uuid[UI_SIZE];
2471 ch_self = mdev->comm_bm_set;
2474 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2476 switch (after_sb_0p) {
2478 case ASB_DISCARD_SECONDARY:
2479 case ASB_CALL_HELPER:
2481 dev_err(DEV, "Configuration error.\n");
2483 case ASB_DISCONNECT:
2485 case ASB_DISCARD_YOUNGER_PRI:
2486 if (self == 0 && peer == 1) {
2490 if (self == 1 && peer == 0) {
2494 /* Else fall through to one of the other strategies... */
2495 case ASB_DISCARD_OLDER_PRI:
2496 if (self == 0 && peer == 1) {
2500 if (self == 1 && peer == 0) {
2504 /* Else fall through to one of the other strategies... */
2505 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2506 "Using discard-least-changes instead\n");
2507 case ASB_DISCARD_ZERO_CHG:
2508 if (ch_peer == 0 && ch_self == 0) {
2509 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2513 if (ch_peer == 0) { rv = 1; break; }
2514 if (ch_self == 0) { rv = -1; break; }
2516 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2518 case ASB_DISCARD_LEAST_CHG:
2519 if (ch_self < ch_peer)
2521 else if (ch_self > ch_peer)
2523 else /* ( ch_self == ch_peer ) */
2524 /* Well, then use something else. */
2525 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2528 case ASB_DISCARD_LOCAL:
2531 case ASB_DISCARD_REMOTE:
2538 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2541 enum drbd_after_sb_p after_sb_1p;
2544 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2546 switch (after_sb_1p) {
2547 case ASB_DISCARD_YOUNGER_PRI:
2548 case ASB_DISCARD_OLDER_PRI:
2549 case ASB_DISCARD_LEAST_CHG:
2550 case ASB_DISCARD_LOCAL:
2551 case ASB_DISCARD_REMOTE:
2552 case ASB_DISCARD_ZERO_CHG:
2553 dev_err(DEV, "Configuration error.\n");
2555 case ASB_DISCONNECT:
2558 hg = drbd_asb_recover_0p(mdev);
2559 if (hg == -1 && mdev->state.role == R_SECONDARY)
2561 if (hg == 1 && mdev->state.role == R_PRIMARY)
2565 rv = drbd_asb_recover_0p(mdev);
2567 case ASB_DISCARD_SECONDARY:
2568 return mdev->state.role == R_PRIMARY ? 1 : -1;
2569 case ASB_CALL_HELPER:
2570 hg = drbd_asb_recover_0p(mdev);
2571 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2572 enum drbd_state_rv rv2;
2574 drbd_set_role(mdev, R_SECONDARY, 0);
2575 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2576 * we might be here in C_WF_REPORT_PARAMS which is transient.
2577 * we do not need to wait for the after state change work either. */
2578 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2579 if (rv2 != SS_SUCCESS) {
2580 drbd_khelper(mdev, "pri-lost-after-sb");
2582 dev_warn(DEV, "Successfully gave up primary role.\n");
2592 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2595 enum drbd_after_sb_p after_sb_2p;
2598 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2600 switch (after_sb_2p) {
2601 case ASB_DISCARD_YOUNGER_PRI:
2602 case ASB_DISCARD_OLDER_PRI:
2603 case ASB_DISCARD_LEAST_CHG:
2604 case ASB_DISCARD_LOCAL:
2605 case ASB_DISCARD_REMOTE:
2607 case ASB_DISCARD_SECONDARY:
2608 case ASB_DISCARD_ZERO_CHG:
2609 dev_err(DEV, "Configuration error.\n");
2612 rv = drbd_asb_recover_0p(mdev);
2614 case ASB_DISCONNECT:
2616 case ASB_CALL_HELPER:
2617 hg = drbd_asb_recover_0p(mdev);
2619 enum drbd_state_rv rv2;
2621 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2622 * we might be here in C_WF_REPORT_PARAMS which is transient.
2623 * we do not need to wait for the after state change work either. */
2624 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2625 if (rv2 != SS_SUCCESS) {
2626 drbd_khelper(mdev, "pri-lost-after-sb");
2628 dev_warn(DEV, "Successfully gave up primary role.\n");
2638 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2639 u64 bits, u64 flags)
2642 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2645 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2647 (unsigned long long)uuid[UI_CURRENT],
2648 (unsigned long long)uuid[UI_BITMAP],
2649 (unsigned long long)uuid[UI_HISTORY_START],
2650 (unsigned long long)uuid[UI_HISTORY_END],
2651 (unsigned long long)bits,
2652 (unsigned long long)flags);
2655 /*
2656   100   after split brain try auto recover
2657     2   C_SYNC_SOURCE set BitMap
2658     1   C_SYNC_SOURCE use BitMap
2659     0   no Sync
2660    -1   C_SYNC_TARGET use BitMap
2661    -2   C_SYNC_TARGET set BitMap
2662  -100   after split brain, disconnect
2663 -1000   unrelated data
2664 -1091   requires proto 91
2665 -1096   requires proto 96
2666 */
2667 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2672 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2673 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2676 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2680 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2681 peer != UUID_JUST_CREATED)
2685 if (self != UUID_JUST_CREATED &&
2686 (peer == UUID_JUST_CREATED || peer == (u64)0))
2690 int rct, dc; /* roles at crash time */
2692 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2694 if (mdev->tconn->agreed_pro_version < 91)
2697 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2698 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2699 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2700 drbd_uuid_set_bm(mdev, 0UL);
2702 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2703 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2706 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2713 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2715 if (mdev->tconn->agreed_pro_version < 91)
2718 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2719 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2720 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2722 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2723 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2724 mdev->p_uuid[UI_BITMAP] = 0UL;
2726 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2729 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2736 /* Common power [off|failure] */
2737 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2738 (mdev->p_uuid[UI_FLAGS] & 2);
2739 /* lowest bit is set when we were primary,
2740 * next bit (weight 2) is set when peer was primary */
2744 case 0: /* !self_pri && !peer_pri */ return 0;
2745 case 1: /* self_pri && !peer_pri */ return 1;
2746 case 2: /* !self_pri && peer_pri */ return -1;
2747 case 3: /* self_pri && peer_pri */
2748 dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2754 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2759 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2761 if (mdev->tconn->agreed_pro_version < 96 ?
2762 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2763 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2764 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2765 /* The last P_SYNC_UUID did not get through. Undo the modifications the
2766 peer made to its UUIDs when it last started a resync as sync source. */
2768 if (mdev->tconn->agreed_pro_version < 91)
2771 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2772 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2774 dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
2775 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2782 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2783 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2784 peer = mdev->p_uuid[i] & ~((u64)1);
2790 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2791 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2796 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2798 if (mdev->tconn->agreed_pro_version < 96 ?
2799 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2800 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2801 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2802 /* The last P_SYNC_UUID did not get through. Undo the modifications we
2803 made to our UUIDs when we last started a resync as sync source. */
2805 if (mdev->tconn->agreed_pro_version < 91)
2808 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2809 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2811 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2812 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2813 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2821 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2822 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2823 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2829 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2830 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2831 if (self == peer && self != ((u64)0))
2835 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2836 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2837 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2838 peer = mdev->p_uuid[j] & ~((u64)1);
2847 /* drbd_sync_handshake() returns the new conn state on success, or
2848 C_MASK on failure.
2850 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2851 enum drbd_disk_state peer_disk) __must_hold(local)
2853 enum drbd_conns rv = C_MASK;
2854 enum drbd_disk_state mydisk;
2855 struct net_conf *nc;
2856 int hg, rule_nr, rr_conflict, tentative;
2858 mydisk = mdev->state.disk;
2859 if (mydisk == D_NEGOTIATING)
2860 mydisk = mdev->new_state_tmp.disk;
2862 dev_info(DEV, "drbd_sync_handshake:\n");
2863 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2864 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2865 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2867 hg = drbd_uuid_compare(mdev, &rule_nr);
2869 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2872 dev_alert(DEV, "Unrelated data, aborting!\n");
2876 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2880 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2881 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2882 int f = (hg == -100) || abs(hg) == 2;
2883 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2886 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2887 hg > 0 ? "source" : "target");
2891 drbd_khelper(mdev, "initial-split-brain");
2894 nc = rcu_dereference(mdev->tconn->net_conf);
2896 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2897 int pcount = (mdev->state.role == R_PRIMARY)
2898 + (peer_role == R_PRIMARY);
2899 int forced = (hg == -100);
2903 hg = drbd_asb_recover_0p(mdev);
2906 hg = drbd_asb_recover_1p(mdev);
2909 hg = drbd_asb_recover_2p(mdev);
2912 if (abs(hg) < 100) {
2913 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2914 "automatically solved. Sync from %s node\n",
2915 pcount, (hg < 0) ? "peer" : "this");
2917 dev_warn(DEV, "Doing a full sync, since"
2918 " UUIDs where ambiguous.\n");
2925 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
2927 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
2931 dev_warn(DEV, "Split-Brain detected, manually solved. "
2932 "Sync from %s node\n",
2933 (hg < 0) ? "peer" : "this");
2935 rr_conflict = nc->rr_conflict;
2936 tentative = nc->tentative;
2940 /* FIXME this log message is not correct if we end up here
2941 * after an attempted attach on a diskless node.
2942 * We just refuse to attach -- well, we drop the "connection"
2943 * to that disk, in a way... */
2944 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2945 drbd_khelper(mdev, "split-brain");
2949 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2950 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2954 if (hg < 0 && /* by intention we do not use mydisk here. */
2955 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2956 switch (rr_conflict) {
2957 case ASB_CALL_HELPER:
2958 drbd_khelper(mdev, "pri-lost");
2960 case ASB_DISCONNECT:
2961 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2964 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2969 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
2971 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2973 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
2974 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2975 abs(hg) >= 2 ? "full" : "bit-map based");
2980 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2981 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2982 BM_LOCKED_SET_ALLOWED))
2986 if (hg > 0) { /* become sync source. */
2988 } else if (hg < 0) { /* become sync target */
2992 if (drbd_bm_total_weight(mdev)) {
2993 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2994 drbd_bm_total_weight(mdev));
3001 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3003 /* ASB_DISCARD_REMOTE on one side paired with ASB_DISCARD_LOCAL on the other is valid */
3004 if (peer == ASB_DISCARD_REMOTE)
3005 return ASB_DISCARD_LOCAL;
3007 /* any other combination involving ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL is invalid */
3008 if (peer == ASB_DISCARD_LOCAL)
3009 return ASB_DISCARD_REMOTE;
3011 /* everything else is valid if they are equal on both sides. */
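/* Sketch of the symmetry this conversion gives receive_protocol() below
 * (illustrative only, not part of the driver): the two sides are compatible
 * exactly when the peer's setting, translated to our point of view, equals
 * our own:
 *
 *	static bool after_sb_settings_match(enum drbd_after_sb_p mine,
 *					    enum drbd_after_sb_p peers)
 *	{
 *		return convert_after_sb(peers) == mine;
 *	}
 *
 * so discard-local on one node pairs with discard-remote on the other. */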
3015 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3017 struct p_protocol *p = pi->data;
3018 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3019 int p_proto, p_discard_my_data, p_two_primaries, cf;
3020 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3021 char integrity_alg[SHARED_SECRET_MAX] = "";
3022 struct crypto_hash *peer_integrity_tfm = NULL;
3023 void *int_dig_in = NULL, *int_dig_vv = NULL;
3025 p_proto = be32_to_cpu(p->protocol);
3026 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3027 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3028 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
3029 p_two_primaries = be32_to_cpu(p->two_primaries);
3030 cf = be32_to_cpu(p->conn_flags);
3031 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3033 if (tconn->agreed_pro_version >= 87) {
3036 if (pi->size > sizeof(integrity_alg))
3038 err = drbd_recv_all(tconn, integrity_alg, pi->size);
3041 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3044 if (pi->cmd != P_PROTOCOL_UPDATE) {
3045 clear_bit(CONN_DRY_RUN, &tconn->flags);
3047 if (cf & CF_DRY_RUN)
3048 set_bit(CONN_DRY_RUN, &tconn->flags);
3051 nc = rcu_dereference(tconn->net_conf);
3053 if (p_proto != nc->wire_protocol) {
3054 conn_err(tconn, "incompatible %s settings\n", "protocol");
3055 goto disconnect_rcu_unlock;
3058 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3059 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
3060 goto disconnect_rcu_unlock;
3063 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3064 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
3065 goto disconnect_rcu_unlock;
3068 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3069 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
3070 goto disconnect_rcu_unlock;
3073 if (p_discard_my_data && nc->discard_my_data) {
3074 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
3075 goto disconnect_rcu_unlock;
3078 if (p_two_primaries != nc->two_primaries) {
3079 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
3080 goto disconnect_rcu_unlock;
3083 if (strcmp(integrity_alg, nc->integrity_alg)) {
3084 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
3085 goto disconnect_rcu_unlock;
3091 if (integrity_alg[0]) {
3095 * We can only change the peer data integrity algorithm
3096 * here. Changing our own data integrity algorithm
3097 * requires that we send a P_PROTOCOL_UPDATE packet at
3098 * the same time; otherwise, the peer has no way to
3099 * tell between which packets the algorithm should change. */
3103 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3104 if (!peer_integrity_tfm) {
3105 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3110 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3111 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3112 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3113 if (!(int_dig_in && int_dig_vv)) {
3114 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3119 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3120 if (!new_net_conf) {
3121 conn_err(tconn, "Allocation of new net_conf failed\n");
3125 mutex_lock(&tconn->data.mutex);
3126 mutex_lock(&tconn->conf_update);
3127 old_net_conf = tconn->net_conf;
3128 *new_net_conf = *old_net_conf;
3130 new_net_conf->wire_protocol = p_proto;
3131 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3132 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3133 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3134 new_net_conf->two_primaries = p_two_primaries;
3136 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3137 mutex_unlock(&tconn->conf_update);
3138 mutex_unlock(&tconn->data.mutex);
3140 crypto_free_hash(tconn->peer_integrity_tfm);
3141 kfree(tconn->int_dig_in);
3142 kfree(tconn->int_dig_vv);
3143 tconn->peer_integrity_tfm = peer_integrity_tfm;
3144 tconn->int_dig_in = int_dig_in;
3145 tconn->int_dig_vv = int_dig_vv;
3147 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3148 conn_info(tconn, "peer data-integrity-alg: %s\n",
3149 integrity_alg[0] ? integrity_alg : "(none)");
3152 kfree(old_net_conf);
3155 disconnect_rcu_unlock:
3158 crypto_free_hash(peer_integrity_tfm);
3161 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3166 * input: alg name, feature name
3167 * return: NULL (alg name was "")
3168 * ERR_PTR(error) if something goes wrong
3169 * or the crypto hash ptr, if it worked out ok. */
3170 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3171 const char *alg, const char *name)
3173 struct crypto_hash *tfm;
3178 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3180 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3181 alg, name, PTR_ERR(tfm));
3187 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3189 void *buffer = tconn->data.rbuf;
3190 int size = pi->size;
3193 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3194 s = drbd_recv(tconn, buffer, s);
3208 * config_unknown_volume - device configuration command for unknown volume
3210 * When a device is added to an existing connection, the node on which the
3211 * device is added first will send configuration commands to its peer but the
3212 * peer will not know about the device yet. It will warn and ignore these
3213 * commands. Once the device is added on the second node, the second node will
3214 * send the same device configuration commands, but in the other direction.
3216 * (We can also end up here if drbd is misconfigured.)
3218 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3220 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3221 cmdname(pi->cmd), pi->vnr);
3222 return ignore_remaining_packet(tconn, pi);
3225 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3227 struct drbd_conf *mdev;
3228 struct p_rs_param_95 *p;
3229 unsigned int header_size, data_size, exp_max_sz;
3230 struct crypto_hash *verify_tfm = NULL;
3231 struct crypto_hash *csums_tfm = NULL;
3232 struct net_conf *old_net_conf, *new_net_conf = NULL;
3233 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3234 const int apv = tconn->agreed_pro_version;
3235 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3239 mdev = vnr_to_mdev(tconn, pi->vnr);
3241 return config_unknown_volume(tconn, pi);
3243 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3244 : apv == 88 ? sizeof(struct p_rs_param)
3246 : apv <= 94 ? sizeof(struct p_rs_param_89)
3247 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3249 if (pi->size > exp_max_sz) {
3250 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3251 pi->size, exp_max_sz);
3256 header_size = sizeof(struct p_rs_param);
3257 data_size = pi->size - header_size;
3258 } else if (apv <= 94) {
3259 header_size = sizeof(struct p_rs_param_89);
3260 data_size = pi->size - header_size;
3261 D_ASSERT(data_size == 0);
3263 header_size = sizeof(struct p_rs_param_95);
3264 data_size = pi->size - header_size;
3265 D_ASSERT(data_size == 0);
3268 /* initialize verify_alg and csums_alg */
3270 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3272 err = drbd_recv_all(mdev->tconn, p, header_size);
3276 mutex_lock(&mdev->tconn->conf_update);
3277 old_net_conf = mdev->tconn->net_conf;
3278 if (get_ldev(mdev)) {
3279 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3280 if (!new_disk_conf) {
3282 mutex_unlock(&mdev->tconn->conf_update);
3283 dev_err(DEV, "Allocation of new disk_conf failed\n");
3287 old_disk_conf = mdev->ldev->disk_conf;
3288 *new_disk_conf = *old_disk_conf;
3290 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3295 if (data_size > SHARED_SECRET_MAX) {
3296 dev_err(DEV, "verify-alg too long, "
3297 "peer wants %u, accepting only %u byte\n",
3298 data_size, SHARED_SECRET_MAX);
3303 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3306 /* we expect NUL terminated string */
3307 /* but just in case someone tries to be evil */
3308 D_ASSERT(p->verify_alg[data_size-1] == 0);
3309 p->verify_alg[data_size-1] = 0;
3311 } else /* apv >= 89 */ {
3312 /* we still expect NUL terminated strings */
3313 /* but just in case someone tries to be evil */
3314 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3315 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3316 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3317 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3320 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3321 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3322 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3323 old_net_conf->verify_alg, p->verify_alg);
3326 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3327 p->verify_alg, "verify-alg");
3328 if (IS_ERR(verify_tfm)) {
3334 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3335 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3336 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3337 old_net_conf->csums_alg, p->csums_alg);
3340 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3341 p->csums_alg, "csums-alg");
3342 if (IS_ERR(csums_tfm)) {
3348 if (apv > 94 && new_disk_conf) {
3349 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3350 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3351 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3352 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3354 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3355 if (fifo_size != mdev->rs_plan_s->size) {
3356 new_plan = fifo_alloc(fifo_size);
3358 dev_err(DEV, "kmalloc of fifo_buffer failed\n");
3365 if (verify_tfm || csums_tfm) {
3366 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3367 if (!new_net_conf) {
3368 dev_err(DEV, "Allocation of new net_conf failed\n");
3372 *new_net_conf = *old_net_conf;
3375 strcpy(new_net_conf->verify_alg, p->verify_alg);
3376 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3377 crypto_free_hash(mdev->tconn->verify_tfm);
3378 mdev->tconn->verify_tfm = verify_tfm;
3379 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3382 strcpy(new_net_conf->csums_alg, p->csums_alg);
3383 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3384 crypto_free_hash(mdev->tconn->csums_tfm);
3385 mdev->tconn->csums_tfm = csums_tfm;
3386 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3388 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3392 if (new_disk_conf) {
3393 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3398 old_plan = mdev->rs_plan_s;
3399 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3402 mutex_unlock(&mdev->tconn->conf_update);
3405 kfree(old_net_conf);
3406 kfree(old_disk_conf);
3412 if (new_disk_conf) {
3414 kfree(new_disk_conf);
3416 mutex_unlock(&mdev->tconn->conf_update);
3421 if (new_disk_conf) {
3423 kfree(new_disk_conf);
3425 mutex_unlock(&mdev->tconn->conf_update);
3426 /* just for completeness: actually not needed,
3427 * as this is not reached if csums_tfm was ok. */
3428 crypto_free_hash(csums_tfm);
3429 /* but free the verify_tfm again, if csums_tfm did not work out */
3430 crypto_free_hash(verify_tfm);
3431 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3435 /* warn if the arguments differ by more than 12.5% */
3436 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3437 const char *s, sector_t a, sector_t b)
3440 if (a == 0 || b == 0)
3442 d = (a > b) ? (a - b) : (b - a);
3443 if (d > (a>>3) || d > (b>>3))
3444 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3445 (unsigned long long)a, (unsigned long long)b);
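/* Worked example (sizes hypothetical): a = 1000 and b = 860 sectors give
 * d = 140, which exceeds a>>3 = 125, i.e. more than 12.5% of a, so the
 * warning fires; with b = 900, d = 100 stays below both thresholds and
 * nothing is logged. */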
3448 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3450 struct drbd_conf *mdev;
3451 struct p_sizes *p = pi->data;
3452 enum determine_dev_size dd = unchanged;
3453 sector_t p_size, p_usize, my_usize;
3454 int ldsc = 0; /* local disk size changed */
3455 enum dds_flags ddsf;
3457 mdev = vnr_to_mdev(tconn, pi->vnr);
3459 return config_unknown_volume(tconn, pi);
3461 p_size = be64_to_cpu(p->d_size);
3462 p_usize = be64_to_cpu(p->u_size);
3464 /* just store the peer's disk size for now.
3465 * we still need to figure out whether we accept that. */
3466 mdev->p_size = p_size;
3468 if (get_ldev(mdev)) {
3470 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3473 warn_if_differ_considerably(mdev, "lower level device sizes",
3474 p_size, drbd_get_max_capacity(mdev->ldev));
3475 warn_if_differ_considerably(mdev, "user requested size",
3478 /* if this is the first connect, or an otherwise expected
3479 * param exchange, choose the minimum */
3480 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3481 p_usize = min_not_zero(my_usize, p_usize);
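/* E.g. (values hypothetical): a node with no explicit size request
 * (my_usize = 0) adopts a peer request of p_usize = 20971520 sectors
 * outright; if both sides request a size, the smaller non-zero value wins. */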
3483 /* Never shrink a device with usable data during connect.
3484 But allow online shrinking if we are connected. */
3485 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3486 drbd_get_capacity(mdev->this_bdev) &&
3487 mdev->state.disk >= D_OUTDATED &&
3488 mdev->state.conn < C_CONNECTED) {
3489 dev_err(DEV, "The peer's disk size is too small!\n");
3490 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3495 if (my_usize != p_usize) {
3496 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3498 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3499 if (!new_disk_conf) {
3500 dev_err(DEV, "Allocation of new disk_conf failed\n");
3505 mutex_lock(&mdev->tconn->conf_update);
3506 old_disk_conf = mdev->ldev->disk_conf;
3507 *new_disk_conf = *old_disk_conf;
3508 new_disk_conf->disk_size = p_usize;
3510 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3511 mutex_unlock(&mdev->tconn->conf_update);
3513 kfree(old_disk_conf);
3515 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3516 (unsigned long)p_usize);
3522 ddsf = be16_to_cpu(p->dds_flags);
3523 if (get_ldev(mdev)) {
3524 dd = drbd_determine_dev_size(mdev, ddsf);
3526 if (dd == dev_size_error)
3530 /* I am diskless, need to accept the peer's size. */
3531 drbd_set_my_capacity(mdev, p_size);
3534 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3535 drbd_reconsider_max_bio_size(mdev);
3537 if (get_ldev(mdev)) {
3538 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3539 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3546 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3547 if (be64_to_cpu(p->c_size) !=
3548 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3549 /* we have different sizes, probably peer
3550 * needs to know my new size... */
3551 drbd_send_sizes(mdev, 0, ddsf);
3553 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3554 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3555 if (mdev->state.pdsk >= D_INCONSISTENT &&
3556 mdev->state.disk >= D_INCONSISTENT) {
3557 if (ddsf & DDSF_NO_RESYNC)
3558 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3560 resync_after_online_grow(mdev);
3562 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3569 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3571 struct drbd_conf *mdev;
3572 struct p_uuids *p = pi->data;
3574 int i, updated_uuids = 0;
3576 mdev = vnr_to_mdev(tconn, pi->vnr);
3578 return config_unknown_volume(tconn, pi);
3580 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3582 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3583 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3585 kfree(mdev->p_uuid);
3586 mdev->p_uuid = p_uuid;
3588 if (mdev->state.conn < C_CONNECTED &&
3589 mdev->state.disk < D_INCONSISTENT &&
3590 mdev->state.role == R_PRIMARY &&
3591 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3592 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3593 (unsigned long long)mdev->ed_uuid);
3594 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3598 if (get_ldev(mdev)) {
3599 int skip_initial_sync =
3600 mdev->state.conn == C_CONNECTED &&
3601 mdev->tconn->agreed_pro_version >= 90 &&
3602 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3603 (p_uuid[UI_FLAGS] & 8);
3604 if (skip_initial_sync) {
3605 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3606 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3607 "clear_n_write from receive_uuids",
3608 BM_LOCKED_TEST_ALLOWED);
3609 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3610 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3611 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3617 } else if (mdev->state.disk < D_INCONSISTENT &&
3618 mdev->state.role == R_PRIMARY) {
3619 /* I am a diskless primary, the peer just created a new current UUID
3621 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3624 /* Before we test the disk state we should wait until any ongoing
3625 cluster-wide state change has finished. That is important if we are
3626 primary and are detaching from our disk: we need to see the new disk
3627 state... */
3628 mutex_lock(mdev->state_mutex);
3629 mutex_unlock(mdev->state_mutex);
3630 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3631 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3634 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3640 * convert_state() - Converts the peer's view of the cluster state to our point of view
3641 * @ps: The state as seen by the peer.
3643 static union drbd_state convert_state(union drbd_state ps)
3645 union drbd_state ms;
3647 static enum drbd_conns c_tab[] = {
3648 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3649 [C_CONNECTED] = C_CONNECTED,
3651 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3652 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3653 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3654 [C_VERIFY_S] = C_VERIFY_T,
3660 ms.conn = c_tab[ps.conn];
3665 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3670 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3672 struct drbd_conf *mdev;
3673 struct p_req_state *p = pi->data;
3674 union drbd_state mask, val;
3675 enum drbd_state_rv rv;
3677 mdev = vnr_to_mdev(tconn, pi->vnr);
3681 mask.i = be32_to_cpu(p->mask);
3682 val.i = be32_to_cpu(p->val);
3684 if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
3685 mutex_is_locked(mdev->state_mutex)) {
3686 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3690 mask = convert_state(mask);
3691 val = convert_state(val);
3693 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3694 drbd_send_sr_reply(mdev, rv);
3701 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3703 struct p_req_state *p = pi->data;
3704 union drbd_state mask, val;
3705 enum drbd_state_rv rv;
3707 mask.i = be32_to_cpu(p->mask);
3708 val.i = be32_to_cpu(p->val);
3710 if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
3711 mutex_is_locked(&tconn->cstate_mutex)) {
3712 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3716 mask = convert_state(mask);
3717 val = convert_state(val);
3719 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3720 conn_send_sr_reply(tconn, rv);
3725 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3727 struct drbd_conf *mdev;
3728 struct p_state *p = pi->data;
3729 union drbd_state os, ns, peer_state;
3730 enum drbd_disk_state real_peer_disk;
3731 enum chg_state_flags cs_flags;
3734 mdev = vnr_to_mdev(tconn, pi->vnr);
3736 return config_unknown_volume(tconn, pi);
3738 peer_state.i = be32_to_cpu(p->state);
3740 real_peer_disk = peer_state.disk;
3741 if (peer_state.disk == D_NEGOTIATING) {
3742 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3743 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3746 spin_lock_irq(&mdev->tconn->req_lock);
3748 os = ns = drbd_read_state(mdev);
3749 spin_unlock_irq(&mdev->tconn->req_lock);
3751 /* If this is the "end of sync" confirmation, usually the peer disk
3752 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3753 * set) resync started in PausedSyncT, or if the timing of pause-/
3754 * unpause-sync events has been "just right", the peer disk may
3755 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3757 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3758 real_peer_disk == D_UP_TO_DATE &&
3759 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3760 /* If we are (becoming) SyncSource, but peer is still in sync
3761 * preparation, ignore its uptodate-ness to avoid flapping, it
3762 * will change to inconsistent once the peer reaches active
3764 * It may have changed syncer-paused flags, however, so we
3765 * cannot ignore this completely. */
3766 if (peer_state.conn > C_CONNECTED &&
3767 peer_state.conn < C_SYNC_SOURCE)
3768 real_peer_disk = D_INCONSISTENT;
3770 /* if peer_state changes to connected at the same time,
3771 * it explicitly notifies us that it finished resync.
3772 * Maybe we should finish it up, too? */
3773 else if (os.conn >= C_SYNC_SOURCE &&
3774 peer_state.conn == C_CONNECTED) {
3775 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3776 drbd_resync_finished(mdev);
3781 /* peer says his disk is inconsistent, while we think it is uptodate,
3782 * and this happens while the peer still thinks we have a sync going on,
3783 * but we think we are already done with the sync.
3784 * We ignore this to avoid flapping pdsk.
3785 * This should not happen if the peer is a recent version of drbd. */
3786 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3787 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3788 real_peer_disk = D_UP_TO_DATE;
3790 if (ns.conn == C_WF_REPORT_PARAMS)
3791 ns.conn = C_CONNECTED;
3793 if (peer_state.conn == C_AHEAD)
3796 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3797 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3798 int cr; /* consider resync */
3800 /* if we established a new connection */
3801 cr = (os.conn < C_CONNECTED);
3802 /* if we had an established connection
3803 * and one of the nodes newly attaches a disk */
3804 cr |= (os.conn == C_CONNECTED &&
3805 (peer_state.disk == D_NEGOTIATING ||
3806 os.disk == D_NEGOTIATING));
3807 /* if we have both been inconsistent, and the peer has been
3808 * forced to be UpToDate with --overwrite-data */
3809 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3810 /* if we had been plain connected, and the admin requested to
3811 * start a sync by "invalidate" or "invalidate-remote" */
3812 cr |= (os.conn == C_CONNECTED &&
3813 (peer_state.conn >= C_STARTING_SYNC_S &&
3814 peer_state.conn <= C_WF_BITMAP_T));
3817 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3820 if (ns.conn == C_MASK) {
3821 ns.conn = C_CONNECTED;
3822 if (mdev->state.disk == D_NEGOTIATING) {
3823 drbd_force_state(mdev, NS(disk, D_FAILED));
3824 } else if (peer_state.disk == D_NEGOTIATING) {
3825 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3826 peer_state.disk = D_DISKLESS;
3827 real_peer_disk = D_DISKLESS;
3829 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3831 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3832 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3838 spin_lock_irq(&mdev->tconn->req_lock);
3839 if (os.i != drbd_read_state(mdev).i)
3841 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3842 ns.peer = peer_state.role;
3843 ns.pdsk = real_peer_disk;
3844 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3845 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3846 ns.disk = mdev->new_state_tmp.disk;
3847 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3848 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3849 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3850 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3851 for temporary network outages! */
3852 spin_unlock_irq(&mdev->tconn->req_lock);
3853 dev_err(DEV, "Aborting Connect, can not thaw IO with a peer that is only Consistent\n");
3854 tl_clear(mdev->tconn);
3855 drbd_uuid_new_current(mdev);
3856 clear_bit(NEW_CUR_UUID, &mdev->flags);
3857 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3860 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3861 ns = drbd_read_state(mdev);
3862 spin_unlock_irq(&mdev->tconn->req_lock);
3864 if (rv < SS_SUCCESS) {
3865 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3869 if (os.conn > C_WF_REPORT_PARAMS) {
3870 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3871 peer_state.disk != D_NEGOTIATING ) {
3872 /* we want resync, peer has not yet decided to sync... */
3873 /* Nowadays only used when forcing a node into primary role and
3874 setting its disk to UpToDate with that */
3875 drbd_send_uuids(mdev);
3876 drbd_send_current_state(mdev);
3880 clear_bit(DISCARD_MY_DATA, &mdev->flags);
3882 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3887 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
3889 struct drbd_conf *mdev;
3890 struct p_rs_uuid *p = pi->data;
3892 mdev = vnr_to_mdev(tconn, pi->vnr);
3896 wait_event(mdev->misc_wait,
3897 mdev->state.conn == C_WF_SYNC_UUID ||
3898 mdev->state.conn == C_BEHIND ||
3899 mdev->state.conn < C_CONNECTED ||
3900 mdev->state.disk < D_NEGOTIATING);
3902 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3904 /* Here the _drbd_uuid_ functions are right, current should
3905 _not_ be rotated into the history */
3906 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3907 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3908 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3910 drbd_print_uuids(mdev, "updated sync uuid");
3911 drbd_start_resync(mdev, C_SYNC_TARGET);
3915 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3921 * receive_bitmap_plain
3923 * Return 0 when done, 1 when another iteration is needed, and a negative error
3924 * code upon failure.
3927 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
3928 unsigned long *p, struct bm_xfer_ctx *c)
3930 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
3931 drbd_header_size(mdev->tconn);
3932 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
3933 c->bm_words - c->word_offset);
3934 unsigned int want = num_words * sizeof(*p);
3938 dev_err(DEV, "%s: want (%u) != size (%u)\n", __func__, want, size);
3943 err = drbd_recv_all(mdev->tconn, p, want);
3947 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
3949 c->word_offset += num_words;
3950 c->bit_offset = c->word_offset * BITS_PER_LONG;
3951 if (c->bit_offset > c->bm_bits)
3952 c->bit_offset = c->bm_bits;
3957 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
3959 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
3962 static int dcbp_get_start(struct p_compressed_bm *p)
3964 return (p->encoding & 0x80) != 0;
3967 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
3969 return (p->encoding >> 4) & 0x7;
3975 * Return 0 when done, 1 when another iteration is needed, and a negative error
3976 * code upon failure.
3979 recv_bm_rle_bits(struct drbd_conf *mdev,
3980 struct p_compressed_bm *p,
3981 struct bm_xfer_ctx *c,
3984 struct bitstream bs;
3988 unsigned long s = c->bit_offset;
3990 int toggle = dcbp_get_start(p);
3994 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
3996 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4000 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4001 bits = vli_decode_bits(&rl, look_ahead);
4007 if (e >= c->bm_bits) {
4008 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4011 _drbd_bm_set_bits(mdev, s, e);
4015 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4016 have, bits, look_ahead,
4017 (unsigned int)(bs.cur.b - p->code),
4018 (unsigned int)bs.buf_len);
4021 look_ahead >>= bits;
4024 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4027 look_ahead |= tmp << have;
4032 bm_xfer_ctx_bit_to_word_offset(c);
4034 return (s != c->bm_bits);
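/* Decoding example (stream contents made up): with dcbp_get_start() == 0 and
 * decoded run lengths 5, 3 and 7, the chunk describes the bit pattern
 * 00000 111 0000000 starting at c->bit_offset; only the middle run, where
 * toggle is set, is applied via _drbd_bm_set_bits(), the cleared runs merely
 * advance s. */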
4040 * Return 0 when done, 1 when another iteration is needed, and a negative error
4041 * code upon failure.
4044 decode_bitmap_c(struct drbd_conf *mdev,
4045 struct p_compressed_bm *p,
4046 struct bm_xfer_ctx *c,
4049 if (dcbp_get_code(p) == RLE_VLI_Bits)
4050 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
4052 /* other variants had been implemented for evaluation,
4053 * but have been dropped as this one turned out to be "best"
4054 * during all our tests. */
4056 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4057 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4061 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4062 const char *direction, struct bm_xfer_ctx *c)
4064 /* what would it take to transfer it "plaintext" */
4065 unsigned int header_size = drbd_header_size(mdev->tconn);
4066 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4067 unsigned int plain =
4068 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4069 c->bm_words * sizeof(unsigned long);
4070 unsigned int total = c->bytes[0] + c->bytes[1];
4073 /* total can not be zero. but just in case: */
4077 /* don't report if not compressed */
4081 /* total < plain. check for overflow, still */
4082 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4083 : (1000 * total / plain);
4089 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4090 "total %u; compression: %u.%u%%\n",
4092 c->bytes[1], c->packets[1],
4093 c->bytes[0], c->packets[0],
4094 total, r/10, r % 10);
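/* Worked example (transfer sizes hypothetical): c->bm_words = 32768
 * eight-byte words is roughly 262144 bytes of plain payload (plus headers);
 * if the RLE variant moved only total = 32768 bytes, 1000 * total / plain
 * is about 125, i.e. the compressed transfer needed roughly an eighth of
 * the plain-text volume. */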
4097 /* Since we are processing the bitfield from lower addresses to higher,
4098 it does not matter whether we process it in 32 bit or 64 bit chunks,
4099 as long as it is little endian. (Understand it as a byte stream,
4100 beginning with the lowest byte...) If we used big endian we would need
4101 to process it from the highest address to the lowest, in order to be
4102 agnostic to the 32 vs 64 bit issue.
4104 Returns 0 on success, or a negative error code otherwise. */
4105 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4107 struct drbd_conf *mdev;
4108 struct bm_xfer_ctx c;
4111 mdev = vnr_to_mdev(tconn, pi->vnr);
4115 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4116 /* you are supposed to send additional out-of-sync information
4117 * if you actually set bits during this phase */
4119 c = (struct bm_xfer_ctx) {
4120 .bm_bits = drbd_bm_bits(mdev),
4121 .bm_words = drbd_bm_words(mdev),
4125 if (pi->cmd == P_BITMAP)
4126 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4127 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4128 /* MAYBE: sanity check that we speak proto >= 90,
4129 * and the feature is enabled! */
4130 struct p_compressed_bm *p = pi->data;
4132 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4133 dev_err(DEV, "ReportCBitmap packet too large\n");
4137 if (pi->size <= sizeof(*p)) {
4138 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4142 err = drbd_recv_all(mdev->tconn, p, pi->size);
4145 err = decode_bitmap_c(mdev, p, &c, pi->size);
4147 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4152 c.packets[pi->cmd == P_BITMAP]++;
4153 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4160 err = drbd_recv_header(mdev->tconn, pi);
4165 INFO_bm_xfer_stats(mdev, "receive", &c);
4167 if (mdev->state.conn == C_WF_BITMAP_T) {
4168 enum drbd_state_rv rv;
4170 err = drbd_send_bitmap(mdev);
4173 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4174 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4175 D_ASSERT(rv == SS_SUCCESS);
4176 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4177 /* admin may have requested C_DISCONNECTING,
4178 * other threads may have noticed network errors */
4179 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4180 drbd_conn_str(mdev->state.conn));
4185 drbd_bm_unlock(mdev);
4186 if (!err && mdev->state.conn == C_WF_BITMAP_S)
4187 drbd_start_resync(mdev, C_SYNC_SOURCE);
4191 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4193 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4196 return ignore_remaining_packet(tconn, pi);
4199 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4201 /* Make sure we've acked all the TCP data associated
4202 * with the data requests being unplugged */
4203 drbd_tcp_quickack(tconn->data.socket);
4208 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4210 struct drbd_conf *mdev;
4211 struct p_block_desc *p = pi->data;
4213 mdev = vnr_to_mdev(tconn, pi->vnr);
4217 switch (mdev->state.conn) {
4218 case C_WF_SYNC_UUID:
4223 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4224 drbd_conn_str(mdev->state.conn));
4227 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4235 int (*fn)(struct drbd_tconn *, struct packet_info *);
4238 static struct data_cmd drbd_cmd_handler[] = {
4239 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4240 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4241 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply },
4242 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier },
4243 [P_BITMAP] = { 1, 0, receive_bitmap },
4244 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap },
4245 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
4246 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4247 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4248 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4249 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
4250 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4251 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4252 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4253 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4254 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4255 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4256 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4257 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4258 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4259 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
4260 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4261 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4262 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4265 static void drbdd(struct drbd_tconn *tconn)
4267 struct packet_info pi;
4268 size_t shs; /* sub header size */
4271 while (get_t_state(&tconn->receiver) == RUNNING) {
4272 struct data_cmd *cmd;
4274 drbd_thread_current_set_cpu(&tconn->receiver);
4275 if (drbd_recv_header(tconn, &pi))
4278 cmd = &drbd_cmd_handler[pi.cmd];
4279 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4280 conn_err(tconn, "Unexpected data packet %s (0x%04x)\n",
4281 cmdname(pi.cmd), pi.cmd);
4285 shs = cmd->pkt_size;
4286 if (pi.size > shs && !cmd->expect_payload) {
4287 conn_err(tconn, "No payload expected %s l:%d\n",
4288 cmdname(pi.cmd), pi.size);
4293 err = drbd_recv_all_warn(tconn, pi.data, shs);
4299 err = cmd->fn(tconn, &pi);
4301 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4302 cmdname(pi.cmd), err, pi.size);
4309 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4312 void conn_flush_workqueue(struct drbd_tconn *tconn)
4314 struct drbd_wq_barrier barr;
4316 barr.w.cb = w_prev_work_done;
4317 barr.w.tconn = tconn;
4318 init_completion(&barr.done);
4319 drbd_queue_work(&tconn->data.work, &barr.w);
4320 wait_for_completion(&barr.done);
4323 static void conn_disconnect(struct drbd_tconn *tconn)
4325 struct drbd_conf *mdev;
4329 if (tconn->cstate == C_STANDALONE)
4332 /* asender does not clean up anything. it must not interfere, either */
4333 drbd_thread_stop(&tconn->asender);
4334 drbd_free_sock(tconn);
4337 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4338 kref_get(&mdev->kref);
4340 drbd_disconnected(mdev);
4341 kref_put(&mdev->kref, &drbd_minor_destroy);
4346 conn_info(tconn, "Connection closed\n");
4348 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4349 conn_try_outdate_peer_async(tconn);
4351 spin_lock_irq(&tconn->req_lock);
4353 if (oc >= C_UNCONNECTED)
4354 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4356 spin_unlock_irq(&tconn->req_lock);
4358 if (oc == C_DISCONNECTING)
4359 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
4362 static int drbd_disconnected(struct drbd_conf *mdev)
4366 /* wait for current activity to cease. */
4367 spin_lock_irq(&mdev->tconn->req_lock);
4368 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4369 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4370 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
4371 spin_unlock_irq(&mdev->tconn->req_lock);
4373 /* We do not have data structures that would allow us to
4374 * get the rs_pending_cnt down to 0 again.
4375 * * On C_SYNC_TARGET we do not have any data structures describing
4376 * the pending RSDataRequest's we have sent.
4377 * * On C_SYNC_SOURCE there is no data structure that tracks
4378 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4379 * And no, it is not the sum of the reference counts in the
4380 * resync_LRU. The resync_LRU tracks the whole operation including
4381 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4383 drbd_rs_cancel_all(mdev);
4385 mdev->rs_failed = 0;
4386 atomic_set(&mdev->rs_pending_cnt, 0);
4387 wake_up(&mdev->misc_wait);
4389 del_timer_sync(&mdev->resync_timer);
4390 resync_timer_fn((unsigned long)mdev);
4392 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4393 * w_make_resync_request etc. which may still be on the worker queue
4394 * to be "canceled" */
4395 drbd_flush_workqueue(mdev);
4397 drbd_finish_peer_reqs(mdev);
4399 kfree(mdev->p_uuid);
4400 mdev->p_uuid = NULL;
4402 if (!drbd_suspended(mdev))
4403 tl_clear(mdev->tconn);
4407 /* serialize with bitmap writeout triggered by the state change,
4409 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4411 /* tcp_close and release of sendpage pages can be deferred. I don't
4412 * want to use SO_LINGER, because apparently it can be deferred for
4413 * more than 20 seconds (longest time I checked).
4415 * Actually we don't care for exactly when the network stack does its
4416 * put_page(), but release our reference on these pages right here.
4418 i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
4420 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
4421 i = atomic_read(&mdev->pp_in_use_by_net);
4423 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
4424 i = atomic_read(&mdev->pp_in_use);
4426 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
4428 D_ASSERT(list_empty(&mdev->read_ee));
4429 D_ASSERT(list_empty(&mdev->active_ee));
4430 D_ASSERT(list_empty(&mdev->sync_ee));
4431 D_ASSERT(list_empty(&mdev->done_ee));
4433 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4434 atomic_set(&mdev->current_epoch->epoch_size, 0);
4435 D_ASSERT(list_empty(&mdev->current_epoch->list));
4441 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4442 * we can agree on is stored in agreed_pro_version.
4444 * The feature flags and the reserved array should provide enough room for
4445 * future enhancements of the handshake protocol, and for possible plugins...
4447 * for now, they are expected to be zero, but ignored.
4449 static int drbd_send_features(struct drbd_tconn *tconn)
4451 struct drbd_socket *sock;
4452 struct p_connection_features *p;
4454 sock = &tconn->data;
4455 p = conn_prepare_command(tconn, sock);
4458 memset(p, 0, sizeof(*p));
4459 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4460 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4461 return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_tconn *tconn)
{
	/* ASSERT current == tconn->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(tconn);
	if (err)
		return 0;

	err = drbd_recv_header(tconn, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(tconn, p, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	conn_info(tconn, "Handshake successful: "
		  "Agreed network protocol version %d\n", tconn->agreed_pro_version);

	return 1;

 incompat:
	conn_err(tconn, "incompatible DRBD dialects: "
		 "I support %d-%d, peer supports %d-%d\n",
		 PRO_VERSION_MIN, PRO_VERSION_MAX,
		 p->protocol_min, p->protocol_max);
	return -1;
}
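/* Worked example for the negotiation above (numbers illustrative, not tied
 * to any particular build): with PRO_VERSION_MIN 86 and PRO_VERSION_MAX 96
 * on our side and a peer announcing [90, 101], the ranges overlap and we
 * settle on min_t(int, 96, 101) == 96.  A peer announcing [97, 101] instead
 * would fail the overlap check and take the incompat: path (return -1,
 * go standalone). */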
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN	64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct hash_desc desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer.  */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc.tfm = tconn->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &tconn->data;
	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) { rv = 0; goto fail; }

	if (pi.cmd != P_AUTH_CHALLENGE) {
		conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0; goto fail;
	}

	if (pi.size > CHALLENGE_LEN * 2) {
		conn_err(tconn, "expected AuthChallenge payload too big.\n");
		rv = -1; goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (peers_ch == NULL) {
		conn_err(tconn, "kmalloc of peers_ch failed\n");
		rv = -1; goto fail;
	}

	err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
	if (err) { rv = 0; goto fail; }

	resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		conn_err(tconn, "kmalloc of response failed\n");
		rv = -1; goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, pi.size);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1; goto fail;
	}

	if (!conn_prepare_command(tconn, sock)) {
		rv = 0; goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) { rv = 0; goto fail; }

	if (pi.cmd != P_AUTH_RESPONSE) {
		conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0; goto fail;
	}

	if (pi.size != resp_size) {
		conn_err(tconn, "expected AuthResponse payload of wrong size\n");
		rv = 0; goto fail;
	}

	err = drbd_recv_all_warn(tconn, response, resp_size);
	if (err) { rv = 0; goto fail; }

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		conn_err(tconn, "kmalloc of right_response failed\n");
		rv = -1; goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1; goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
			  resp_size);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
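/* Sketch of the (symmetric) exchange implemented above; both peers run it
 * concurrently over the data socket right after the feature handshake:
 *
 *	send  P_AUTH_CHALLENGE  (CHALLENGE_LEN random bytes)
 *	recv  P_AUTH_CHALLENGE  (the peer's random bytes)
 *	send  P_AUTH_RESPONSE = HMAC(shared_secret, peer's challenge)
 *	recv  P_AUTH_RESPONSE, memcmp() it against
 *	      HMAC(shared_secret, my_challenge)
 *
 * Only the HMAC digests cross the wire; the shared secret itself never
 * does. */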
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	int h;

	conn_info(tconn, "receiver (re)started\n");

	do {
		h = conn_connect(tconn);
		if (h == 0) {
			conn_disconnect(tconn);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			conn_warn(tconn, "Discarding network configuration.\n");
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	} while (h == 0);

	if (h > 0)
		drbdd(tconn);

	conn_disconnect(tconn);

	conn_info(tconn, "receiver terminated\n");
	return 0;
}
/* ********* acknowledge sender ******** */
static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
	} else {
		set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
		conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&tconn->ping_wait);

	return 0;
}
static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return 0;
}
static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return drbd_send_ping_ack(tconn);
}
static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* restore idle timeout */
	tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
		wake_up(&tconn->ping_wait);

	return 0;
}
static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return 0;
}
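/* Unit check for the two conversions above, assuming a typical 4KiB ack
 * (blksize == 4096): blksize >> BM_BLOCK_SHIFT == 4096 >> 12 == 1 bitmap
 * block counted as checksum-equal, and blksize >> 9 == 8 512-byte sectors
 * accounted into rs_sect_in. */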
static int
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->tconn->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return 0;
}
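/* Usage sketch (mirrors the got_BlockAck() caller below): a P_WRITE_ACK
 * for a regular write reaches this helper roughly as
 *
 *	validate_req_change_req_state(mdev, p->block_id, sector,
 *				      &mdev->write_requests, __func__,
 *				      WRITE_ACKED_BY_PEER, false);
 *
 * i.e. look up the request by (block_id, sector) under req_lock, feed the
 * event into __req_mod(), and complete the master bio outside the lock if
 * that transition finished it. */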
static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return 0;
	}
	switch (pi->cmd) {
	case P_RS_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		what = RECV_ACKED_BY_PEER;
		break;
	case P_DISCARD_WRITE:
		what = DISCARD_WRITE;
		break;
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
		break;
	default:
		BUG();
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->write_requests, __func__,
					     what, false);
}
static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return 0;
	}

	err = validate_req_change_req_state(mdev, p->block_id, sector,
					    &mdev->write_requests, __func__,
					    NEG_ACKED, true);
	if (err) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return 0;
}
static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->read_requests, __func__,
					     NEG_ACKED, false);
}
static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through: P_RS_CANCEL only completes the io */
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(mdev);
	}

	return 0;
}
static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_barrier_ack *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return 0;
}
static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	struct drbd_work *w;
	sector_t sector;
	int size;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(mdev, sector, size);
	else
		ov_out_of_sync_print(mdev);

	if (!get_ldev(mdev))
		return 0;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			w->mdev = mdev;
			drbd_queue_work_front(&mdev->tconn->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_out_of_sync_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return 0;
}
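/* The progress-mark test above, worked through (assuming ov_left counts
 * 4KiB bitmap bits): bit 0x200 corresponds to 512 blocks == 2MiB, so
 * (ov_left & 0x200) flips every 2MiB of verified data and the rs marks
 * are only advanced during alternating windows rather than on every
 * single ack. */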
static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return 0;
}
static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr, not_empty = 0;

	do {
		clear_bit(SIGNAL_ASENDER, &tconn->flags);
		flush_signals(current);

		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			kref_get(&mdev->kref);
			rcu_read_unlock();
			if (drbd_finish_peer_reqs(mdev)) {
				kref_put(&mdev->kref, &drbd_minor_destroy);
				return 1;
			}
			kref_put(&mdev->kref, &drbd_minor_destroy);
			rcu_read_lock();
		}
		set_bit(SIGNAL_ASENDER, &tconn->flags);

		spin_lock_irq(&tconn->req_lock);
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			not_empty = !list_empty(&mdev->done_ee);
			if (not_empty)
				break;
		}
		spin_unlock_irq(&tconn->req_lock);
		rcu_read_unlock();
	} while (not_empty);

	return 0;
}
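/* Note on the locking dance above: drbd_finish_peer_reqs() may block, which
 * is not allowed inside an RCU read-side critical section.  kref_get() pins
 * the mdev so rcu_read_lock() can be dropped across the blocking call and
 * re-acquired before the idr walk continues; the second walk only peeks at
 * done_ee and therefore stays under req_lock. */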
struct asender_cmd {
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};

static struct asender_cmd asender_tbl[] = {
	[P_PING]	    = { 0, got_Ping },
	[P_PING_ACK]	    = { 0, got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_WRITE]   = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
};
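/* Dispatch sketch for the table above: drbd_asender() looks up
 * asender_tbl[pi.cmd] after decode_header() and then expects
 * header_size + pkt_size bytes in total before calling fn().  E.g. a
 * P_BARRIER_ACK carries sizeof(struct p_barrier_ack) of payload and ends
 * up in got_BarrierAck(); P_PING and P_PING_ACK carry no payload at all. */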
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct asender_cmd *cmd = NULL;
	struct packet_info pi;
	int rv;
	void *buf    = tconn->meta.rbuf;
	int received = 0;
	unsigned int header_size = drbd_header_size(tconn);
	int expect   = header_size;
	bool ping_timeout_active = false;
	struct net_conf *nc;
	int ping_timeo, tcp_cork, ping_int;

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */
	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);
		ping_timeo = nc->ping_timeo;
		tcp_cork = nc->tcp_cork;
		ping_int = nc->ping_int;
		rcu_read_unlock();

		if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
			if (drbd_send_ping(tconn)) {
				conn_err(tconn, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
			ping_timeout_active = true;
		}
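		/* Unit note for the timeout above: nc->ping_timeo is
		 * configured in tenths of a second, hence "* HZ / 10";
		 * e.g. the default of 5 arms a 500ms receive timeout
		 * while waiting for the P_PING_ACK. */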
		/* TODO: conditionally cork; it may hurt latency if we cork without
		   much to send */
		if (tcp_cork)
			drbd_tcp_cork(tconn->meta.socket);
		if (tconn_finish_peer_reqs(tconn)) {
			conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
			goto reconnect;
		}
		/* but unconditionally uncork unless disabled */
		if (tcp_cork)
			drbd_tcp_uncork(tconn->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &tconn->flags);

		flush_signals(current);
		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv  < expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			conn_err(tconn, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(tconn->last_received,
				jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				conn_err(tconn, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &tconn->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}
		if (received == expect && cmd == NULL) {
			if (decode_header(tconn, tconn->meta.rbuf, &pi))
				goto reconnect;
			/* bounds-check pi.cmd before indexing the table */
			if (pi.cmd >= ARRAY_SIZE(asender_tbl) ||
			    !asender_tbl[pi.cmd].fn) {
				conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
					 cmdname(pi.cmd), pi.cmd);
				goto disconnect;
			}
			cmd = &asender_tbl[pi.cmd];
			expect = header_size + cmd->pkt_size;
			if (pi.size != expect - header_size) {
				conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
					 pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			bool err;

			err = cmd->fn(tconn, &pi);
			if (err) {
				conn_err(tconn, "%pf failed\n", cmd->fn);
				goto reconnect;
			}

			tconn->last_received = jiffies;

			if (cmd == &asender_tbl[P_PING_ACK]) {
				/* restore idle timeout */
				tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
				ping_timeout_active = false;
			}

			buf	 = tconn->meta.rbuf;
			received = 0;
			expect	 = header_size;
			cmd	 = NULL;
		}
	}
	if (0) {
reconnect:
		conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
	}
	if (0) {
disconnect:
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	clear_bit(SIGNAL_ASENDER, &tconn->flags);

	conn_info(tconn, "asender terminated\n");

	return 0;
}