drbd: Fixed processing of disk-barrier, disk-flushes and disk-drain
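
Barriers, flushes and drains are properties of the whole replication link, not of a single volume. The hunks below therefore move the epoch bookkeeping (current_epoch, epochs, epoch_lock) and the write_ordering method from struct drbd_conf to struct drbd_tconn, let drbd_flush() and drbd_bump_write_ordering() walk every attached volume of the connection, and read the disk-flushes/disk-drain settings from the RCU-protected disk_conf instead of the old no_disk_flush/no_disk_drain fields; receive_Barrier() now waits for the active_ee lists of all volumes via conn_wait_active_ee_empty(). Note that this blobdiff spans more than the headline change and also picks up related connection-rework hunks from neighbouring commits (connect_int, discard_my_data, tentative, per-connection addresses, peer_integrity_tfm, RCU-managed net_conf/disk_conf updates).

Several of the new per-connection helpers (conn_connect(), drbd_flush(), conn_wait_active_ee_empty()) share one iteration idiom: walk tconn->volumes under rcu_read_lock(), pin the current device with kref_get() so the RCU read lock can be dropped around sleeping work, then re-acquire it before advancing. Roughly (a simplified sketch; the per-volume work and error handling are elided):

        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);          /* keep mdev alive ...          */
                rcu_read_unlock();              /* ... so we are allowed to sleep */

                /* per-volume work that may block, e.g. blkdev_issue_flush() */

                kref_put(&mdev->kref, &drbd_minor_destroy);
                rcu_read_lock();                /* back under RCU before advancing */
        }
        rcu_read_unlock();
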
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 4665ad79b4aeb70b0f443cc0562dbd83727e0505..e8cd4c4acc65df5b1d4501d8a643d73aa6b52f55 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -63,9 +63,9 @@ enum finish_epoch {
 
 static int drbd_do_features(struct drbd_tconn *tconn);
 static int drbd_do_auth(struct drbd_tconn *tconn);
-static int drbd_disconnected(int vnr, void *p, void *data);
+static int drbd_disconnected(struct drbd_conf *mdev);
 
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
 static int e_end_block(struct drbd_work *, int);
 
 
@@ -487,6 +487,7 @@ static int drbd_accept(const char **what, struct socket *sock, struct socket **n
                goto out;
        }
        (*newsock)->ops  = sock->ops;
+       __module_get((*newsock)->ops->owner);
 
 out:
        return err;
@@ -617,7 +618,7 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
        struct sockaddr_in6 peer_in6;
        struct net_conf *nc;
        int err, peer_addr_len, my_addr_len;
-       int sndbuf_size, rcvbuf_size, try_connect_int;
+       int sndbuf_size, rcvbuf_size, connect_int;
        int disconnect_on_error = 1;
 
        rcu_read_lock();
@@ -626,23 +627,21 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
                rcu_read_unlock();
                return NULL;
        }
-
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
-       try_connect_int = nc->try_connect_int;
+       connect_int = nc->connect_int;
+       rcu_read_unlock();
 
-       my_addr_len = min_t(int, nc->my_addr_len, sizeof(src_in6));
-       memcpy(&src_in6, nc->my_addr, my_addr_len);
+       my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
+       memcpy(&src_in6, &tconn->my_addr, my_addr_len);
 
-       if (((struct sockaddr *)nc->my_addr)->sa_family == AF_INET6)
+       if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
                src_in6.sin6_port = 0;
        else
                ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
 
-       peer_addr_len = min_t(int, nc->peer_addr_len, sizeof(src_in6));
-       memcpy(&peer_in6, nc->peer_addr, peer_addr_len);
-
-       rcu_read_unlock();
+       peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
+       memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
 
        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
@@ -653,7 +652,7 @@ static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
        }
 
        sock->sk->sk_rcvtimeo =
-       sock->sk->sk_sndtimeo = try_connect_int * HZ;
+       sock->sk->sk_sndtimeo = connect_int * HZ;
        drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
 
        /* explicitly bind to the configured IP as source IP
@@ -702,7 +701,7 @@ out:
 static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
 {
        int timeo, err, my_addr_len;
-       int sndbuf_size, rcvbuf_size, try_connect_int;
+       int sndbuf_size, rcvbuf_size, connect_int;
        struct socket *s_estab = NULL, *s_listen;
        struct sockaddr_in6 my_addr;
        struct net_conf *nc;
@@ -714,15 +713,14 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
                rcu_read_unlock();
                return NULL;
        }
-
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
-       try_connect_int = nc->try_connect_int;
-
-       my_addr_len = min_t(int, nc->my_addr_len, sizeof(struct sockaddr_in6));
-       memcpy(&my_addr, nc->my_addr, my_addr_len);
+       connect_int = nc->connect_int;
        rcu_read_unlock();
 
+       my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
+       memcpy(&my_addr, &tconn->my_addr, my_addr_len);
+
        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
                SOCK_STREAM, IPPROTO_TCP, &s_listen);
@@ -731,7 +729,7 @@ static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
                goto out;
        }
 
-       timeo = try_connect_int * HZ;
+       timeo = connect_int * HZ;
        timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
 
        s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
@@ -811,9 +809,8 @@ static int drbd_socket_okay(struct socket **sock)
 }
 /* Gets called if a connection is established, or if a new minor gets created
    in a connection */
-int drbd_connected(int vnr, void *p, void *data)
+int drbd_connected(struct drbd_conf *mdev)
 {
-       struct drbd_conf *mdev = (struct drbd_conf *)p;
        int err;
 
        atomic_set(&mdev->packet_seq, 0);
@@ -829,7 +826,7 @@ int drbd_connected(int vnr, void *p, void *data)
        if (!err)
                err = drbd_send_uuids(mdev);
        if (!err)
-               err = drbd_send_state(mdev);
+               err = drbd_send_current_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);
        mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
@@ -844,15 +841,26 @@ int drbd_connected(int vnr, void *p, void *data)
  *     no point in trying again, please go standalone.
  *  -2 We do not have a network config...
  */
-static int drbd_connect(struct drbd_tconn *tconn)
+static int conn_connect(struct drbd_tconn *tconn)
 {
-       struct socket *sock, *msock;
+       struct drbd_socket sock, msock;
+       struct drbd_conf *mdev;
        struct net_conf *nc;
-       int timeout, try, h, ok;
+       int vnr, timeout, try, h, ok;
+       bool discard_my_data;
 
        if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
                return -2;
 
+       mutex_init(&sock.mutex);
+       sock.sbuf = tconn->data.sbuf;
+       sock.rbuf = tconn->data.rbuf;
+       sock.socket = NULL;
+       mutex_init(&msock.mutex);
+       msock.sbuf = tconn->meta.sbuf;
+       msock.rbuf = tconn->meta.rbuf;
+       msock.socket = NULL;
+
        clear_bit(DISCARD_CONCURRENT, &tconn->flags);
 
        /* Assume that the peer only understands protocol 80 until we know better.  */
@@ -871,22 +879,26 @@ static int drbd_connect(struct drbd_tconn *tconn)
                }
 
                if (s) {
-                       if (!tconn->data.socket) {
-                               tconn->data.socket = s;
-                               send_first_packet(tconn, &tconn->data, P_INITIAL_DATA);
-                       } else if (!tconn->meta.socket) {
-                               tconn->meta.socket = s;
-                               send_first_packet(tconn, &tconn->meta, P_INITIAL_META);
+                       if (!sock.socket) {
+                               sock.socket = s;
+                               send_first_packet(tconn, &sock, P_INITIAL_DATA);
+                       } else if (!msock.socket) {
+                               msock.socket = s;
+                               send_first_packet(tconn, &msock, P_INITIAL_META);
                        } else {
-                               conn_err(tconn, "Logic error in drbd_connect()\n");
+                               conn_err(tconn, "Logic error in conn_connect()\n");
                                goto out_release_sockets;
                        }
                }
 
-               if (tconn->data.socket && tconn->meta.socket) {
-                       schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
-                       ok = drbd_socket_okay(&tconn->data.socket);
-                       ok = drbd_socket_okay(&tconn->meta.socket) && ok;
+               if (sock.socket && msock.socket) {
+                       rcu_read_lock();
+                       nc = rcu_dereference(tconn->net_conf);
+                       timeout = nc->ping_timeo * HZ / 10;
+                       rcu_read_unlock();
+                       schedule_timeout_interruptible(timeout);
+                       ok = drbd_socket_okay(&sock.socket);
+                       ok = drbd_socket_okay(&msock.socket) && ok;
                        if (ok)
                                break;
                }
@@ -895,22 +907,22 @@ retry:
                s = drbd_wait_for_connect(tconn);
                if (s) {
                        try = receive_first_packet(tconn, s);
-                       drbd_socket_okay(&tconn->data.socket);
-                       drbd_socket_okay(&tconn->meta.socket);
+                       drbd_socket_okay(&sock.socket);
+                       drbd_socket_okay(&msock.socket);
                        switch (try) {
                        case P_INITIAL_DATA:
-                               if (tconn->data.socket) {
+                               if (sock.socket) {
                                        conn_warn(tconn, "initial packet S crossed\n");
-                                       sock_release(tconn->data.socket);
+                                       sock_release(sock.socket);
                                }
-                               tconn->data.socket = s;
+                               sock.socket = s;
                                break;
                        case P_INITIAL_META:
-                               if (tconn->meta.socket) {
+                               if (msock.socket) {
                                        conn_warn(tconn, "initial packet M crossed\n");
-                                       sock_release(tconn->meta.socket);
+                                       sock_release(msock.socket);
                                }
-                               tconn->meta.socket = s;
+                               msock.socket = s;
                                set_bit(DISCARD_CONCURRENT, &tconn->flags);
                                break;
                        default:
@@ -930,48 +942,48 @@ retry:
                                goto out_release_sockets;
                }
 
-               if (tconn->data.socket && &tconn->meta.socket) {
-                       ok = drbd_socket_okay(&tconn->data.socket);
-                       ok = drbd_socket_okay(&tconn->meta.socket) && ok;
+               if (sock.socket && &msock.socket) {
+                       ok = drbd_socket_okay(&sock.socket);
+                       ok = drbd_socket_okay(&msock.socket) && ok;
                        if (ok)
                                break;
                }
        } while (1);
 
-       sock  = tconn->data.socket;
-       msock = tconn->meta.socket;
-
-       msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
-       sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
+       sock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */
+       msock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */
 
-       sock->sk->sk_allocation = GFP_NOIO;
-       msock->sk->sk_allocation = GFP_NOIO;
+       sock.socket->sk->sk_allocation = GFP_NOIO;
+       msock.socket->sk->sk_allocation = GFP_NOIO;
 
-       sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
-       msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
+       sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
+       msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
 
        /* NOT YET ...
-        * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
-        * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+        * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
+        * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         * first set it to the P_CONNECTION_FEATURES timeout,
         * which we set to 4x the configured ping_timeout. */
        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
 
-       sock->sk->sk_sndtimeo =
-       sock->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
+       sock.socket->sk->sk_sndtimeo =
+       sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
 
-       msock->sk->sk_rcvtimeo = nc->ping_int*HZ;
+       msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
        timeout = nc->timeout * HZ / 10;
+       discard_my_data = nc->discard_my_data;
        rcu_read_unlock();
 
-       msock->sk->sk_sndtimeo = timeout;
+       msock.socket->sk->sk_sndtimeo = timeout;
 
        /* we don't want delays.
         * we use TCP_CORK where appropriate, though */
-       drbd_tcp_nodelay(sock);
-       drbd_tcp_nodelay(msock);
+       drbd_tcp_nodelay(sock.socket);
+       drbd_tcp_nodelay(msock.socket);
 
+       tconn->data.socket = sock.socket;
+       tconn->meta.socket = msock.socket;
        tconn->last_received = jiffies;
 
        h = drbd_do_features(tconn);
@@ -990,31 +1002,48 @@ retry:
                }
        }
 
+       tconn->data.socket->sk->sk_sndtimeo = timeout;
+       tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+
+       if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
+               return -1;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               kref_get(&mdev->kref);
+               rcu_read_unlock();
+
+               if (discard_my_data)
+                       set_bit(DISCARD_MY_DATA, &mdev->flags);
+               else
+                       clear_bit(DISCARD_MY_DATA, &mdev->flags);
+
+               drbd_connected(mdev);
+               kref_put(&mdev->kref, &drbd_minor_destroy);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+
        if (conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE) < SS_SUCCESS)
                return 0;
 
-       sock->sk->sk_sndtimeo = timeout;
-       sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
-
        drbd_thread_start(&tconn->asender);
 
-       if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
-               return -1;
+       mutex_lock(&tconn->conf_update);
+       /* The discard_my_data flag is a single-shot modifier to the next
+        * connection attempt, the handshake of which is now well underway.
+        * No need for rcu style copying of the whole struct
+        * just to clear a single value. */
+       tconn->net_conf->discard_my_data = 0;
+       mutex_unlock(&tconn->conf_update);
 
-       down_read(&drbd_cfg_rwsem);
-       h = !idr_for_each(&tconn->volumes, drbd_connected, tconn);
-       up_read(&drbd_cfg_rwsem);
        return h;
 
 out_release_sockets:
-       if (tconn->data.socket) {
-               sock_release(tconn->data.socket);
-               tconn->data.socket = NULL;
-       }
-       if (tconn->meta.socket) {
-               sock_release(tconn->meta.socket);
-               tconn->meta.socket = NULL;
-       }
+       if (sock.socket)
+               sock_release(sock.socket);
+       if (msock.socket)
+               sock_release(msock.socket);
        return -1;
 }
 
@@ -1069,21 +1098,37 @@ static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
        return err;
 }
 
-static void drbd_flush(struct drbd_conf *mdev)
+static void drbd_flush(struct drbd_tconn *tconn)
 {
        int rv;
+       struct drbd_conf *mdev;
+       int vnr;
 
-       if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
-               rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
-                                       NULL);
-               if (rv) {
-                       dev_err(DEV, "local disk flush failed with status %d\n", rv);
-                       /* would rather check on EOPNOTSUPP, but that is not reliable.
-                        * don't try again for ANY return value != 0
-                        * if (rv == -EOPNOTSUPP) */
-                       drbd_bump_write_ordering(mdev, WO_drain_io);
+       if (tconn->write_ordering >= WO_bdev_flush) {
+               rcu_read_lock();
+               idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+                       if (!get_ldev(mdev))
+                               continue;
+                       kref_get(&mdev->kref);
+                       rcu_read_unlock();
+
+                       rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
+                                       GFP_NOIO, NULL);
+                       if (rv) {
+                               dev_info(DEV, "local disk flush failed with status %d\n", rv);
+                               /* would rather check on EOPNOTSUPP, but that is not reliable.
+                                * don't try again for ANY return value != 0
+                                * if (rv == -EOPNOTSUPP) */
+                               drbd_bump_write_ordering(tconn, WO_drain_io);
+                       }
+                       put_ldev(mdev);
+                       kref_put(&mdev->kref, &drbd_minor_destroy);
+
+                       rcu_read_lock();
+                       if (rv)
+                               break;
                }
-               put_ldev(mdev);
+               rcu_read_unlock();
        }
 }
 
@@ -1093,7 +1138,7 @@ static void drbd_flush(struct drbd_conf *mdev)
  * @epoch:     Epoch object.
  * @ev:                Epoch event.
  */
-static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
+static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
                                               struct drbd_epoch *epoch,
                                               enum epoch_event ev)
 {
@@ -1101,7 +1146,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
        struct drbd_epoch *next_epoch;
        enum finish_epoch rv = FE_STILL_LIVE;
 
-       spin_lock(&mdev->epoch_lock);
+       spin_lock(&tconn->epoch_lock);
        do {
                next_epoch = NULL;
 
@@ -1121,19 +1166,24 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
 
                if (epoch_size != 0 &&
                    atomic_read(&epoch->active) == 0 &&
-                   test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
+                   (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
                        if (!(ev & EV_CLEANUP)) {
-                               spin_unlock(&mdev->epoch_lock);
-                               drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
-                               spin_lock(&mdev->epoch_lock);
+                               spin_unlock(&tconn->epoch_lock);
+                               drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
+                               spin_lock(&tconn->epoch_lock);
                        }
-                       dec_unacked(mdev);
+#if 0
+                       /* FIXME: dec unacked on connection, once we have
+                        * something to count pending connection packets in. */
+                       if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
+                               dec_unacked(epoch->tconn);
+#endif
 
-                       if (mdev->current_epoch != epoch) {
+                       if (tconn->current_epoch != epoch) {
                                next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
                                list_del(&epoch->list);
                                ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
-                               mdev->epochs--;
+                               tconn->epochs--;
                                kfree(epoch);
 
                                if (rv == FE_STILL_LIVE)
@@ -1144,7 +1194,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                                /* atomic_set(&epoch->active, 0); is already zero */
                                if (rv == FE_STILL_LIVE)
                                        rv = FE_RECYCLED;
-                               wake_up(&mdev->ee_wait);
                        }
                }
 
@@ -1154,34 +1203,46 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                epoch = next_epoch;
        } while (1);
 
-       spin_unlock(&mdev->epoch_lock);
+       spin_unlock(&tconn->epoch_lock);
 
        return rv;
 }
 
 /**
  * drbd_bump_write_ordering() - Fall back to an other write ordering method
- * @mdev:      DRBD device.
+ * @tconn:     DRBD connection.
  * @wo:                Write ordering method to try.
  */
-void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
+void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
 {
+       struct disk_conf *dc;
+       struct drbd_conf *mdev;
        enum write_ordering_e pwo;
+       int vnr;
        static char *write_ordering_str[] = {
                [WO_none] = "none",
                [WO_drain_io] = "drain",
                [WO_bdev_flush] = "flush",
        };
 
-       pwo = mdev->write_ordering;
+       pwo = tconn->write_ordering;
        wo = min(pwo, wo);
-       if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
-               wo = WO_drain_io;
-       if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
-               wo = WO_none;
-       mdev->write_ordering = wo;
-       if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
-               dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               if (!get_ldev_if_state(mdev, D_ATTACHING))
+                       continue;
+               dc = rcu_dereference(mdev->ldev->disk_conf);
+
+               if (wo == WO_bdev_flush && !dc->disk_flushes)
+                       wo = WO_drain_io;
+               if (wo == WO_drain_io && !dc->disk_drain)
+                       wo = WO_none;
+               put_ldev(mdev);
+       }
+       rcu_read_unlock();
+       tconn->write_ordering = wo;
+       if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
+               conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
 }
 
 /**
@@ -1294,28 +1355,41 @@ static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
                wake_up(&mdev->misc_wait);
 }
 
-static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
+void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
 {
        struct drbd_conf *mdev;
+       int vnr;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               kref_get(&mdev->kref);
+               rcu_read_unlock();
+               drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+               kref_put(&mdev->kref, &drbd_minor_destroy);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+}
+
+static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
+{
        int rv;
        struct p_barrier *p = pi->data;
        struct drbd_epoch *epoch;
 
-       mdev = vnr_to_mdev(tconn, pi->vnr);
-       if (!mdev)
-               return -EIO;
-
-       inc_unacked(mdev);
-
-       mdev->current_epoch->barrier_nr = p->barrier;
-       rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
+       /* FIXME these are unacked on connection,
+        * not a specific (peer)device.
+        */
+       tconn->current_epoch->barrier_nr = p->barrier;
+       tconn->current_epoch->tconn = tconn;
+       rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
 
        /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
         * the activity log, which means it would not be resynced in case the
         * R_PRIMARY crashes now.
         * Therefore we must send the barrier_ack after the barrier request was
         * completed. */
-       switch (mdev->write_ordering) {
+       switch (tconn->write_ordering) {
        case WO_none:
                if (rv == FE_RECYCLED)
                        return 0;
@@ -1326,29 +1400,23 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
                if (epoch)
                        break;
                else
-                       dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+                       conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
                        /* Fall through */
 
        case WO_bdev_flush:
        case WO_drain_io:
-               drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-               drbd_flush(mdev);
+               conn_wait_active_ee_empty(tconn);
+               drbd_flush(tconn);
 
-               if (atomic_read(&mdev->current_epoch->epoch_size)) {
+               if (atomic_read(&tconn->current_epoch->epoch_size)) {
                        epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
                        if (epoch)
                                break;
                }
 
-               epoch = mdev->current_epoch;
-               wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
-
-               D_ASSERT(atomic_read(&epoch->active) == 0);
-               D_ASSERT(epoch->flags == 0);
-
                return 0;
        default:
-               dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+               conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
                return -EIO;
        }
 
@@ -1356,16 +1424,16 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
        atomic_set(&epoch->epoch_size, 0);
        atomic_set(&epoch->active, 0);
 
-       spin_lock(&mdev->epoch_lock);
-       if (atomic_read(&mdev->current_epoch->epoch_size)) {
-               list_add(&epoch->list, &mdev->current_epoch->list);
-               mdev->current_epoch = epoch;
-               mdev->epochs++;
+       spin_lock(&tconn->epoch_lock);
+       if (atomic_read(&tconn->current_epoch->epoch_size)) {
+               list_add(&epoch->list, &tconn->current_epoch->list);
+               tconn->current_epoch = epoch;
+               tconn->epochs++;
        } else {
                /* The current_epoch got recycled while we allocated this one... */
                kfree(epoch);
        }
-       spin_unlock(&mdev->epoch_lock);
+       spin_unlock(&tconn->epoch_lock);
 
        return 0;
 }
@@ -1384,10 +1452,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
        void *dig_vv = mdev->tconn->int_dig_vv;
        unsigned long *data;
 
-       dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
-               crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
-
-       if (dgs) {
+       dgs = 0;
+       if (mdev->tconn->peer_integrity_tfm) {
+               dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
                /*
                 * FIXME: Receive the incoming digest into the receive buffer
                 *        here, together with its struct p_data?
@@ -1395,10 +1462,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
                err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
                if (err)
                        return NULL;
+               data_size -= dgs;
        }
 
-       data_size -= dgs;
-
        if (!expect(data_size != 0))
                return NULL;
        if (!expect(IS_ALIGNED(data_size, 512)))
@@ -1442,7 +1508,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
        }
 
        if (dgs) {
-               drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
+               drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
                if (memcmp(dig_in, dig_vv, dgs)) {
                        dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
                                (unsigned long long)sector, data_size);
@@ -1491,17 +1557,15 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        void *dig_in = mdev->tconn->int_dig_in;
        void *dig_vv = mdev->tconn->int_dig_vv;
 
-       dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
-               crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;
-
-       if (dgs) {
+       dgs = 0;
+       if (mdev->tconn->peer_integrity_tfm) {
+               dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
                err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
                if (err)
                        return err;
+               data_size -= dgs;
        }
 
-       data_size -= dgs;
-
        /* optimistically update recv_cnt.  if receiving fails below,
         * we disconnect anyways, and counters will be reset. */
        mdev->recv_cnt += data_size>>9;
@@ -1520,7 +1584,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        }
 
        if (dgs) {
-               drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
+               drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
                if (memcmp(dig_in, dig_vv, dgs)) {
                        dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
                        return -EINVAL;
@@ -1606,7 +1670,7 @@ find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
        if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
                return req;
        if (!missing_ok) {
-               dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
+               dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
                        (unsigned long)id, (unsigned long long)sector);
        }
        return NULL;
@@ -1678,30 +1742,6 @@ static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
        return err;
 }
 
-static int w_restart_write(struct drbd_work *w, int cancel)
-{
-       struct drbd_request *req = container_of(w, struct drbd_request, w);
-       struct drbd_conf *mdev = w->mdev;
-       struct bio *bio;
-       unsigned long start_time;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mdev->tconn->req_lock, flags);
-       if (!expect(req->rq_state & RQ_POSTPONED)) {
-               spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
-               return -EIO;
-       }
-       bio = req->master_bio;
-       start_time = req->start_time;
-       /* Postponed requests will not have their master_bio completed!  */
-       __req_mod(req, DISCARD_WRITE, NULL);
-       spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
-
-       while (__drbd_make_request(mdev, bio, start_time))
-               /* retry */ ;
-       return 0;
-}
-
 static void restart_conflicting_writes(struct drbd_conf *mdev,
                                       sector_t sector, int size)
 {
@@ -1715,11 +1755,9 @@ static void restart_conflicting_writes(struct drbd_conf *mdev,
                if (req->rq_state & RQ_LOCAL_PENDING ||
                    !(req->rq_state & RQ_POSTPONED))
                        continue;
-               if (expect(list_empty(&req->w.list))) {
-                       req->w.mdev = mdev;
-                       req->w.cb = w_restart_write;
-                       drbd_queue_work(&mdev->tconn->data.work, &req->w);
-               }
+               /* as it is RQ_POSTPONED, this will cause it to
+                * be queued on the retry workqueue. */
+               __req_mod(req, DISCARD_WRITE, NULL);
        }
 }
 
@@ -1762,7 +1800,7 @@ static int e_end_block(struct drbd_work *w, int cancel)
        } else
                D_ASSERT(drbd_interval_empty(&peer_req->i));
 
-       drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
+       drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
 
        return err;
 }
@@ -1841,6 +1879,33 @@ static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
        }
 }
 
+static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
+{
+       return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
+}
+
+/* maybe change sync_ee into interval trees as well? */
+static bool overlaping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
+{
+       struct drbd_peer_request *rs_req;
+       bool rv = 0;
+
+       spin_lock_irq(&mdev->tconn->req_lock);
+       list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
+               if (overlaps(peer_req->i.sector, peer_req->i.size,
+                            rs_req->i.sector, rs_req->i.size)) {
+                       rv = 1;
+                       break;
+               }
+       }
+       spin_unlock_irq(&mdev->tconn->req_lock);
+
+       if (rv)
+               dev_warn(DEV, "WARN: Avoiding concurrent data/resync write to single sector.\n");
+
+       return rv;
+}
+
 /* Called from receive_Data.
  * Synchronize packets on sock with packets on msock.
  *
@@ -2065,7 +2130,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
 
                err = wait_for_and_update_peer_seq(mdev, peer_seq);
                drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
-               atomic_inc(&mdev->current_epoch->epoch_size);
+               atomic_inc(&tconn->current_epoch->epoch_size);
                err2 = drbd_drain_block(mdev, pi->size);
                if (!err)
                        err = err2;
@@ -2093,11 +2158,11 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
        if (dp_flags & DP_MAY_SET_IN_SYNC)
                peer_req->flags |= EE_MAY_SET_IN_SYNC;
 
-       spin_lock(&mdev->epoch_lock);
-       peer_req->epoch = mdev->current_epoch;
+       spin_lock(&tconn->epoch_lock);
+       peer_req->epoch = tconn->current_epoch;
        atomic_inc(&peer_req->epoch->epoch_size);
        atomic_inc(&peer_req->epoch->active);
-       spin_unlock(&mdev->epoch_lock);
+       spin_unlock(&tconn->epoch_lock);
 
        rcu_read_lock();
        tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
@@ -2122,6 +2187,9 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
        list_add(&peer_req->w.list, &mdev->active_ee);
        spin_unlock_irq(&mdev->tconn->req_lock);
 
+       if (mdev->state.conn == C_SYNC_TARGET)
+               wait_event(mdev->ee_wait, !overlaping_resync_write(mdev, peer_req));
+
        if (mdev->tconn->agreed_pro_version < 100) {
                rcu_read_lock();
                switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
@@ -2170,7 +2238,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
                drbd_al_complete_io(mdev, &peer_req->i);
 
 out_interrupted:
-       drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
+       drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
        put_ldev(mdev);
        drbd_free_peer_req(mdev, peer_req);
        return err;
@@ -2194,9 +2262,14 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
        struct lc_element *tmp;
        int curr_events;
        int throttle = 0;
+       unsigned int c_min_rate;
+
+       rcu_read_lock();
+       c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
+       rcu_read_unlock();
 
        /* feature disabled? */
-       if (mdev->ldev->dc.c_min_rate == 0)
+       if (c_min_rate == 0)
                return 0;
 
        spin_lock_irq(&mdev->al_lock);
@@ -2236,7 +2309,7 @@ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
                db = mdev->rs_mark_left[i] - rs_left;
                dbdt = Bit2KB(db/dt);
 
-               if (dbdt > mdev->ldev->dc.c_min_rate)
+               if (dbdt > c_min_rate)
                        throttle = 1;
        }
        return throttle;
@@ -2826,7 +2899,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
        enum drbd_conns rv = C_MASK;
        enum drbd_disk_state mydisk;
        struct net_conf *nc;
-       int hg, rule_nr, rr_conflict, dry_run;
+       int hg, rule_nr, rr_conflict, tentative;
 
        mydisk = mdev->state.disk;
        if (mydisk == D_NEGOTIATING)
@@ -2895,9 +2968,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
        }
 
        if (hg == -100) {
-               if (nc->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
+               if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
                        hg = -1;
-               if (!nc->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
+               if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
                        hg = 1;
 
                if (abs(hg) < 100)
@@ -2906,7 +2979,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
                             (hg < 0) ? "peer" : "this");
        }
        rr_conflict = nc->rr_conflict;
-       dry_run = nc->dry_run;
+       tentative = nc->tentative;
        rcu_read_unlock();
 
        if (hg == -100) {
@@ -2939,7 +3012,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
                }
        }
 
-       if (dry_run || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
+       if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
                if (hg == 0)
                        dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
                else
@@ -2971,35 +3044,29 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
        return rv;
 }
 
-/* returns 1 if invalid */
-static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
+static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
 {
        /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
-       if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
-           (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
-               return 0;
+       if (peer == ASB_DISCARD_REMOTE)
+               return ASB_DISCARD_LOCAL;
 
        /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
-       if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
-           self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
-               return 1;
+       if (peer == ASB_DISCARD_LOCAL)
+               return ASB_DISCARD_REMOTE;
 
        /* everything else is valid if they are equal on both sides. */
-       if (peer == self)
-               return 0;
-
-       /* everything es is invalid. */
-       return 1;
+       return peer;
 }
 
 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
 {
        struct p_protocol *p = pi->data;
-       int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
-       int p_want_lose, p_two_primaries, cf;
-       char p_integrity_alg[SHARED_SECRET_MAX] = "";
-       unsigned char *my_alg;
-       struct net_conf *nc;
+       enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
+       int p_proto, p_discard_my_data, p_two_primaries, cf;
+       struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
+       char integrity_alg[SHARED_SECRET_MAX] = "";
+       struct crypto_hash *peer_integrity_tfm = NULL;
+       void *int_dig_in = NULL, *int_dig_vv = NULL;
 
        p_proto         = be32_to_cpu(p->protocol);
        p_after_sb_0p   = be32_to_cpu(p->after_sb_0p);
@@ -3007,70 +3074,136 @@ static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
        p_after_sb_2p   = be32_to_cpu(p->after_sb_2p);
        p_two_primaries = be32_to_cpu(p->two_primaries);
        cf              = be32_to_cpu(p->conn_flags);
-       p_want_lose = cf & CF_WANT_LOSE;
+       p_discard_my_data = cf & CF_DISCARD_MY_DATA;
 
-       clear_bit(CONN_DRY_RUN, &tconn->flags);
+       if (tconn->agreed_pro_version >= 87) {
+               int err;
 
-       if (cf & CF_DRY_RUN)
-               set_bit(CONN_DRY_RUN, &tconn->flags);
+               if (pi->size > sizeof(integrity_alg))
+                       return -EIO;
+               err = drbd_recv_all(tconn, integrity_alg, pi->size);
+               if (err)
+                       return err;
+               integrity_alg[SHARED_SECRET_MAX - 1] = 0;
+       }
 
-       rcu_read_lock();
-       nc = rcu_dereference(tconn->net_conf);
+       if (pi->cmd != P_PROTOCOL_UPDATE) {
+               clear_bit(CONN_DRY_RUN, &tconn->flags);
 
-       if (p_proto != nc->wire_protocol && tconn->agreed_pro_version < 100) {
-               conn_err(tconn, "incompatible communication protocols\n");
-               goto disconnect_rcu_unlock;
-       }
+               if (cf & CF_DRY_RUN)
+                       set_bit(CONN_DRY_RUN, &tconn->flags);
 
-       if (cmp_after_sb(p_after_sb_0p, nc->after_sb_0p)) {
-               conn_err(tconn, "incompatible after-sb-0pri settings\n");
-               goto disconnect_rcu_unlock;
-       }
+               rcu_read_lock();
+               nc = rcu_dereference(tconn->net_conf);
 
-       if (cmp_after_sb(p_after_sb_1p, nc->after_sb_1p)) {
-               conn_err(tconn, "incompatible after-sb-1pri settings\n");
-               goto disconnect_rcu_unlock;
-       }
+               if (p_proto != nc->wire_protocol) {
+                       conn_err(tconn, "incompatible %s settings\n", "protocol");
+                       goto disconnect_rcu_unlock;
+               }
 
-       if (cmp_after_sb(p_after_sb_2p, nc->after_sb_2p)) {
-               conn_err(tconn, "incompatible after-sb-2pri settings\n");
-               goto disconnect_rcu_unlock;
-       }
+               if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
+                       conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
+                       goto disconnect_rcu_unlock;
+               }
 
-       if (p_want_lose && nc->want_lose) {
-               conn_err(tconn, "both sides have the 'want_lose' flag set\n");
-               goto disconnect_rcu_unlock;
-       }
+               if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
+                       conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
+                       goto disconnect_rcu_unlock;
+               }
+
+               if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
+                       conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
+                       goto disconnect_rcu_unlock;
+               }
 
-       if (p_two_primaries != nc->two_primaries) {
-               conn_err(tconn, "incompatible setting of the two-primaries options\n");
-               goto disconnect_rcu_unlock;
+               if (p_discard_my_data && nc->discard_my_data) {
+                       conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
+                       goto disconnect_rcu_unlock;
+               }
+
+               if (p_two_primaries != nc->two_primaries) {
+                       conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
+                       goto disconnect_rcu_unlock;
+               }
+
+               if (strcmp(integrity_alg, nc->integrity_alg)) {
+                       conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
+                       goto disconnect_rcu_unlock;
+               }
+
+               rcu_read_unlock();
        }
 
-       my_alg = nc->integrity_alg;
-       rcu_read_unlock();
+       if (integrity_alg[0]) {
+               int hash_size;
 
-       if (tconn->agreed_pro_version >= 87) {
-               int err;
+               /*
+                * We can only change the peer data integrity algorithm
+                * here.  Changing our own data integrity algorithm
+                * requires that we send a P_PROTOCOL_UPDATE packet at
+                * the same time; otherwise, the peer has no way to
+                * tell between which packets the algorithm should
+                * change.
+                */
 
-               err = drbd_recv_all(tconn, p_integrity_alg, pi->size);
-               if (err)
-                       return err;
+               peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+               if (!peer_integrity_tfm) {
+                       conn_err(tconn, "peer data-integrity-alg %s not supported\n",
+                                integrity_alg);
+                       goto disconnect;
+               }
 
-               p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
-               if (strcmp(p_integrity_alg, my_alg)) {
-                       conn_err(tconn, "incompatible setting of the data-integrity-alg\n");
+               hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+               int_dig_in = kmalloc(hash_size, GFP_KERNEL);
+               int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
+               if (!(int_dig_in && int_dig_vv)) {
+                       conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
                        goto disconnect;
                }
-               conn_info(tconn, "data-integrity-alg: %s\n",
-                    my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
        }
 
+       new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
+       if (!new_net_conf) {
+               conn_err(tconn, "Allocation of new net_conf failed\n");
+               goto disconnect;
+       }
+
+       mutex_lock(&tconn->data.mutex);
+       mutex_lock(&tconn->conf_update);
+       old_net_conf = tconn->net_conf;
+       *new_net_conf = *old_net_conf;
+
+       new_net_conf->wire_protocol = p_proto;
+       new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
+       new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
+       new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
+       new_net_conf->two_primaries = p_two_primaries;
+
+       rcu_assign_pointer(tconn->net_conf, new_net_conf);
+       mutex_unlock(&tconn->conf_update);
+       mutex_unlock(&tconn->data.mutex);
+
+       crypto_free_hash(tconn->peer_integrity_tfm);
+       kfree(tconn->int_dig_in);
+       kfree(tconn->int_dig_vv);
+       tconn->peer_integrity_tfm = peer_integrity_tfm;
+       tconn->int_dig_in = int_dig_in;
+       tconn->int_dig_vv = int_dig_vv;
+
+       if (strcmp(old_net_conf->integrity_alg, integrity_alg))
+               conn_info(tconn, "peer data-integrity-alg: %s\n",
+                         integrity_alg[0] ? integrity_alg : "(none)");
+
+       synchronize_rcu();
+       kfree(old_net_conf);
        return 0;
 
 disconnect_rcu_unlock:
        rcu_read_unlock();
 disconnect:
+       crypto_free_hash(peer_integrity_tfm);
+       kfree(int_dig_in);
+       kfree(int_dig_vv);
        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        return -EIO;
 }
@@ -3094,11 +3227,6 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
                        alg, name, PTR_ERR(tfm));
                return tfm;
        }
-       if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
-               crypto_free_hash(tfm);
-               dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
-               return ERR_PTR(-EINVAL);
-       }
        return tfm;
 }
 
@@ -3135,8 +3263,8 @@ static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info
  */
 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
 {
-       conn_warn(tconn, "Volume %u unknown; ignoring %s packet\n",
-                 pi->vnr, cmdname(pi->cmd));
+       conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
+                 cmdname(pi->cmd), pi->vnr);
        return ignore_remaining_packet(tconn, pi);
 }
 
@@ -3147,9 +3275,10 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
        unsigned int header_size, data_size, exp_max_sz;
        struct crypto_hash *verify_tfm = NULL;
        struct crypto_hash *csums_tfm = NULL;
-       struct net_conf *old_conf, *new_conf = NULL;
+       struct net_conf *old_net_conf, *new_net_conf = NULL;
+       struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
        const int apv = tconn->agreed_pro_version;
-       int *rs_plan_s = NULL;
+       struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
        int fifo_size = 0;
        int err;
 
@@ -3190,9 +3319,21 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
        if (err)
                return err;
 
+       mutex_lock(&mdev->tconn->conf_update);
+       old_net_conf = mdev->tconn->net_conf;
        if (get_ldev(mdev)) {
-               mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
-               put_ldev(mdev);
+               new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+               if (!new_disk_conf) {
+                       put_ldev(mdev);
+                       mutex_unlock(&mdev->tconn->conf_update);
+                       dev_err(DEV, "Allocation of new disk_conf failed\n");
+                       return -ENOMEM;
+               }
+
+               old_disk_conf = mdev->ldev->disk_conf;
+               *new_disk_conf = *old_disk_conf;
+
+               new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
        }
 
        if (apv >= 88) {
@@ -3201,13 +3342,13 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
                                dev_err(DEV, "verify-alg too long, "
                                    "peer wants %u, accepting only %u byte\n",
                                                data_size, SHARED_SECRET_MAX);
-                               return -EIO;
+                               err = -EIO;
+                               goto reconnect;
                        }
 
                        err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
                        if (err)
-                               return err;
-
+                               goto reconnect;
                        /* we expect NUL terminated string */
                        /* but just in case someone tries to be evil */
                        D_ASSERT(p->verify_alg[data_size-1] == 0);
@@ -3222,13 +3363,10 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
                        p->csums_alg[SHARED_SECRET_MAX-1] = 0;
                }
 
-               mutex_lock(&mdev->tconn->net_conf_update);
-               old_conf = mdev->tconn->net_conf;
-
-               if (strcmp(old_conf->verify_alg, p->verify_alg)) {
+               if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
                        if (mdev->state.conn == C_WF_REPORT_PARAMS) {
                                dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
-                                   old_conf->verify_alg, p->verify_alg);
+                                   old_net_conf->verify_alg, p->verify_alg);
                                goto disconnect;
                        }
                        verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -3239,10 +3377,10 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
                        }
                }
 
-               if (apv >= 89 && strcmp(old_conf->csums_alg, p->csums_alg)) {
+               if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
                        if (mdev->state.conn == C_WF_REPORT_PARAMS) {
                                dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
-                                   old_conf->csums_alg, p->csums_alg);
+                                   old_net_conf->csums_alg, p->csums_alg);
                                goto disconnect;
                        }
                        csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
@@ -3253,69 +3391,84 @@ static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
                        }
                }
 
-               if (apv > 94 && get_ldev(mdev)) {
-                       mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
-                       mdev->ldev->dc.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
-                       mdev->ldev->dc.c_delay_target = be32_to_cpu(p->c_delay_target);
-                       mdev->ldev->dc.c_fill_target = be32_to_cpu(p->c_fill_target);
-                       mdev->ldev->dc.c_max_rate = be32_to_cpu(p->c_max_rate);
-
-                       fifo_size = (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
-                       if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
-                               rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
-                               if (!rs_plan_s) {
+               if (apv > 94 && new_disk_conf) {
+                       new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
+                       new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
+                       new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
+                       new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
+
+                       fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+                       if (fifo_size != mdev->rs_plan_s->size) {
+                               new_plan = fifo_alloc(fifo_size);
+                               if (!new_plan) {
                                        dev_err(DEV, "kmalloc of fifo_buffer failed");
                                        put_ldev(mdev);
                                        goto disconnect;
                                }
                        }
-                       put_ldev(mdev);
                }
 
                if (verify_tfm || csums_tfm) {
-                       new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
-                       if (!new_conf) {
+                       new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
+                       if (!new_net_conf) {
                                dev_err(DEV, "Allocation of new net_conf failed\n");
                                goto disconnect;
                        }
 
-                       *new_conf = *old_conf;
+                       *new_net_conf = *old_net_conf;
 
                        if (verify_tfm) {
-                               strcpy(new_conf->verify_alg, p->verify_alg);
-                               new_conf->verify_alg_len = strlen(p->verify_alg) + 1;
+                               strcpy(new_net_conf->verify_alg, p->verify_alg);
+                               new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
                                crypto_free_hash(mdev->tconn->verify_tfm);
                                mdev->tconn->verify_tfm = verify_tfm;
                                dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
                        }
                        if (csums_tfm) {
-                               strcpy(new_conf->csums_alg, p->csums_alg);
-                               new_conf->csums_alg_len = strlen(p->csums_alg) + 1;
+                               strcpy(new_net_conf->csums_alg, p->csums_alg);
+                               new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
                                crypto_free_hash(mdev->tconn->csums_tfm);
                                mdev->tconn->csums_tfm = csums_tfm;
                                dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
                        }
-                       rcu_assign_pointer(tconn->net_conf, new_conf);
-               }
-               mutex_unlock(&mdev->tconn->net_conf_update);
-               if (new_conf) {
-                       synchronize_rcu();
-                       kfree(old_conf);
+                       rcu_assign_pointer(tconn->net_conf, new_net_conf);
                }
+       }
 
-               spin_lock(&mdev->peer_seq_lock);
-               if (fifo_size != mdev->rs_plan_s.size) {
-                       kfree(mdev->rs_plan_s.values);
-                       mdev->rs_plan_s.values = rs_plan_s;
-                       mdev->rs_plan_s.size   = fifo_size;
-                       mdev->rs_planed = 0;
-               }
-               spin_unlock(&mdev->peer_seq_lock);
+       if (new_disk_conf) {
+               rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+               put_ldev(mdev);
        }
+
+       if (new_plan) {
+               old_plan = mdev->rs_plan_s;
+               rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+       }
+
+       mutex_unlock(&mdev->tconn->conf_update);
+       synchronize_rcu();
+       if (new_net_conf)
+               kfree(old_net_conf);
+       kfree(old_disk_conf);
+       kfree(old_plan);
+
        return 0;
 
+reconnect:
+       if (new_disk_conf) {
+               put_ldev(mdev);
+               kfree(new_disk_conf);
+       }
+       mutex_unlock(&mdev->tconn->conf_update);
+       return -EIO;
+
 disconnect:
-       mutex_unlock(&mdev->tconn->net_conf_update);
+       kfree(new_plan);
+       if (new_disk_conf) {
+               put_ldev(mdev);
+               kfree(new_disk_conf);
+       }
+       mutex_unlock(&mdev->tconn->conf_update);
        /* just for completeness: actually not needed,
         * as this is not reached if csums_tfm was ok. */
        crypto_free_hash(csums_tfm);
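
The hunk above moves the sync-parameter handler onto the copy/publish/reclaim idiom this patch uses for both disk_conf and net_conf: build a new_* struct from the old one, publish it with rcu_assign_pointer() while conf_update is held, drop the mutex, then wait one grace period with synchronize_rcu() before kfree()ing the old copy. A minimal, self-contained sketch of that writer-side idiom, with purely illustrative names (example_conf, example_set_timeout) rather than the driver's own types:

/* Writer side: copy the old configuration, modify the copy, publish it,
 * and only free the old copy after a grace period, so readers that still
 * hold a pointer to it remain safe. */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_conf {
        int timeout;                            /* illustrative field */
};

static struct example_conf __rcu *example_conf;
static DEFINE_MUTEX(example_update);            /* plays the role of conf_update */

static int example_set_timeout(int timeout)
{
        struct example_conf *new_conf, *old_conf;

        new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
        if (!new_conf)
                return -ENOMEM;

        mutex_lock(&example_update);
        old_conf = rcu_dereference_protected(example_conf,
                                             lockdep_is_held(&example_update));
        if (old_conf)
                *new_conf = *old_conf;          /* start from the current values */
        new_conf->timeout = timeout;
        rcu_assign_pointer(example_conf, new_conf);     /* publish */
        mutex_unlock(&example_update);

        synchronize_rcu();      /* wait until no reader can still see old_conf */
        kfree(old_conf);
        return 0;
}

The reconnect/disconnect error paths above follow the same rule in reverse: a new_* object that was never published can simply be kfree()d, while anything already made visible through rcu_assign_pointer() must wait out a grace period first.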
@@ -3359,37 +3512,56 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
        mdev->p_size = p_size;
 
        if (get_ldev(mdev)) {
+               rcu_read_lock();
+               my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+               rcu_read_unlock();
+
                warn_if_differ_considerably(mdev, "lower level device sizes",
                           p_size, drbd_get_max_capacity(mdev->ldev));
                warn_if_differ_considerably(mdev, "user requested size",
-                                           p_usize, mdev->ldev->dc.disk_size);
+                                           p_usize, my_usize);
 
                /* if this is the first connect, or an otherwise expected
                 * param exchange, choose the minimum */
                if (mdev->state.conn == C_WF_REPORT_PARAMS)
-                       p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
-                                            p_usize);
-
-               my_usize = mdev->ldev->dc.disk_size;
-
-               if (mdev->ldev->dc.disk_size != p_usize) {
-                       mdev->ldev->dc.disk_size = p_usize;
-                       dev_info(DEV, "Peer sets u_size to %lu sectors\n",
-                            (unsigned long)mdev->ldev->dc.disk_size);
-               }
+                       p_usize = min_not_zero(my_usize, p_usize);
 
                /* Never shrink a device with usable data during connect.
                   But allow online shrinking if we are connected. */
-               if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
-                  drbd_get_capacity(mdev->this_bdev) &&
-                  mdev->state.disk >= D_OUTDATED &&
-                  mdev->state.conn < C_CONNECTED) {
+               if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
+                   drbd_get_capacity(mdev->this_bdev) &&
+                   mdev->state.disk >= D_OUTDATED &&
+                   mdev->state.conn < C_CONNECTED) {
                        dev_err(DEV, "The peer's disk size is too small!\n");
                        conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
-                       mdev->ldev->dc.disk_size = my_usize;
                        put_ldev(mdev);
                        return -EIO;
                }
+
+               if (my_usize != p_usize) {
+                       struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
+
+                       new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+                       if (!new_disk_conf) {
+                               dev_err(DEV, "Allocation of new disk_conf failed\n");
+                               put_ldev(mdev);
+                               return -ENOMEM;
+                       }
+
+                       mutex_lock(&mdev->tconn->conf_update);
+                       old_disk_conf = mdev->ldev->disk_conf;
+                       *new_disk_conf = *old_disk_conf;
+                       new_disk_conf->disk_size = p_usize;
+
+                       rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+                       mutex_unlock(&mdev->tconn->conf_update);
+                       synchronize_rcu();
+                       kfree(old_disk_conf);
+
+                       dev_info(DEV, "Peer sets u_size to %lu sectors\n",
+                                (unsigned long)p_usize);
+               }
+
                put_ldev(mdev);
        }
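
receive_sizes() now reads the configured disk_size through rcu_dereference() instead of dereferencing mdev->ldev->dc directly: take rcu_read_lock(), copy the value out of the protected struct, and drop the lock before doing anything that may block. Reusing the hypothetical example_conf from the sketch after the previous hunk, the read side looks roughly like this:

/* Reader side: never use the rcu_dereference()d pointer after
 * rcu_read_unlock(); copy out the values you need while inside the
 * read-side critical section. */
static int example_get_timeout(void)
{
        struct example_conf *conf;
        int timeout = 0;

        rcu_read_lock();
        conf = rcu_dereference(example_conf);
        if (conf)
                timeout = conf->timeout;
        rcu_read_unlock();

        return timeout;
}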
 
@@ -3519,6 +3691,7 @@ static union drbd_state convert_state(union drbd_state ps)
        union drbd_state ms;
 
        static enum drbd_conns c_tab[] = {
+               [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
                [C_CONNECTED] = C_CONNECTED,
 
                [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
@@ -3621,9 +3794,20 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
        os = ns = drbd_read_state(mdev);
        spin_unlock_irq(&mdev->tconn->req_lock);
 
-       /* peer says his disk is uptodate, while we think it is inconsistent,
-        * and this happens while we think we have a sync going on. */
-       if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
+       /* If some other part of the code (asender thread, timeout)
+        * already decided to close the connection again,
+        * we must not "re-establish" it here. */
+       if (os.conn <= C_TEAR_DOWN)
+               return false;
+
+       /* If this is the "end of sync" confirmation, usually the peer disk
+        * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
+        * set) resync started in PausedSyncT, or if the timing of pause-/
+        * unpause-sync events has been "just right", the peer disk may
+        * transition from D_CONSISTENT to D_UP_TO_DATE as well.
+        */
+       if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
+           real_peer_disk == D_UP_TO_DATE &&
            os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
                /* If we are (becoming) SyncSource, but peer is still in sync
                 * preparation, ignore its uptodate-ness to avoid flapping, it
@@ -3741,13 +3925,11 @@ static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
                        /* Nowadays only used when forcing a node into primary role and
                           setting its disk to UpToDate with that */
                        drbd_send_uuids(mdev);
-                       drbd_send_state(mdev);
+                       drbd_send_current_state(mdev);
                }
        }
 
-       mutex_lock(&mdev->tconn->net_conf_update);
-       mdev->tconn->net_conf->want_lose = 0; /* without copy; single bit op is atomic */
-       mutex_unlock(&mdev->tconn->net_conf_update);
+       clear_bit(DISCARD_MY_DATA, &mdev->flags);
 
        drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
 
@@ -4129,6 +4311,7 @@ static struct data_cmd drbd_cmd_handler[] = {
        [P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
        [P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
        [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
+       [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
 };
 
 static void drbdd(struct drbd_tconn *tconn)
@@ -4146,13 +4329,15 @@ static void drbdd(struct drbd_tconn *tconn)
 
                cmd = &drbd_cmd_handler[pi.cmd];
                if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
-                       conn_err(tconn, "unknown packet type %d, l: %d!\n", pi.cmd, pi.size);
+                       conn_err(tconn, "Unexpected data packet %s (0x%04x)\n",
+                                cmdname(pi.cmd), pi.cmd);
                        goto err_out;
                }
 
                shs = cmd->pkt_size;
                if (pi.size > shs && !cmd->expect_payload) {
-                       conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size);
+                       conn_err(tconn, "No payload expected %s l:%d\n",
+                                cmdname(pi.cmd), pi.size);
                        goto err_out;
                }
 
@@ -4187,21 +4372,41 @@ void conn_flush_workqueue(struct drbd_tconn *tconn)
        wait_for_completion(&barr.done);
 }
 
-static void drbd_disconnect(struct drbd_tconn *tconn)
+static void conn_disconnect(struct drbd_tconn *tconn)
 {
+       struct drbd_conf *mdev;
        enum drbd_conns oc;
-       int rv = SS_UNKNOWN_ERROR;
+       int vnr;
 
        if (tconn->cstate == C_STANDALONE)
                return;
 
+       /* We are about to start the cleanup after connection loss.
+        * Make sure drbd_make_request knows about that.
+        * Usually we should be in some network failure state already,
+        * but just in case we are not, we fix it up here.
+        */
+       conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
+
        /* asender does not clean up anything. it must not interfere, either */
        drbd_thread_stop(&tconn->asender);
        drbd_free_sock(tconn);
 
-       down_read(&drbd_cfg_rwsem);
-       idr_for_each(&tconn->volumes, drbd_disconnected, tconn);
-       up_read(&drbd_cfg_rwsem);
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               kref_get(&mdev->kref);
+               rcu_read_unlock();
+               drbd_disconnected(mdev);
+               kref_put(&mdev->kref, &drbd_minor_destroy);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+
+       if (!list_empty(&tconn->current_epoch->list))
+               conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
+       /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
+       atomic_set(&tconn->current_epoch->epoch_size, 0);
+
        conn_info(tconn, "Connection closed\n");
 
        if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
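
conn_disconnect() above iterates tconn->volumes under rcu_read_lock(), but drbd_disconnected() may sleep, so each mdev is pinned with kref_get() and the RCU read lock is dropped around the per-volume work and re-taken afterwards. A generic sketch of that iteration pattern, with illustrative names (example_obj, example_visit_all) rather than the driver's types:

/* Iterate an IDR of refcounted objects: the RCU read lock keeps the
 * IDR traversal safe, the kref keeps the current object alive while
 * the lock is dropped for work that may sleep. */
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
        struct kref kref;
        /* ... */
};

static void example_release(struct kref *kref)
{
        kfree(container_of(kref, struct example_obj, kref));
}

static void example_visit_all(struct idr *objects,
                              void (*work)(struct example_obj *))
{
        struct example_obj *obj;
        int id;

        rcu_read_lock();
        idr_for_each_entry(objects, obj, id) {
                kref_get(&obj->kref);   /* pin before dropping the RCU lock */
                rcu_read_unlock();

                work(obj);              /* may sleep */

                kref_put(&obj->kref, example_release);
                rcu_read_lock();
        }
        rcu_read_unlock();
}

Resuming the walk after re-taking the lock is safe because idr_for_each_entry() continues from the next id on every iteration rather than from a cached position.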
@@ -4210,30 +4415,16 @@ static void drbd_disconnect(struct drbd_tconn *tconn)
        spin_lock_irq(&tconn->req_lock);
        oc = tconn->cstate;
        if (oc >= C_UNCONNECTED)
-               rv = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+               _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
 
        spin_unlock_irq(&tconn->req_lock);
 
-       if (oc == C_DISCONNECTING) {
-               struct net_conf *old_conf;
-
-               mutex_lock(&tconn->net_conf_update);
-               old_conf = tconn->net_conf;
-               rcu_assign_pointer(tconn->net_conf, NULL);
-               conn_free_crypto(tconn);
-               mutex_unlock(&tconn->net_conf_update);
-
-               synchronize_rcu();
-               kfree(old_conf);
-
-               conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE);
-       }
+       if (oc == C_DISCONNECTING)
+               conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
 }
 
-static int drbd_disconnected(int vnr, void *p, void *data)
+static int drbd_disconnected(struct drbd_conf *mdev)
 {
-       struct drbd_conf *mdev = (struct drbd_conf *)p;
-       enum drbd_fencing_p fp;
        unsigned int i;
 
        /* wait for current activity to cease. */
@@ -4259,8 +4450,6 @@ static int drbd_disconnected(int vnr, void *p, void *data)
        atomic_set(&mdev->rs_pending_cnt, 0);
        wake_up(&mdev->misc_wait);
 
-       del_timer(&mdev->request_timer);
-
        del_timer_sync(&mdev->resync_timer);
        resync_timer_fn((unsigned long)mdev);
 
@@ -4271,6 +4460,11 @@ static int drbd_disconnected(int vnr, void *p, void *data)
 
        drbd_finish_peer_reqs(mdev);
 
+       /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
+          might have queued more work. The flush before drbd_finish_peer_reqs() is
+          necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
+       drbd_flush_workqueue(mdev);
+
        kfree(mdev->p_uuid);
        mdev->p_uuid = NULL;
 
@@ -4279,12 +4473,6 @@ static int drbd_disconnected(int vnr, void *p, void *data)
 
        drbd_md_sync(mdev);
 
-       fp = FP_DONT_CARE;
-       if (get_ldev(mdev)) {
-               fp = mdev->ldev->dc.fencing;
-               put_ldev(mdev);
-       }
-
        /* serialize with bitmap writeout triggered by the state change,
         * if any. */
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
@@ -4311,10 +4499,6 @@ static int drbd_disconnected(int vnr, void *p, void *data)
        D_ASSERT(list_empty(&mdev->sync_ee));
        D_ASSERT(list_empty(&mdev->done_ee));
 
-       /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
-       atomic_set(&mdev->current_epoch->epoch_size, 0);
-       D_ASSERT(list_empty(&mdev->current_epoch->list));
-
        return 0;
 }
 
@@ -4367,7 +4551,7 @@ static int drbd_do_features(struct drbd_tconn *tconn)
 
        if (pi.cmd != P_CONNECTION_FEATURES) {
                conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
-                    cmdname(pi.cmd), pi.cmd);
+                        cmdname(pi.cmd), pi.cmd);
                return -1;
        }
 
@@ -4476,7 +4660,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
 
        if (pi.cmd != P_AUTH_CHALLENGE) {
                conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
-                   cmdname(pi.cmd), pi.cmd);
+                        cmdname(pi.cmd), pi.cmd);
                rv = 0;
                goto fail;
        }
@@ -4535,7 +4719,7 @@ static int drbd_do_auth(struct drbd_tconn *tconn)
 
        if (pi.cmd != P_AUTH_RESPONSE) {
                conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
-                       cmdname(pi.cmd), pi.cmd);
+                        cmdname(pi.cmd), pi.cmd);
                rv = 0;
                goto fail;
        }
@@ -4593,9 +4777,9 @@ int drbdd_init(struct drbd_thread *thi)
        conn_info(tconn, "receiver (re)started\n");
 
        do {
-               h = drbd_connect(tconn);
+               h = conn_connect(tconn);
                if (h == 0) {
-                       drbd_disconnect(tconn);
+                       conn_disconnect(tconn);
                        schedule_timeout_interruptible(HZ);
                }
                if (h == -1) {
@@ -4607,7 +4791,7 @@ int drbdd_init(struct drbd_thread *thi)
        if (h > 0)
                drbdd(tconn);
 
-       drbd_disconnect(tconn);
+       conn_disconnect(tconn);
 
        conn_info(tconn, "receiver terminated\n");
        return 0;
@@ -4642,6 +4826,11 @@ static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
        if (!mdev)
                return -EIO;
 
+       if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
+               D_ASSERT(tconn->agreed_pro_version < 100);
+               return got_conn_RqSReply(tconn, pi);
+       }
+
        if (retcode >= SS_SUCCESS) {
                set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
        } else {
@@ -4810,7 +4999,7 @@ static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
 
        update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
-       dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
+       dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
            (unsigned long long)sector, be32_to_cpu(p->blksize));
 
        return validate_req_change_req_state(mdev, p->block_id, sector,
@@ -4854,21 +5043,22 @@ static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
 
 static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
 {
-       struct drbd_conf *mdev;
        struct p_barrier_ack *p = pi->data;
+       struct drbd_conf *mdev;
+       int vnr;
 
-       mdev = vnr_to_mdev(tconn, pi->vnr);
-       if (!mdev)
-               return -EIO;
-
-       tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));
+       tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
 
-       if (mdev->state.conn == C_AHEAD &&
-           atomic_read(&mdev->ap_in_flight) == 0 &&
-           !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
-               mdev->start_resync_timer.expires = jiffies + HZ;
-               add_timer(&mdev->start_resync_timer);
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               if (mdev->state.conn == C_AHEAD &&
+                   atomic_read(&mdev->ap_in_flight) == 0 &&
+                   !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+                       mdev->start_resync_timer.expires = jiffies + HZ;
+                       add_timer(&mdev->start_resync_timer);
+               }
        }
+       rcu_read_unlock();
 
        return 0;
 }
@@ -4931,30 +5121,33 @@ static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
 static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
 {
        struct drbd_conf *mdev;
-       int i, not_empty = 0;
+       int vnr, not_empty = 0;
 
        do {
                clear_bit(SIGNAL_ASENDER, &tconn->flags);
                flush_signals(current);
-               down_read(&drbd_cfg_rwsem);
-               idr_for_each_entry(&tconn->volumes, mdev, i) {
+
+               rcu_read_lock();
+               idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+                       kref_get(&mdev->kref);
+                       rcu_read_unlock();
                        if (drbd_finish_peer_reqs(mdev)) {
-                               up_read(&drbd_cfg_rwsem);
-                               return 1; /* error */
+                               kref_put(&mdev->kref, &drbd_minor_destroy);
+                               return 1;
                        }
+                       kref_put(&mdev->kref, &drbd_minor_destroy);
+                       rcu_read_lock();
                }
-               up_read(&drbd_cfg_rwsem);
                set_bit(SIGNAL_ASENDER, &tconn->flags);
 
                spin_lock_irq(&tconn->req_lock);
-               rcu_read_lock();
-               idr_for_each_entry(&tconn->volumes, mdev, i) {
+               idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                        not_empty = !list_empty(&mdev->done_ee);
                        if (not_empty)
                                break;
                }
-               rcu_read_unlock();
                spin_unlock_irq(&tconn->req_lock);
+               rcu_read_unlock();
        } while (not_empty);
 
        return 0;
@@ -4997,7 +5190,7 @@ int drbd_asender(struct drbd_thread *thi)
        int expect   = header_size;
        bool ping_timeout_active = false;
        struct net_conf *nc;
-       int ping_timeo, no_cork, ping_int;
+       int ping_timeo, tcp_cork, ping_int;
 
        current->policy = SCHED_RR;  /* Make this a realtime task! */
        current->rt_priority = 2;    /* more important than all other tasks */
@@ -5008,7 +5201,7 @@ int drbd_asender(struct drbd_thread *thi)
                rcu_read_lock();
                nc = rcu_dereference(tconn->net_conf);
                ping_timeo = nc->ping_timeo;
-               no_cork = nc->no_cork;
+               tcp_cork = nc->tcp_cork;
                ping_int = nc->ping_int;
                rcu_read_unlock();
 
@@ -5023,14 +5216,14 @@ int drbd_asender(struct drbd_thread *thi)
 
                /* TODO: conditionally cork; it may hurt latency if we cork without
                   much to send */
-               if (!no_cork)
+               if (tcp_cork)
                        drbd_tcp_cork(tconn->meta.socket);
                if (tconn_finish_peer_reqs(tconn)) {
                        conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
                        goto reconnect;
                }
                /* but unconditionally uncork unless disabled */
-               if (!no_cork)
+               if (tcp_cork)
                        drbd_tcp_uncork(tconn->meta.socket);
 
                /* short circuit, recv_msg would return EINTR anyways. */
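
The cork/uncork pair above batches the asender's small ack packets into fewer TCP segments. drbd_tcp_cork() and drbd_tcp_uncork() amount to toggling TCP_CORK on the in-kernel socket; roughly (this is a sketch using kernel_setsockopt(), not the driver's own setsockopt wrapper):

/* Toggle TCP_CORK on a kernel-side socket: while corked, the stack merges
 * small writes; clearing the flag pushes out whatever is queued. */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/types.h>

static void example_tcp_cork(struct socket *sock, bool cork)
{
        int val = cork ? 1 : 0;

        (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
                                 (char *)&val, sizeof(val));
}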
@@ -5082,8 +5275,8 @@ int drbd_asender(struct drbd_thread *thi)
                                goto reconnect;
                        cmd = &asender_tbl[pi.cmd];
                        if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
-                               conn_err(tconn, "unknown command %d on meta (l: %d)\n",
-                                       pi.cmd, pi.size);
+                               conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
+                                        cmdname(pi.cmd), pi.cmd);
                                goto disconnect;
                        }
                        expect = header_size + cmd->pkt_size;