git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/block/drbd/drbd_receiver.c
drbd: moved req_lock and transfer log from mdev to tconn
[karo-tx-linux.git] / drivers / block / drbd / drbd_receiver.c
index 25d32c5aa50ab58e0b2fdb4e7dfde2377ec96878..af968a0bae077ec134cb90bc32489d34554570c2 100644 (file)
@@ -210,9 +210,9 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
        LIST_HEAD(reclaimed);
        struct drbd_epoch_entry *e, *t;
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_net_ee(mdev, &reclaimed);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        list_for_each_entry_safe(e, t, &reclaimed, w.list)
                drbd_free_net_ee(mdev, e);
@@ -237,7 +237,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
 
        /* Yes, we may run up to @number over max_buffers. If we
         * follow it strictly, the admin will get it wrong anyways. */
-       if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
+       if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
                page = drbd_pp_first_pages_or_try_alloc(mdev, number);
 
        while (page == NULL) {
@@ -245,7 +245,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
 
                drbd_kick_lo_and_reclaim_net(mdev);
 
-               if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
+               if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
                        page = drbd_pp_first_pages_or_try_alloc(mdev, number);
                        if (page)
                                break;
@@ -269,7 +269,7 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool
 }
 
 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+ * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
  * Either links the page chain back to the global pool,
  * or returns all pages to the system. */
 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
@@ -333,14 +333,18 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
        if (!page)
                goto fail;
 
-       INIT_HLIST_NODE(&e->collision);
+       drbd_clear_interval(&e->i);
        e->epoch = NULL;
        e->mdev = mdev;
        e->pages = page;
        atomic_set(&e->pending_bios, 0);
-       e->size = data_size;
+       e->i.size = data_size;
        e->flags = 0;
-       e->sector = sector;
+       e->i.sector = sector;
+       /*
+        * The block_id is opaque to the receiver.  It is not endianness
+        * converted, and sent back to the sender unchanged.
+        */
        e->block_id = id;
 
        return e;
@@ -356,7 +360,7 @@ void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int i
                kfree(e->digest);
        drbd_pp_free(mdev, e->pages, is_net);
        D_ASSERT(atomic_read(&e->pending_bios) == 0);
-       D_ASSERT(hlist_unhashed(&e->collision));
+       D_ASSERT(drbd_interval_empty(&e->i));
        mempool_free(e, drbd_ee_mempool);
 }
 
@@ -367,9 +371,9 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
        int count = 0;
        int is_net = list == &mdev->net_ee;
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        list_splice_init(list, &work_list);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        list_for_each_entry_safe(e, t, &work_list, w.list) {
                drbd_free_some_ee(mdev, e, is_net);
@@ -381,7 +385,7 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
 
 /*
  * This function is called from _asender only_
- * but see also comments in _req_mod(,barrier_acked)
+ * but see also comments in _req_mod(,BARRIER_ACKED)
  * and receive_Barrier.
  *
  * Move entries from net_ee to done_ee, if ready.
@@ -395,10 +399,10 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
        struct drbd_epoch_entry *e, *t;
        int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_net_ee(mdev, &reclaimed);
        list_splice_init(&mdev->done_ee, &work_list);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        list_for_each_entry_safe(e, t, &reclaimed, w.list)
                drbd_free_net_ee(mdev, e);
@@ -425,18 +429,18 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
         * and calling prepare_to_wait in the fast path */
        while (!list_empty(head)) {
                prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
-               spin_unlock_irq(&mdev->req_lock);
+               spin_unlock_irq(&mdev->tconn->req_lock);
                io_schedule();
                finish_wait(&mdev->ee_wait, &wait);
-               spin_lock_irq(&mdev->req_lock);
+               spin_lock_irq(&mdev->tconn->req_lock);
        }
 }
 
 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
 {
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        _drbd_wait_ee_list_empty(mdev, head);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 }
 
 /* see also kernel_accept; which is only present since 2.6.18.
@@ -512,7 +516,7 @@ static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
        set_fs(KERNEL_DS);
 
        for (;;) {
-               rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
+               rv = sock_recvmsg(mdev->tconn->data.socket, &msg, size, msg.msg_flags);
                if (rv == size)
                        break;
 
@@ -574,11 +578,11 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
        int err;
        int disconnect_on_error = 1;
 
-       if (!get_net_conf(mdev))
+       if (!get_net_conf(mdev->tconn))
                return NULL;
 
        what = "sock_create_kern";
-       err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
+       err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
                SOCK_STREAM, IPPROTO_TCP, &sock);
        if (err < 0) {
                sock = NULL;
@@ -586,9 +590,9 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
        }
 
        sock->sk->sk_rcvtimeo =
-       sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;
-       drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
-                       mdev->net_conf->rcvbuf_size);
+       sock->sk->sk_sndtimeo =  mdev->tconn->net_conf->try_connect_int*HZ;
+       drbd_setbufsize(sock, mdev->tconn->net_conf->sndbuf_size,
+                       mdev->tconn->net_conf->rcvbuf_size);
 
        /* explicitly bind to the configured IP as source IP
        *  for the outgoing connections.
@@ -597,9 +601,9 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
        * Make sure to use 0 as port number, so linux selects
        *  a free one dynamically.
        */
-       memcpy(&src_in6, mdev->net_conf->my_addr,
-              min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
-       if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
+       memcpy(&src_in6, mdev->tconn->net_conf->my_addr,
+              min_t(int, mdev->tconn->net_conf->my_addr_len, sizeof(src_in6)));
+       if (((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family == AF_INET6)
                src_in6.sin6_port = 0;
        else
                ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
@@ -607,7 +611,7 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
        what = "bind before connect";
        err = sock->ops->bind(sock,
                              (struct sockaddr *) &src_in6,
-                             mdev->net_conf->my_addr_len);
+                             mdev->tconn->net_conf->my_addr_len);
        if (err < 0)
                goto out;
 
@@ -616,8 +620,8 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
        disconnect_on_error = 0;
        what = "connect";
        err = sock->ops->connect(sock,
-                                (struct sockaddr *)mdev->net_conf->peer_addr,
-                                mdev->net_conf->peer_addr_len, 0);
+                                (struct sockaddr *)mdev->tconn->net_conf->peer_addr,
+                                mdev->tconn->net_conf->peer_addr_len, 0);
 
 out:
        if (err < 0) {
@@ -640,7 +644,7 @@ out:
                if (disconnect_on_error)
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
        }
-       put_net_conf(mdev);
+       put_net_conf(mdev->tconn);
        return sock;
 }
 
@@ -650,30 +654,30 @@ static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
        struct socket *s_estab = NULL, *s_listen;
        const char *what;
 
-       if (!get_net_conf(mdev))
+       if (!get_net_conf(mdev->tconn))
                return NULL;
 
        what = "sock_create_kern";
-       err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
+       err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
                SOCK_STREAM, IPPROTO_TCP, &s_listen);
        if (err) {
                s_listen = NULL;
                goto out;
        }
 
-       timeo = mdev->net_conf->try_connect_int * HZ;
+       timeo = mdev->tconn->net_conf->try_connect_int * HZ;
        timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
 
        s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
        s_listen->sk->sk_rcvtimeo = timeo;
        s_listen->sk->sk_sndtimeo = timeo;
-       drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
-                       mdev->net_conf->rcvbuf_size);
+       drbd_setbufsize(s_listen, mdev->tconn->net_conf->sndbuf_size,
+                       mdev->tconn->net_conf->rcvbuf_size);
 
        what = "bind before listen";
        err = s_listen->ops->bind(s_listen,
-                             (struct sockaddr *) mdev->net_conf->my_addr,
-                             mdev->net_conf->my_addr_len);
+                             (struct sockaddr *) mdev->tconn->net_conf->my_addr,
+                             mdev->tconn->net_conf->my_addr_len);
        if (err < 0)
                goto out;
 
@@ -688,7 +692,7 @@ out:
                        drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
                }
        }
-       put_net_conf(mdev);
+       put_net_conf(mdev->tconn);
 
        return s_estab;
 }
@@ -696,19 +700,19 @@ out:
 static int drbd_send_fp(struct drbd_conf *mdev,
        struct socket *sock, enum drbd_packets cmd)
 {
-       struct p_header80 *h = &mdev->data.sbuf.header.h80;
+       struct p_header80 *h = &mdev->tconn->data.sbuf.header.h80;
 
        return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
 }
 
 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
 {
-       struct p_header80 *h = &mdev->data.rbuf.header.h80;
+       struct p_header80 *h = &mdev->tconn->data.rbuf.header.h80;
        int rr;
 
        rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
 
-       if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
+       if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
                return be16_to_cpu(h->command);
 
        return 0xffff;
@@ -751,7 +755,7 @@ static int drbd_connect(struct drbd_conf *mdev)
        struct socket *s, *sock, *msock;
        int try, h, ok;
 
-       D_ASSERT(!mdev->data.socket);
+       D_ASSERT(!mdev->tconn->data.socket);
 
        if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
                return -2;
@@ -787,7 +791,7 @@ static int drbd_connect(struct drbd_conf *mdev)
                }
 
                if (sock && msock) {
-                       schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
+                       schedule_timeout_interruptible(mdev->tconn->net_conf->ping_timeo*HZ/10);
                        ok = drbd_socket_okay(mdev, &sock);
                        ok = drbd_socket_okay(mdev, &msock) && ok;
                        if (ok)
@@ -829,7 +833,7 @@ retry:
                if (signal_pending(current)) {
                        flush_signals(current);
                        smp_rmb();
-                       if (get_t_state(&mdev->receiver) == Exiting)
+                       if (get_t_state(&mdev->tconn->receiver) == EXITING)
                                goto out_release_sockets;
                }
 
@@ -851,26 +855,26 @@ retry:
        msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
 
        /* NOT YET ...
-        * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
+        * sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
         * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         * first set it to the P_HAND_SHAKE timeout,
         * which we set to 4x the configured ping_timeout. */
        sock->sk->sk_sndtimeo =
-       sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
+       sock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_timeo*4*HZ/10;
 
-       msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
-       msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+       msock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
+       msock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
 
        /* we don't want delays.
         * we use TCP_CORK where appropriate, though */
        drbd_tcp_nodelay(sock);
        drbd_tcp_nodelay(msock);
 
-       mdev->data.socket = sock;
-       mdev->meta.socket = msock;
-       mdev->last_received = jiffies;
+       mdev->tconn->data.socket = sock;
+       mdev->tconn->meta.socket = msock;
+       mdev->tconn->last_received = jiffies;
 
-       D_ASSERT(mdev->asender.task == NULL);
+       D_ASSERT(mdev->tconn->asender.task == NULL);
 
        h = drbd_do_handshake(mdev);
        if (h <= 0)
@@ -891,13 +895,13 @@ retry:
        if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
                return 0;
 
-       sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
+       sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
        sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
 
        atomic_set(&mdev->packet_seq, 0);
        mdev->peer_seq = 0;
 
-       drbd_thread_start(&mdev->asender);
+       drbd_thread_start(&mdev->tconn->asender);
 
        if (drbd_send_protocol(mdev) == -1)
                return -1;
@@ -921,7 +925,7 @@ out_release_sockets:
 
 static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
 {
-       union p_header *h = &mdev->data.rbuf.header;
+       union p_header *h = &mdev->tconn->data.rbuf.header;
        int r;
 
        r = drbd_recv(mdev, h, sizeof(*h));
@@ -931,10 +935,10 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
                return false;
        }
 
-       if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
+       if (likely(h->h80.magic == cpu_to_be32(DRBD_MAGIC))) {
                *cmd = be16_to_cpu(h->h80.command);
                *packet_size = be16_to_cpu(h->h80.length);
-       } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
+       } else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
                *cmd = be16_to_cpu(h->h95.command);
                *packet_size = be32_to_cpu(h->h95.length);
        } else {
@@ -944,7 +948,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi
                    be16_to_cpu(h->h80.length));
                return false;
        }
-       mdev->last_received = jiffies;
+       mdev->tconn->last_received = jiffies;
 
        return true;
 }
@@ -1087,8 +1091,8 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
        struct bio *bios = NULL;
        struct bio *bio;
        struct page *page = e->pages;
-       sector_t sector = e->sector;
-       unsigned ds = e->size;
+       sector_t sector = e->i.sector;
+       unsigned ds = e->i.size;
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
        int err = -ENOMEM;
@@ -1103,7 +1107,7 @@ next_bio:
                dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
                goto fail;
        }
-       /* > e->sector, unless this is the first bio */
+       /* > e->i.sector, unless this is the first bio */
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
@@ -1159,7 +1163,7 @@ fail:
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
        int rv;
-       struct p_barrier *p = &mdev->data.rbuf.barrier;
+       struct p_barrier *p = &mdev->tconn->data.rbuf.barrier;
        struct drbd_epoch *epoch;
 
        inc_unacked(mdev);
@@ -1240,7 +1244,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
        void *dig_vv = mdev->int_dig_vv;
        unsigned long *data;
 
-       dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
+       dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
                crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
 
        if (dgs) {
@@ -1256,9 +1260,12 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 
        data_size -= dgs;
 
-       ERR_IF(data_size == 0) return NULL;
-       ERR_IF(data_size &  0x1ff) return NULL;
-       ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;
+       if (!expect(data_size != 0))
+               return NULL;
+       if (!expect(IS_ALIGNED(data_size, 512)))
+               return NULL;
+       if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
+               return NULL;
 
        /* even though we trust out peer,
         * we sometimes have to double check. */
@@ -1354,7 +1361,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
        void *dig_in = mdev->int_dig_in;
        void *dig_vv = mdev->int_dig_vv;
 
-       dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
+       dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
                crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
 
        if (dgs) {
@@ -1410,17 +1417,17 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
        struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-       sector_t sector = e->sector;
+       sector_t sector = e->i.sector;
        int ok;
 
-       D_ASSERT(hlist_unhashed(&e->collision));
+       D_ASSERT(drbd_interval_empty(&e->i));
 
        if (likely((e->flags & EE_WAS_ERROR) == 0)) {
-               drbd_set_in_sync(mdev, sector, e->size);
+               drbd_set_in_sync(mdev, sector, e->i.size);
                ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
        } else {
                /* Record failure to sync */
-               drbd_rs_failed_io(mdev, sector, e->size);
+               drbd_rs_failed_io(mdev, sector, e->i.size);
 
                ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
        }
@@ -1445,9 +1452,9 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
        e->w.cb = e_end_resync_block;
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        list_add(&e->w.list, &mdev->sync_ee);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        atomic_add(data_size >> 9, &mdev->rs_sect_ev);
        if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
@@ -1455,9 +1462,9 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
 
        /* don't care for the reason here */
        dev_err(DEV, "submit failed, triggering re-connect\n");
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        list_del(&e->w.list);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        drbd_free_ee(mdev, e);
 fail:
@@ -1465,22 +1472,37 @@ fail:
        return false;
 }
 
+static struct drbd_request *
+find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
+            sector_t sector, bool missing_ok, const char *func)
+{
+       struct drbd_request *req;
+
+       /* Request object according to our peer */
+       req = (struct drbd_request *)(unsigned long)id;
+       if (drbd_contains_interval(root, sector, &req->i))
+               return req;
+       if (!missing_ok) {
+               dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
+                       (unsigned long)id, (unsigned long long)sector);
+       }
+       return NULL;
+}
+
 static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
        struct drbd_request *req;
        sector_t sector;
        int ok;
-       struct p_data *p = &mdev->data.rbuf.data;
+       struct p_data *p = &mdev->tconn->data.rbuf.data;
 
        sector = be64_to_cpu(p->sector);
 
-       spin_lock_irq(&mdev->req_lock);
-       req = _ar_id_to_req(mdev, p->block_id, sector);
-       spin_unlock_irq(&mdev->req_lock);
-       if (unlikely(!req)) {
-               dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
+       spin_lock_irq(&mdev->tconn->req_lock);
+       req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
+       spin_unlock_irq(&mdev->tconn->req_lock);
+       if (unlikely(!req))
                return false;
-       }
 
        /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
         * special casing it there for the various failure cases.
@@ -1488,7 +1510,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
        ok = recv_dless_read(mdev, req, sector, data_size);
 
        if (ok)
-               req_mod(req, data_received);
+               req_mod(req, DATA_RECEIVED);
        /* else: nothing. handled from drbd_disconnect...
         * I don't think we may complete this just yet
         * in case we are "on-disconnect: freeze" */
@@ -1500,7 +1522,7 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
 {
        sector_t sector;
        int ok;
-       struct p_data *p = &mdev->data.rbuf.data;
+       struct p_data *p = &mdev->tconn->data.rbuf.data;
 
        sector = be64_to_cpu(p->sector);
        D_ASSERT(p->block_id == ID_SYNCER);
@@ -1508,7 +1530,7 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
        if (get_ldev(mdev)) {
                /* data is submitted to disk within recv_resync_read.
                 * corresponding put_ldev done below on error,
-                * or in drbd_endio_write_sec. */
+                * or in drbd_endio_sec. */
                ok = recv_resync_read(mdev, sector, data_size);
        } else {
                if (__ratelimit(&drbd_ratelimit_state))
@@ -1530,10 +1552,10 @@ static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, un
 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
        struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-       sector_t sector = e->sector;
+       sector_t sector = e->i.sector;
        int ok = 1, pcmd;
 
-       if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
+       if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
                if (likely((e->flags & EE_WAS_ERROR) == 0)) {
                        pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
                                mdev->state.conn <= C_PAUSED_SYNC_T &&
@@ -1541,7 +1563,7 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
                                P_RS_WRITE_ACK : P_WRITE_ACK;
                        ok &= drbd_send_ack(mdev, pcmd, e);
                        if (pcmd == P_RS_WRITE_ACK)
-                               drbd_set_in_sync(mdev, sector, e->size);
+                               drbd_set_in_sync(mdev, sector, e->i.size);
                } else {
                        ok  = drbd_send_ack(mdev, P_NEG_ACK, e);
                        /* we expect it to be marked out of sync anyways...
@@ -1551,14 +1573,14 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
        }
        /* we delete from the conflict detection hash _after_ we sent out the
         * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
-       if (mdev->net_conf->two_primaries) {
-               spin_lock_irq(&mdev->req_lock);
-               D_ASSERT(!hlist_unhashed(&e->collision));
-               hlist_del_init(&e->collision);
-               spin_unlock_irq(&mdev->req_lock);
-       } else {
-               D_ASSERT(hlist_unhashed(&e->collision));
-       }
+       if (mdev->tconn->net_conf->two_primaries) {
+               spin_lock_irq(&mdev->tconn->req_lock);
+               D_ASSERT(!drbd_interval_empty(&e->i));
+               drbd_remove_interval(&mdev->epoch_entries, &e->i);
+               drbd_clear_interval(&e->i);
+               spin_unlock_irq(&mdev->tconn->req_lock);
+       } else
+               D_ASSERT(drbd_interval_empty(&e->i));
 
        drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
 
@@ -1570,13 +1592,14 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
        struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
        int ok = 1;
 
-       D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
+       D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
        ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
 
-       spin_lock_irq(&mdev->req_lock);
-       D_ASSERT(!hlist_unhashed(&e->collision));
-       hlist_del_init(&e->collision);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
+       D_ASSERT(!drbd_interval_empty(&e->i));
+       drbd_remove_interval(&mdev->epoch_entries, &e->i);
+       drbd_clear_interval(&e->i);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        dec_unacked(mdev);
 
@@ -1652,7 +1675,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 {
        sector_t sector;
        struct drbd_epoch_entry *e;
-       struct p_data *p = &mdev->data.rbuf.data;
+       struct p_data *p = &mdev->tconn->data.rbuf.data;
        int rw = WRITE;
        u32 dp_flags;
 
@@ -1669,7 +1692,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
        /* get_ldev(mdev) successful.
         * Corresponding put_ldev done either below (on various errors),
-        * or in drbd_endio_write_sec, if we successfully submit the data at
+        * or in drbd_endio_sec, if we successfully submit the data at
         * the end of this function. */
 
        sector = be64_to_cpu(p->sector);
@@ -1694,37 +1717,31 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        spin_unlock(&mdev->epoch_lock);
 
        /* I'm the receiver, I do hold a net_cnt reference. */
-       if (!mdev->net_conf->two_primaries) {
-               spin_lock_irq(&mdev->req_lock);
+       if (!mdev->tconn->net_conf->two_primaries) {
+               spin_lock_irq(&mdev->tconn->req_lock);
        } else {
                /* don't get the req_lock yet,
                 * we may sleep in drbd_wait_peer_seq */
-               const int size = e->size;
+               const int size = e->i.size;
                const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
                DEFINE_WAIT(wait);
-               struct drbd_request *i;
-               struct hlist_node *n;
-               struct hlist_head *slot;
                int first;
 
-               D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
-               BUG_ON(mdev->ee_hash == NULL);
-               BUG_ON(mdev->tl_hash == NULL);
+               D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
 
                /* conflict detection and handling:
                 * 1. wait on the sequence number,
                 *    in case this data packet overtook ACK packets.
-                * 2. check our hash tables for conflicting requests.
-                *    we only need to walk the tl_hash, since an ee can not
-                *    have a conflict with an other ee: on the submitting
-                *    node, the corresponding req had already been conflicting,
-                *    and a conflicting req is never sent.
+                * 2. check our interval trees for conflicting requests:
+                *    we only need to check the write_requests tree; the
+                *    epoch_entries tree cannot contain any overlaps because
+                *    they were already eliminated on the submitting node.
                 *
                 * Note: for two_primaries, we are protocol C,
                 * so there cannot be any request that is DONE
                 * but still on the transfer log.
                 *
-                * unconditionally add to the ee_hash.
+                * unconditionally add to the epoch_entries tree.
                 *
                 * if no conflicting request is found:
                 *    submit.
@@ -1748,34 +1765,35 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
                        goto out_interrupted;
 
-               spin_lock_irq(&mdev->req_lock);
+               spin_lock_irq(&mdev->tconn->req_lock);
 
-               hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
+               drbd_insert_interval(&mdev->epoch_entries, &e->i);
 
-#define OVERLAPS overlaps(i->sector, i->size, sector, size)
-               slot = tl_hash_slot(mdev, sector);
                first = 1;
                for (;;) {
+                       struct drbd_interval *i;
                        int have_unacked = 0;
                        int have_conflict = 0;
                        prepare_to_wait(&mdev->misc_wait, &wait,
                                TASK_INTERRUPTIBLE);
-                       hlist_for_each_entry(i, n, slot, collision) {
-                               if (OVERLAPS) {
-                                       /* only ALERT on first iteration,
-                                        * we may be woken up early... */
-                                       if (first)
-                                               dev_alert(DEV, "%s[%u] Concurrent local write detected!"
-                                                     " new: %llus +%u; pending: %llus +%u\n",
-                                                     current->comm, current->pid,
-                                                     (unsigned long long)sector, size,
-                                                     (unsigned long long)i->sector, i->size);
-                                       if (i->rq_state & RQ_NET_PENDING)
-                                               ++have_unacked;
-                                       ++have_conflict;
-                               }
+
+                       i = drbd_find_overlap(&mdev->write_requests, sector, size);
+                       if (i) {
+                               struct drbd_request *req2 =
+                                       container_of(i, struct drbd_request, i);
+
+                               /* only ALERT on first iteration,
+                                * we may be woken up early... */
+                               if (first)
+                                       dev_alert(DEV, "%s[%u] Concurrent local write detected!"
+                                             " new: %llus +%u; pending: %llus +%u\n",
+                                             current->comm, current->pid,
+                                             (unsigned long long)sector, size,
+                                             (unsigned long long)req2->i.sector, req2->i.size);
+                               if (req2->rq_state & RQ_NET_PENDING)
+                                       ++have_unacked;
+                               ++have_conflict;
                        }
-#undef OVERLAPS
                        if (!have_conflict)
                                break;
 
@@ -1787,7 +1805,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                                e->w.cb = e_send_discard_ack;
                                list_add_tail(&e->w.list, &mdev->done_ee);
 
-                               spin_unlock_irq(&mdev->req_lock);
+                               spin_unlock_irq(&mdev->tconn->req_lock);
 
                                /* we could probably send that P_DISCARD_ACK ourselves,
                                 * but I don't like the receiver using the msock */
@@ -1799,15 +1817,16 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                        }
 
                        if (signal_pending(current)) {
-                               hlist_del_init(&e->collision);
+                               drbd_remove_interval(&mdev->epoch_entries, &e->i);
+                               drbd_clear_interval(&e->i);
 
-                               spin_unlock_irq(&mdev->req_lock);
+                               spin_unlock_irq(&mdev->tconn->req_lock);
 
                                finish_wait(&mdev->misc_wait, &wait);
                                goto out_interrupted;
                        }
 
-                       spin_unlock_irq(&mdev->req_lock);
+                       spin_unlock_irq(&mdev->tconn->req_lock);
                        if (first) {
                                first = 0;
                                dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
@@ -1818,15 +1837,15 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                                D_ASSERT(have_unacked == 0);
                        }
                        schedule();
-                       spin_lock_irq(&mdev->req_lock);
+                       spin_lock_irq(&mdev->tconn->req_lock);
                }
                finish_wait(&mdev->misc_wait, &wait);
        }
 
        list_add(&e->w.list, &mdev->active_ee);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
-       switch (mdev->net_conf->wire_protocol) {
+       switch (mdev->tconn->net_conf->wire_protocol) {
        case DRBD_PROT_C:
                inc_unacked(mdev);
                /* corresponding dec_unacked() in e_end_block()
@@ -1844,10 +1863,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
        if (mdev->state.pdsk < D_INCONSISTENT) {
                /* In case we have the only disk of the cluster, */
-               drbd_set_out_of_sync(mdev, e->sector, e->size);
+               drbd_set_out_of_sync(mdev, e->i.sector, e->i.size);
                e->flags |= EE_CALL_AL_COMPLETE_IO;
                e->flags &= ~EE_MAY_SET_IN_SYNC;
-               drbd_al_begin_io(mdev, e->sector);
+               drbd_al_begin_io(mdev, e->i.sector);
        }
 
        if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
@@ -1855,12 +1874,13 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
        /* don't care for the reason here */
        dev_err(DEV, "submit failed, triggering re-connect\n");
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        list_del(&e->w.list);
-       hlist_del_init(&e->collision);
-       spin_unlock_irq(&mdev->req_lock);
+       drbd_remove_interval(&mdev->epoch_entries, &e->i);
+       drbd_clear_interval(&e->i);
+       spin_unlock_irq(&mdev->tconn->req_lock);
        if (e->flags & EE_CALL_AL_COMPLETE_IO)
-               drbd_al_complete_io(mdev, e->sector);
+               drbd_al_complete_io(mdev, e->i.sector);
 
 out_interrupted:
        drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
@@ -1944,7 +1964,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
        struct digest_info *di = NULL;
        int size, verb;
        unsigned int fault_type;
-       struct p_block_req *p = &mdev->data.rbuf.block_req;
+       struct p_block_req *p = &mdev->tconn->data.rbuf.block_req;
 
        sector = be64_to_cpu(p->sector);
        size   = be32_to_cpu(p->blksize);
@@ -2028,7 +2048,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
                        goto out_free_e;
 
                if (cmd == P_CSUM_RS_REQUEST) {
-                       D_ASSERT(mdev->agreed_pro_version >= 89);
+                       D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
                        e->w.cb = w_e_end_csum_rs_req;
                        /* used in the sector offset progress display */
                        mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
@@ -2045,7 +2065,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
 
        case P_OV_REQUEST:
                if (mdev->ov_start_sector == ~(sector_t)0 &&
-                   mdev->agreed_pro_version >= 90) {
+                   mdev->tconn->agreed_pro_version >= 90) {
                        unsigned long now = jiffies;
                        int i;
                        mdev->ov_start_sector = sector;
@@ -2102,18 +2122,18 @@ submit_for_resync:
 
 submit:
        inc_unacked(mdev);
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        list_add_tail(&e->w.list, &mdev->read_ee);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
                return true;
 
        /* don't care for the reason here */
        dev_err(DEV, "submit failed, triggering re-connect\n");
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        list_del(&e->w.list);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
        /* no drbd_rs_complete_io(), we are dropping the connection anyways */
 
 out_free_e:
@@ -2133,7 +2153,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
        ch_peer = mdev->p_uuid[UI_SIZE];
        ch_self = mdev->comm_bm_set;
 
-       switch (mdev->net_conf->after_sb_0p) {
+       switch (mdev->tconn->net_conf->after_sb_0p) {
        case ASB_CONSENSUS:
        case ASB_DISCARD_SECONDARY:
        case ASB_CALL_HELPER:
@@ -2172,7 +2192,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
                        if (ch_peer == 0) { rv =  1; break; }
                        if (ch_self == 0) { rv = -1; break; }
                }
-               if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
+               if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
                        break;
        case ASB_DISCARD_LEAST_CHG:
                if      (ch_self < ch_peer)
@@ -2198,7 +2218,7 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
 {
        int hg, rv = -100;
 
-       switch (mdev->net_conf->after_sb_1p) {
+       switch (mdev->tconn->net_conf->after_sb_1p) {
        case ASB_DISCARD_YOUNGER_PRI:
        case ASB_DISCARD_OLDER_PRI:
        case ASB_DISCARD_LEAST_CHG:
@@ -2247,7 +2267,7 @@ static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
 {
        int hg, rv = -100;
 
-       switch (mdev->net_conf->after_sb_2p) {
+       switch (mdev->tconn->net_conf->after_sb_2p) {
        case ASB_DISCARD_YOUNGER_PRI:
        case ASB_DISCARD_OLDER_PRI:
        case ASB_DISCARD_LEAST_CHG:
@@ -2340,7 +2360,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 
                if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
 
-                       if (mdev->agreed_pro_version < 91)
+                       if (mdev->tconn->agreed_pro_version < 91)
                                return -1091;
 
                        if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
@@ -2361,7 +2381,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
 
                if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
 
-                       if (mdev->agreed_pro_version < 91)
+                       if (mdev->tconn->agreed_pro_version < 91)
                                return -1091;
 
                        if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
@@ -2407,14 +2427,14 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
        *rule_nr = 51;
        peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
        if (self == peer) {
-               if (mdev->agreed_pro_version < 96 ?
+               if (mdev->tconn->agreed_pro_version < 96 ?
                    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
                    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
                    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
                        /* The last P_SYNC_UUID did not get though. Undo the last start of
                           resync as sync source modifications of the peer's UUIDs. */
 
-                       if (mdev->agreed_pro_version < 91)
+                       if (mdev->tconn->agreed_pro_version < 91)
                                return -1091;
 
                        mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
@@ -2444,14 +2464,14 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
        *rule_nr = 71;
        self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
        if (self == peer) {
-               if (mdev->agreed_pro_version < 96 ?
+               if (mdev->tconn->agreed_pro_version < 96 ?
                    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
                    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
                    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
                        /* The last P_SYNC_UUID did not get though. Undo the last start of
                           resync as sync source modifications of our UUIDs. */
 
-                       if (mdev->agreed_pro_version < 91)
+                       if (mdev->tconn->agreed_pro_version < 91)
                                return -1091;
 
                        _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
@@ -2538,7 +2558,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
        if (abs(hg) == 100)
                drbd_khelper(mdev, "initial-split-brain");
 
-       if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
+       if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
                int pcount = (mdev->state.role == R_PRIMARY)
                           + (peer_role == R_PRIMARY);
                int forced = (hg == -100);
@@ -2567,9 +2587,9 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
        }
 
        if (hg == -100) {
-               if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
+               if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
                        hg = -1;
-               if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
+               if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
                        hg = 1;
 
                if (abs(hg) < 100)
@@ -2595,7 +2615,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
 
        if (hg < 0 && /* by intention we do not use mydisk here. */
            mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
-               switch (mdev->net_conf->rr_conflict) {
+               switch (mdev->tconn->net_conf->rr_conflict) {
                case ASB_CALL_HELPER:
                        drbd_khelper(mdev, "pri-lost");
                        /* fall through */
@@ -2608,7 +2628,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
                }
        }
 
-       if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+       if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
                if (hg == 0)
                        dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
                else
@@ -2663,7 +2683,7 @@ static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
 
 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       struct p_protocol *p = &mdev->data.rbuf.protocol;
+       struct p_protocol *p = &mdev->tconn->data.rbuf.protocol;
        int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
        int p_want_lose, p_two_primaries, cf;
        char p_integrity_alg[SHARED_SECRET_MAX] = "";
@@ -2681,38 +2701,38 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
        if (cf & CF_DRY_RUN)
                set_bit(CONN_DRY_RUN, &mdev->flags);
 
-       if (p_proto != mdev->net_conf->wire_protocol) {
+       if (p_proto != mdev->tconn->net_conf->wire_protocol) {
                dev_err(DEV, "incompatible communication protocols\n");
                goto disconnect;
        }
 
-       if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
+       if (cmp_after_sb(p_after_sb_0p, mdev->tconn->net_conf->after_sb_0p)) {
                dev_err(DEV, "incompatible after-sb-0pri settings\n");
                goto disconnect;
        }
 
-       if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
+       if (cmp_after_sb(p_after_sb_1p, mdev->tconn->net_conf->after_sb_1p)) {
                dev_err(DEV, "incompatible after-sb-1pri settings\n");
                goto disconnect;
        }
 
-       if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
+       if (cmp_after_sb(p_after_sb_2p, mdev->tconn->net_conf->after_sb_2p)) {
                dev_err(DEV, "incompatible after-sb-2pri settings\n");
                goto disconnect;
        }
 
-       if (p_want_lose && mdev->net_conf->want_lose) {
+       if (p_want_lose && mdev->tconn->net_conf->want_lose) {
                dev_err(DEV, "both sides have the 'want_lose' flag set\n");
                goto disconnect;
        }
 
-       if (p_two_primaries != mdev->net_conf->two_primaries) {
+       if (p_two_primaries != mdev->tconn->net_conf->two_primaries) {
                dev_err(DEV, "incompatible setting of the two-primaries options\n");
                goto disconnect;
        }
 
-       if (mdev->agreed_pro_version >= 87) {
-               unsigned char *my_alg = mdev->net_conf->integrity_alg;
+       if (mdev->tconn->agreed_pro_version >= 87) {
+               unsigned char *my_alg = mdev->tconn->net_conf->integrity_alg;
 
                if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
                        return false;
@@ -2763,11 +2783,11 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
 {
        int ok = true;
-       struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
+       struct p_rs_param_95 *p = &mdev->tconn->data.rbuf.rs_param_95;
        unsigned int header_size, data_size, exp_max_sz;
        struct crypto_hash *verify_tfm = NULL;
        struct crypto_hash *csums_tfm = NULL;
-       const int apv = mdev->agreed_pro_version;
+       const int apv = mdev->tconn->agreed_pro_version;
        int *rs_plan_s = NULL;
        int fifo_size = 0;
 
@@ -2926,7 +2946,7 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev,
 
 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       struct p_sizes *p = &mdev->data.rbuf.sizes;
+       struct p_sizes *p = &mdev->tconn->data.rbuf.sizes;
        enum determine_dev_size dd = unchanged;
        sector_t p_size, p_usize, my_usize;
        int ldsc = 0; /* local disk size changed */
@@ -3029,7 +3049,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       struct p_uuids *p = &mdev->data.rbuf.uuids;
+       struct p_uuids *p = &mdev->tconn->data.rbuf.uuids;
        u64 *p_uuid;
        int i, updated_uuids = 0;
 
@@ -3054,7 +3074,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        if (get_ldev(mdev)) {
                int skip_initial_sync =
                        mdev->state.conn == C_CONNECTED &&
-                       mdev->agreed_pro_version >= 90 &&
+                       mdev->tconn->agreed_pro_version >= 90 &&
                        mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
                        (p_uuid[UI_FLAGS] & 8);
                if (skip_initial_sync) {
@@ -3123,7 +3143,7 @@ static union drbd_state convert_state(union drbd_state ps)
 
 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       struct p_req_state *p = &mdev->data.rbuf.req_state;
+       struct p_req_state *p = &mdev->tconn->data.rbuf.req_state;
        union drbd_state mask, val;
        enum drbd_state_rv rv;
 
@@ -3149,7 +3169,7 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
 
 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       struct p_state *p = &mdev->data.rbuf.state;
+       struct p_state *p = &mdev->tconn->data.rbuf.state;
        union drbd_state os, ns, peer_state;
        enum drbd_disk_state real_peer_disk;
        enum chg_state_flags cs_flags;
@@ -3163,10 +3183,10 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
        }
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
  retry:
        os = ns = mdev->state;
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        /* peer says his disk is uptodate, while we think it is inconsistent,
         * and this happens while we think we have a sync going on. */
@@ -3250,7 +3270,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                }
        }
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        if (mdev->state.i != os.i)
                goto retry;
        clear_bit(CONSIDER_RESYNC, &mdev->flags);
@@ -3262,9 +3282,9 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
        if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
            test_bit(NEW_CUR_UUID, &mdev->flags)) {
-               /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
+               /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
                   for temporal network outages! */
-               spin_unlock_irq(&mdev->req_lock);
+               spin_unlock_irq(&mdev->tconn->req_lock);
                dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
                tl_clear(mdev);
                drbd_uuid_new_current(mdev);
@@ -3274,7 +3294,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        }
        rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
        ns = mdev->state;
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        if (rv < SS_SUCCESS) {
                drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
@@ -3292,7 +3312,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
                }
        }
 
-       mdev->net_conf->want_lose = 0;
+       mdev->tconn->net_conf->want_lose = 0;
 
        drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
 
@@ -3301,7 +3321,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 
 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
+       struct p_rs_uuid *p = &mdev->tconn->data.rbuf.rs_uuid;
 
        wait_event(mdev->misc_wait,
                   mdev->state.conn == C_WF_SYNC_UUID ||
@@ -3500,7 +3520,7 @@ static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigne
        void *buffer;
        int err;
        int ok = false;
-       struct p_header80 *h = &mdev->data.rbuf.header.h80;
+       struct p_header80 *h = &mdev->tconn->data.rbuf.header.h80;
 
        drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
        /* you are supposed to send additional out-of-sync information
@@ -3598,7 +3618,8 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
        while (size > 0) {
                want = min_t(int, size, sizeof(sink));
                r = drbd_recv(mdev, sink, want);
-               ERR_IF(r <= 0) break;
+               if (!expect(r > 0))
+                       break;
                size -= r;
        }
        return size == 0;
@@ -3608,14 +3629,14 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, u
 {
        /* Make sure we've acked all the TCP data associated
         * with the data requests being unplugged */
-       drbd_tcp_quickack(mdev->data.socket);
+       drbd_tcp_quickack(mdev->tconn->data.socket);
 
        return true;
 }
 
 static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-       struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+       struct p_block_desc *p = &mdev->tconn->data.rbuf.block_desc;
 
        switch (mdev->state.conn) {
        case C_WF_SYNC_UUID:
@@ -3669,21 +3690,21 @@ static struct data_cmd drbd_cmd_handler[] = {
 };
 
 /* All handler functions that expect a sub-header get that sub-heder in
-   mdev->data.rbuf.header.head.payload.
+   mdev->tconn->data.rbuf.header.head.payload.
 
-   Usually in mdev->data.rbuf.header.head the callback can find the usual
+   Usually in mdev->tconn->data.rbuf.header.head the callback can find the usual
    p_header, but they may not rely on that. Since there is also p_header95 !
  */
 
 static void drbdd(struct drbd_conf *mdev)
 {
-       union p_header *header = &mdev->data.rbuf.header;
+       union p_header *header = &mdev->tconn->data.rbuf.header;
        unsigned int packet_size;
        enum drbd_packets cmd;
        size_t shs; /* sub header size */
        int rv;
 
-       while (get_t_state(&mdev->receiver) == Running) {
+       while (get_t_state(&mdev->tconn->receiver) == RUNNING) {
                drbd_thread_current_set_cpu(mdev);
                if (!drbd_recv_header(mdev, &cmd, &packet_size))
                        goto err_out;
@@ -3732,40 +3753,10 @@ void drbd_flush_workqueue(struct drbd_conf *mdev)
 
        barr.w.cb = w_prev_work_done;
        init_completion(&barr.done);
-       drbd_queue_work(&mdev->data.work, &barr.w);
+       drbd_queue_work(&mdev->tconn->data.work, &barr.w);
        wait_for_completion(&barr.done);
 }
 
-void drbd_free_tl_hash(struct drbd_conf *mdev)
-{
-       struct hlist_head *h;
-
-       spin_lock_irq(&mdev->req_lock);
-
-       if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
-               spin_unlock_irq(&mdev->req_lock);
-               return;
-       }
-       /* paranoia code */
-       for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
-               if (h->first)
-                       dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
-                               (int)(h - mdev->ee_hash), h->first);
-       kfree(mdev->ee_hash);
-       mdev->ee_hash = NULL;
-       mdev->ee_hash_s = 0;
-
-       /* paranoia code */
-       for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
-               if (h->first)
-                       dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
-                               (int)(h - mdev->tl_hash), h->first);
-       kfree(mdev->tl_hash);
-       mdev->tl_hash = NULL;
-       mdev->tl_hash_s = 0;
-       spin_unlock_irq(&mdev->req_lock);
-}
-
 static void drbd_disconnect(struct drbd_conf *mdev)
 {
        enum drbd_fencing_p fp;
@@ -3777,15 +3768,15 @@ static void drbd_disconnect(struct drbd_conf *mdev)
                return;
 
        /* asender does not clean up anything. it must not interfere, either */
-       drbd_thread_stop(&mdev->asender);
+       drbd_thread_stop(&mdev->tconn->asender);
        drbd_free_sock(mdev);
 
        /* wait for current activity to cease. */
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
        _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
        _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        /* We do not have data structures that would allow us to
         * get the rs_pending_cnt down to 0 again.
@@ -3837,7 +3828,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
        if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
                drbd_try_outdate_peer_async(mdev);
 
-       spin_lock_irq(&mdev->req_lock);
+       spin_lock_irq(&mdev->tconn->req_lock);
        os = mdev->state;
        if (os.conn >= C_UNCONNECTED) {
                /* Do not restart in case we are C_DISCONNECTING */
@@ -3845,16 +3836,16 @@ static void drbd_disconnect(struct drbd_conf *mdev)
                ns.conn = C_UNCONNECTED;
                rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
        }
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        if (os.conn == C_DISCONNECTING) {
-               wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
+               wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0);
 
                crypto_free_hash(mdev->cram_hmac_tfm);
                mdev->cram_hmac_tfm = NULL;
 
-               kfree(mdev->net_conf);
-               mdev->net_conf = NULL;
+               kfree(mdev->tconn->net_conf);
+               mdev->tconn->net_conf = NULL;
                drbd_request_state(mdev, NS(conn, C_STANDALONE));
        }
 
@@ -3900,26 +3891,26 @@ static void drbd_disconnect(struct drbd_conf *mdev)
  */
 static int drbd_send_handshake(struct drbd_conf *mdev)
 {
-       /* ASSERT current == mdev->receiver ... */
-       struct p_handshake *p = &mdev->data.sbuf.handshake;
+       /* ASSERT current == mdev->tconn->receiver ... */
+       struct p_handshake *p = &mdev->tconn->data.sbuf.handshake;
        int ok;
 
-       if (mutex_lock_interruptible(&mdev->data.mutex)) {
+       if (mutex_lock_interruptible(&mdev->tconn->data.mutex)) {
                dev_err(DEV, "interrupted during initial handshake\n");
                return 0; /* interrupted. not ok. */
        }
 
-       if (mdev->data.socket == NULL) {
-               mutex_unlock(&mdev->data.mutex);
+       if (mdev->tconn->data.socket == NULL) {
+               mutex_unlock(&mdev->tconn->data.mutex);
                return 0;
        }
 
        memset(p, 0, sizeof(*p));
        p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
        p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
-       ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
+       ok = _drbd_send_cmd( mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
                             (struct p_header80 *)p, sizeof(*p), 0 );
-       mutex_unlock(&mdev->data.mutex);
+       mutex_unlock(&mdev->tconn->data.mutex);
        return ok;
 }
 
@@ -3932,8 +3923,8 @@ static int drbd_send_handshake(struct drbd_conf *mdev)
  */
 static int drbd_do_handshake(struct drbd_conf *mdev)
 {
-       /* ASSERT current == mdev->receiver ... */
-       struct p_handshake *p = &mdev->data.rbuf.handshake;
+       /* ASSERT current == mdev->tconn->receiver ... */
+       struct p_handshake *p = &mdev->tconn->data.rbuf.handshake;
        const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
        unsigned int length;
        enum drbd_packets cmd;
@@ -3976,10 +3967,10 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
            PRO_VERSION_MIN > p->protocol_max)
                goto incompat;
 
-       mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+       mdev->tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
 
        dev_info(DEV, "Handshake successful: "
-            "Agreed network protocol version %d\n", mdev->agreed_pro_version);
+            "Agreed network protocol version %d\n", mdev->tconn->agreed_pro_version);
 
        return 1;
 
@@ -4014,7 +4005,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        char *response = NULL;
        char *right_response = NULL;
        char *peers_ch = NULL;
-       unsigned int key_len = strlen(mdev->net_conf->shared_secret);
+       unsigned int key_len = strlen(mdev->tconn->net_conf->shared_secret);
        unsigned int resp_size;
        struct hash_desc desc;
        enum drbd_packets cmd;
@@ -4025,7 +4016,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
        desc.flags = 0;
 
        rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
-                               (u8 *)mdev->net_conf->shared_secret, key_len);
+                               (u8 *)mdev->tconn->net_conf->shared_secret, key_len);
        if (rv) {
                dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
                rv = -1;
@@ -4139,7 +4130,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 
        if (rv)
                dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
-                    resp_size, mdev->net_conf->cram_hmac_alg);
+                    resp_size, mdev->tconn->net_conf->cram_hmac_alg);
        else
                rv = -1;
 
@@ -4175,9 +4166,9 @@ int drbdd_init(struct drbd_thread *thi)
        } while (h == 0);
 
        if (h > 0) {
-               if (get_net_conf(mdev)) {
+               if (get_net_conf(mdev->tconn)) {
                        drbdd(mdev);
-                       put_net_conf(mdev);
+                       put_net_conf(mdev->tconn);
                }
        }
 
@@ -4216,7 +4207,7 @@ static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
 {
        /* restore idle timeout */
-       mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
+       mdev->tconn->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
        if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
                wake_up(&mdev->misc_wait);
 
@@ -4229,7 +4220,7 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
        sector_t sector = be64_to_cpu(p->sector);
        int blksize = be32_to_cpu(p->blksize);
 
-       D_ASSERT(mdev->agreed_pro_version >= 89);
+       D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
 
        update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
@@ -4246,51 +4237,22 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
        return true;
 }
 
-/* when we receive the ACK for a write request,
- * verify that we actually know about it */
-static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
-       u64 id, sector_t sector)
-{
-       struct hlist_head *slot = tl_hash_slot(mdev, sector);
-       struct hlist_node *n;
-       struct drbd_request *req;
-
-       hlist_for_each_entry(req, n, slot, collision) {
-               if ((unsigned long)req == (unsigned long)id) {
-                       if (req->sector != sector) {
-                               dev_err(DEV, "_ack_id_to_req: found req %p but it has "
-                                   "wrong sector (%llus versus %llus)\n", req,
-                                   (unsigned long long)req->sector,
-                                   (unsigned long long)sector);
-                               break;
-                       }
-                       return req;
-               }
-       }
-       return NULL;
-}
-
-typedef struct drbd_request *(req_validator_fn)
-       (struct drbd_conf *mdev, u64 id, sector_t sector);
-
-static int validate_req_change_req_state(struct drbd_conf *mdev,
-       u64 id, sector_t sector, req_validator_fn validator,
-       const char *func, enum drbd_req_event what)
+static int
+validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
+                             struct rb_root *root, const char *func,
+                             enum drbd_req_event what, bool missing_ok)
 {
        struct drbd_request *req;
        struct bio_and_error m;
 
-       spin_lock_irq(&mdev->req_lock);
-       req = validator(mdev, id, sector);
+       spin_lock_irq(&mdev->tconn->req_lock);
+       req = find_request(mdev, root, id, sector, missing_ok, func);
        if (unlikely(!req)) {
-               spin_unlock_irq(&mdev->req_lock);
-
-               dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
-                       (void *)(unsigned long)id, (unsigned long long)sector);
+               spin_unlock_irq(&mdev->tconn->req_lock);
                return false;
        }
        __req_mod(req, what, &m);
-       spin_unlock_irq(&mdev->req_lock);
+       spin_unlock_irq(&mdev->tconn->req_lock);
 
        if (m.bio)
                complete_master_bio(mdev, &m);
@@ -4306,27 +4268,27 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
 
        update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
-       if (is_syncer_block_id(p->block_id)) {
+       if (p->block_id == ID_SYNCER) {
                drbd_set_in_sync(mdev, sector, blksize);
                dec_rs_pending(mdev);
                return true;
        }
        switch (be16_to_cpu(h->command)) {
        case P_RS_WRITE_ACK:
-               D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
-               what = write_acked_by_peer_and_sis;
+               D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
+               what = WRITE_ACKED_BY_PEER_AND_SIS;
                break;
        case P_WRITE_ACK:
-               D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
-               what = write_acked_by_peer;
+               D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
+               what = WRITE_ACKED_BY_PEER;
                break;
        case P_RECV_ACK:
-               D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
-               what = recv_acked_by_peer;
+               D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
+               what = RECV_ACKED_BY_PEER;
                break;
        case P_DISCARD_ACK:
-               D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
-               what = conflict_discarded_by_peer;
+               D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
+               what = CONFLICT_DISCARDED_BY_PEER;
                break;
        default:
                D_ASSERT(0);
@@ -4334,7 +4296,8 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
        }
 
        return validate_req_change_req_state(mdev, p->block_id, sector,
-               _ack_id_to_req, __func__ , what);
+                                            &mdev->write_requests, __func__,
+                                            what, false);
 }
 
 static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4342,42 +4305,31 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
        struct p_block_ack *p = (struct p_block_ack *)h;
        sector_t sector = be64_to_cpu(p->sector);
        int size = be32_to_cpu(p->blksize);
-       struct drbd_request *req;
-       struct bio_and_error m;
+       bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A ||
+                         mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B;
+       bool found;
 
        update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 
-       if (is_syncer_block_id(p->block_id)) {
+       if (p->block_id == ID_SYNCER) {
                dec_rs_pending(mdev);
                drbd_rs_failed_io(mdev, sector, size);
                return true;
        }
 
-       spin_lock_irq(&mdev->req_lock);
-       req = _ack_id_to_req(mdev, p->block_id, sector);
-       if (!req) {
-               spin_unlock_irq(&mdev->req_lock);
-               if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
-                   mdev->net_conf->wire_protocol == DRBD_PROT_B) {
-                       /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
-                          The master bio might already be completed, therefore the
-                          request is no longer in the collision hash.
-                          => Do not try to validate block_id as request. */
-                       /* In Protocol B we might already have got a P_RECV_ACK
-                          but then get a P_NEG_ACK after wards. */
-                       drbd_set_out_of_sync(mdev, sector, size);
-                       return true;
-               } else {
-                       dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
-                               (void *)(unsigned long)p->block_id, (unsigned long long)sector);
+       found = validate_req_change_req_state(mdev, p->block_id, sector,
+                                             &mdev->write_requests, __func__,
+                                             NEG_ACKED, missing_ok);
+       if (!found) {
+               /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
+                  The master bio might already be completed, therefore the
+                  request is no longer in the collision hash. */
+               /* In Protocol B we might already have got a P_RECV_ACK
+                  but then get a P_NEG_ACK afterwards. */
+               if (!missing_ok)
                        return false;
-               }
+               drbd_set_out_of_sync(mdev, sector, size);
        }
-       __req_mod(req, neg_acked, &m);
-       spin_unlock_irq(&mdev->req_lock);
-
-       if (m.bio)
-               complete_master_bio(mdev, &m);
        return true;
 }
 
@@ -4391,7 +4343,8 @@ static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
            (unsigned long long)sector, be32_to_cpu(p->blksize));
 
        return validate_req_change_req_state(mdev, p->block_id, sector,
-               _ar_id_to_req, __func__ , neg_acked);
+                                            &mdev->read_requests, __func__,
+                                            NEG_ACKED, false);
 }
 
 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
@@ -4474,7 +4427,7 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
                w = kmalloc(sizeof(*w), GFP_NOIO);
                if (w) {
                        w->cb = w_ov_finished;
-                       drbd_queue_work_front(&mdev->data.work, w);
+                       drbd_queue_work_front(&mdev->tconn->data.work, w);
                } else {
                        dev_err(DEV, "kmalloc(w) failed.");
                        ov_oos_print(mdev);
@@ -4526,7 +4479,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
 int drbd_asender(struct drbd_thread *thi)
 {
        struct drbd_conf *mdev = thi->mdev;
-       struct p_header80 *h = &mdev->meta.rbuf.header.h80;
+       struct p_header80 *h = &mdev->tconn->meta.rbuf.header.h80;
        struct asender_cmd *cmd = NULL;
 
        int rv, len;
@@ -4541,20 +4494,23 @@ int drbd_asender(struct drbd_thread *thi)
        current->policy = SCHED_RR;  /* Make this a realtime task! */
        current->rt_priority = 2;    /* more important than all other tasks */
 
-       while (get_t_state(thi) == Running) {
+       while (get_t_state(thi) == RUNNING) {
                drbd_thread_current_set_cpu(mdev);
                if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
-                       ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
-                       mdev->meta.socket->sk->sk_rcvtimeo =
-                               mdev->net_conf->ping_timeo*HZ/10;
+                       if (!drbd_send_ping(mdev)) {
+                               dev_err(DEV, "drbd_send_ping has failed\n");
+                               goto reconnect;
+                       }
+                       mdev->tconn->meta.socket->sk->sk_rcvtimeo =
+                               mdev->tconn->net_conf->ping_timeo*HZ/10;
                        ping_timeout_active = 1;
                }
 
                /* conditionally cork;
                 * it may hurt latency if we cork without much to send */
-               if (!mdev->net_conf->no_cork &&
+               if (!mdev->tconn->net_conf->no_cork &&
                        3 < atomic_read(&mdev->unacked_cnt))
-                       drbd_tcp_cork(mdev->meta.socket);
+                       drbd_tcp_cork(mdev->tconn->meta.socket);
                while (1) {
                        clear_bit(SIGNAL_ASENDER, &mdev->flags);
                        flush_signals(current);
@@ -4562,9 +4518,9 @@ int drbd_asender(struct drbd_thread *thi)
                                goto reconnect;
                        /* to avoid race with newly queued ACKs */
                        set_bit(SIGNAL_ASENDER, &mdev->flags);
-                       spin_lock_irq(&mdev->req_lock);
+                       spin_lock_irq(&mdev->tconn->req_lock);
                        empty = list_empty(&mdev->done_ee);
-                       spin_unlock_irq(&mdev->req_lock);
+                       spin_unlock_irq(&mdev->tconn->req_lock);
                        /* new ack may have been queued right here,
                         * but then there is also a signal pending,
                         * and we start over... */
@@ -4572,14 +4528,14 @@ int drbd_asender(struct drbd_thread *thi)
                                break;
                }
                /* but unconditionally uncork unless disabled */
-               if (!mdev->net_conf->no_cork)
-                       drbd_tcp_uncork(mdev->meta.socket);
+               if (!mdev->tconn->net_conf->no_cork)
+                       drbd_tcp_uncork(mdev->tconn->meta.socket);
 
                /* short circuit, recv_msg would return EINTR anyways. */
                if (signal_pending(current))
                        continue;
 
-               rv = drbd_recv_short(mdev, mdev->meta.socket,
+               rv = drbd_recv_short(mdev, mdev->tconn->meta.socket,
                                     buf, expect-received, 0);
                clear_bit(SIGNAL_ASENDER, &mdev->flags);
 
@@ -4602,6 +4558,11 @@ int drbd_asender(struct drbd_thread *thi)
                        dev_err(DEV, "meta connection shut down by peer.\n");
                        goto reconnect;
                } else if (rv == -EAGAIN) {
+                       /* If the data socket received something meanwhile,
+                        * that is good enough: peer is still alive. */
+                       if (time_after(mdev->tconn->last_received,
+                               jiffies - mdev->tconn->meta.socket->sk->sk_rcvtimeo))
+                               continue;
                        if (ping_timeout_active) {
                                dev_err(DEV, "PingAck did not arrive in time.\n");
                                goto reconnect;
@@ -4616,7 +4577,7 @@ int drbd_asender(struct drbd_thread *thi)
                }
 
                if (received == expect && cmd == NULL) {
-                       if (unlikely(h->magic != BE_DRBD_MAGIC)) {
+                       if (unlikely(h->magic != cpu_to_be32(DRBD_MAGIC))) {
                                dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
                                    be32_to_cpu(h->magic),
                                    be16_to_cpu(h->command),
@@ -4633,10 +4594,11 @@ int drbd_asender(struct drbd_thread *thi)
                                goto disconnect;
                        }
                        expect = cmd->pkt_size;
-                       ERR_IF(len != expect-sizeof(struct p_header80))
+                       if (!expect(len == expect - sizeof(struct p_header80)))
                                goto reconnect;
                }
                if (received == expect) {
+                       mdev->tconn->last_received = jiffies;
                        D_ASSERT(cmd != NULL);
                        if (!cmd->process(mdev, h))
                                goto reconnect;