git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - drivers/block/drbd/drbd_req.h
drbd: moved req_lock and transfer log from mdev to tconn
[karo-tx-linux.git] / drivers / block / drbd / drbd_req.h
index 32e2c3e6a8134220943873c671cb7ac6adf83a10..4b0858bf28666c5711990ca6a4851adb48c4d408 100644 (file)
  */
 
 enum drbd_req_event {
-       created,
-       to_be_send,
-       to_be_submitted,
+       CREATED,
+       TO_BE_SENT,
+       TO_BE_SUBMITTED,
 
        /* XXX yes, now I am inconsistent...
         * these are not "events" but "actions"
         * oh, well... */
-       queue_for_net_write,
-       queue_for_net_read,
-       queue_for_send_oos,
-
-       send_canceled,
-       send_failed,
-       handed_over_to_network,
-       oos_handed_to_network,
-       connection_lost_while_pending,
-       read_retry_remote_canceled,
-       recv_acked_by_peer,
-       write_acked_by_peer,
-       write_acked_by_peer_and_sis, /* and set_in_sync */
-       conflict_discarded_by_peer,
-       neg_acked,
-       barrier_acked, /* in protocol A and B */
-       data_received, /* (remote read) */
-
-       read_completed_with_error,
-       read_ahead_completed_with_error,
-       write_completed_with_error,
-       completed_ok,
-       resend,
-       fail_frozen_disk_io,
-       restart_frozen_disk_io,
-       nothing, /* for tracing only */
+       QUEUE_FOR_NET_WRITE,
+       QUEUE_FOR_NET_READ,
+       QUEUE_FOR_SEND_OOS,
+
+       SEND_CANCELED,
+       SEND_FAILED,
+       HANDED_OVER_TO_NETWORK,
+       OOS_HANDED_TO_NETWORK,
+       CONNECTION_LOST_WHILE_PENDING,
+       READ_RETRY_REMOTE_CANCELED,
+       RECV_ACKED_BY_PEER,
+       WRITE_ACKED_BY_PEER,
+       WRITE_ACKED_BY_PEER_AND_SIS, /* and set_in_sync */
+       CONFLICT_DISCARDED_BY_PEER,
+       NEG_ACKED,
+       BARRIER_ACKED, /* in protocol A and B */
+       DATA_RECEIVED, /* (remote read) */
+
+       READ_COMPLETED_WITH_ERROR,
+       READ_AHEAD_COMPLETED_WITH_ERROR,
+       WRITE_COMPLETED_WITH_ERROR,
+       COMPLETED_OK,
+       RESEND,
+       FAIL_FROZEN_DISK_IO,
+       RESTART_FROZEN_DISK_IO,
+       NOTHING,
 };
 
 /* encoding of request states for now.  we don't actually need that many bits.
@@ -138,8 +138,8 @@ enum drbd_req_state_bits {
         *        recv_ack (B) or implicit "ack" (A),
         *        still waiting for the barrier ack.
         *        master_bio may already be completed and invalidated.
-        * 11100: write_acked (C),
-        *        data_received (for remote read, any protocol)
+        * 11100: write acked (C),
+        *        data received (for remote read, any protocol)
         *        or finally the barrier ack has arrived (B,A)...
         *        request can be freed
         * 01100: neg-acked (write, protocol C)
@@ -222,49 +222,6 @@ enum drbd_req_state_bits {
 #define MR_READ_SHIFT  1
 #define MR_READ        (1 << MR_READ_SHIFT)
 
-/* epoch entries */
-static inline
-struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
-       BUG_ON(mdev->ee_hash_s == 0);
-       return mdev->ee_hash +
-               ((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
-}
-
-/* transfer log (drbd_request objects) */
-static inline
-struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
-       BUG_ON(mdev->tl_hash_s == 0);
-       return mdev->tl_hash +
-               ((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
-}
-
-/* application reads (drbd_request objects) */
-static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
-{
-       return mdev->app_reads_hash
-               + ((unsigned int)(sector) % APP_R_HSIZE);
-}
-
-/* when we receive the answer for a read request,
- * verify that we actually know about it */
-static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
-       u64 id, sector_t sector)
-{
-       struct hlist_head *slot = ar_hash_slot(mdev, sector);
-       struct hlist_node *n;
-       struct drbd_request *req;
-
-       hlist_for_each_entry(req, n, slot, colision) {
-               if ((unsigned long)req == (unsigned long)id) {
-                       D_ASSERT(req->sector == sector);
-                       return req;
-               }
-       }
-       return NULL;
-}
-
 static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
 {
        struct bio *bio;
@@ -289,9 +246,9 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
                req->mdev        = mdev;
                req->master_bio  = bio_src;
                req->epoch       = 0;
-               req->sector      = bio_src->bi_sector;
-               req->size        = bio_src->bi_size;
-               INIT_HLIST_NODE(&req->colision);
+               req->i.sector     = bio_src->bi_sector;
+               req->i.size      = bio_src->bi_size;
+               drbd_clear_interval(&req->i);
                INIT_LIST_HEAD(&req->tl_requests);
                INIT_LIST_HEAD(&req->w.list);
        }
@@ -303,11 +260,6 @@ static inline void drbd_req_free(struct drbd_request *req)
        mempool_free(req, drbd_request_mempool);
 }
 
-static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
-{
-       return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
-}
-
 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
  * bio->bi_size, or similar. But that would be too ugly. */
@@ -323,6 +275,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 extern void complete_master_bio(struct drbd_conf *mdev,
                struct bio_and_error *m);
 extern void request_timer_fn(unsigned long data);
+extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
 
 /* use this if you don't want to deal with calling complete_master_bio()
  * outside the spinlock, e.g. when walking some list on cleanup. */
@@ -352,9 +305,9 @@ static inline int req_mod(struct drbd_request *req,
        struct bio_and_error m;
        int rv;
 
-       spin_lock_irqsave(&mdev->req_lock, flags);
+       spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        rv = __req_mod(req, what, &m);
-       spin_unlock_irqrestore(&mdev->req_lock, flags);
+       spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
 
        if (m.bio)
                complete_master_bio(mdev, &m);