/*
   drbd_req.h

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2006-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2006-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
   Copyright (C) 2006-2008, Philipp Reisner <philipp.reisner@linbit.com>.

   DRBD is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   DRBD is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _DRBD_REQ_H
#define _DRBD_REQ_H

#include <linux/module.h>

#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
/* The request callbacks will be called in irq context by the IDE drivers,
   and in Softirqs/Tasklets/BH context by the SCSI drivers,
   and by the receiver and worker in kernel-thread context.
   Try to get the locking right :) */
/*
 * Objects of type struct drbd_request only exist on an R_PRIMARY node, and are
 * associated with IO requests originating from the block layer above us.
 *
 * There are quite a few things that may happen to a drbd request
 * during its lifetime.
 *
 *  It will be created.
 *  It will be marked with the intention to be
 *    submitted to local disk and/or
 *    sent via the network.
 *
 *  It has to be placed on the transfer log and other housekeeping lists,
 *  in case we have a network connection.
 *
 *  It may be identified as a concurrent (write) request
 *    and be handled accordingly.
 *
 *  It may be handed over to the local disk subsystem.
 *  It may be completed by the local disk subsystem,
 *    either successfully or with io-error.
 *  In case it is a READ request, and it failed locally,
 *    it may be retried remotely.
 *
 *  It may be queued for sending.
 *  It may be handed over to the network stack,
 *    which may fail.
 *  It may be acknowledged by the "peer" according to the wire_protocol in use.
 *    This may be a negative ack.
 *  It may receive a faked ack when the network connection is lost and the
 *  transfer log is cleaned up.
 *  Sending may be canceled due to network connection loss.
 *  When it finally has outlived its time,
 *    corresponding dirty bits in the resync-bitmap may be cleared or set,
 *    it will be destroyed,
 *    and completion will be signalled to the originator,
 *      with or without "success".
 */
enum drbd_req_event {
	created,
	to_be_send,
	to_be_submitted,

	/* XXX yes, now I am inconsistent...
	 * these are not "events" but "actions"
	 * oh, well... */
	queue_for_net_write,
	queue_for_net_read,
	queue_for_send_oos,

	send_canceled,
	send_failed,
	handed_over_to_network,
	oos_handed_to_network,
	connection_lost_while_pending,
	read_retry_remote_canceled,
	recv_acked_by_peer,
	write_acked_by_peer,
	write_acked_by_peer_and_sis, /* and set_in_sync */
	conflict_discarded_by_peer,
	neg_acked,
	barrier_acked, /* in protocol A and B */
	data_received, /* (remote read) */

	read_completed_with_error,
	read_ahead_completed_with_error,
	write_completed_with_error,
	abort_disk_io,
	completed_ok,
	resend,
	fail_frozen_disk_io,
	restart_frozen_disk_io,
	nothing, /* for tracing only */
};
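/* Illustration only, not a sequence taken verbatim from the driver: for a
 * protocol C write that succeeds both locally and remotely, a plausible
 * chain of events fed into __req_mod() would be
 *
 *	to_be_send, to_be_submitted,	(marked for network and local disk)
 *	queue_for_net_write,		(registered in the transfer log)
 *	handed_over_to_network,		(handed to the network stack)
 *	completed_ok,			(local disk write finished)
 *	write_acked_by_peer,		(write ack received)
 *	barrier_acked			(request may now be freed)
 *
 * A locally failing read would instead see read_completed_with_error and
 * may then be retried remotely via queue_for_net_read. */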
/* encoding of request states for now.  we don't actually need that many bits.
 * we don't need to do atomic bit operations either, since most of the time we
 * need to look at the connection state and/or manipulate some lists at the
 * same time, so we should hold the request lock anyway.
 */
enum drbd_req_state_bits {
	/* 3210
	 * 0000: no local possible
	 * 0001: to be submitted
	 *    UNUSED, we could map: 011: submitted, completion still pending
	 * 0110: completed ok
	 * 0010: completed with error
	 * 1001: Aborted (before completion)
	 * 1x10: Aborted and completed -> free
	 */
	__RQ_LOCAL_PENDING,
	__RQ_LOCAL_COMPLETED,
	__RQ_LOCAL_OK,
	__RQ_LOCAL_ABORTED,
	/* 87654
	 * 00000: no network possible
	 * 00001: to be sent
	 * 00011: to be sent, on worker queue
	 * 00101: sent, expecting recv_ack (B) or write_ack (C)
	 * 11101: sent,
	 *        recv_ack (B) or implicit "ack" (A),
	 *        still waiting for the barrier ack.
	 *        master_bio may already be completed and invalidated.
	 * 11100: write_acked (C),
	 *        data_received (for remote read, any protocol)
	 *        or finally the barrier ack has arrived (B,A)...
	 *        request can be freed
	 * 01100: neg-acked (write, protocol C)
	 *        or neg-d-acked (read, any protocol)
	 *        or killed from the transfer log
	 *        during cleanup after connection loss
	 *        request can be freed
	 * 01000: canceled or send failed...
	 *        request can be freed
	 */
	/* if "SENT" is not set, yet, this can still fail or be canceled.
	 * if "SENT" is set already, we still wait for an Ack packet.
	 * when cleared, the master_bio may be completed.
	 * in (B,A) the request object may still linger on the transfer log
	 * until the corresponding barrier ack comes in */
	__RQ_NET_PENDING,
	/* If it is QUEUED, and it is a WRITE, it is also registered in the
	 * transfer log. Currently we need this flag to avoid conflicts between
	 * the worker canceling the request and tl_clear_barrier killing it
	 * from the transfer log. We should restructure the code so this
	 * conflict no longer occurs. */
	__RQ_NET_QUEUED,
	/* well, actually only "handed over to the network stack".
	 *
	 * TODO can potentially be dropped because of the similar meaning
	 * of RQ_NET_SENT and ~RQ_NET_QUEUED.
	 * however it is not exactly the same. before we drop it
	 * we must ensure that we can tell a request with network part
	 * from a request without, regardless of what happens to it. */
	__RQ_NET_SENT,
	/* when set, the request may be freed (if RQ_NET_QUEUED is clear).
	 * basically this means the corresponding P_BARRIER_ACK was received */
	__RQ_NET_DONE,
	/* whether or not we know (C) or pretend (B,A) that the write
	 * was successfully written on the peer. */
	__RQ_NET_OK,

	/* peer called drbd_set_in_sync() for this write */
	__RQ_NET_SIS,

	/* keep this last, it's for the RQ_NET_MASK */
	__RQ_NET_MAX,
	/* Set when this is a write, clear for a read */
	__RQ_WRITE,

	/* Should call drbd_al_complete_io() for this request... */
	__RQ_IN_ACT_LOG,
};
#define RQ_LOCAL_PENDING   (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK        (1UL << __RQ_LOCAL_OK)
#define RQ_LOCAL_ABORTED   (1UL << __RQ_LOCAL_ABORTED)

#define RQ_LOCAL_MASK      ((RQ_LOCAL_ABORTED << 1)-1)

#define RQ_NET_PENDING     (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED      (1UL << __RQ_NET_QUEUED)
#define RQ_NET_SENT        (1UL << __RQ_NET_SENT)
#define RQ_NET_DONE        (1UL << __RQ_NET_DONE)
#define RQ_NET_OK          (1UL << __RQ_NET_OK)
#define RQ_NET_SIS         (1UL << __RQ_NET_SIS)

#define RQ_NET_MASK        (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK)

#define RQ_WRITE           (1UL << __RQ_WRITE)
#define RQ_IN_ACT_LOG      (1UL << __RQ_IN_ACT_LOG)
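/* Usage sketch: req_net_may_be_freed() below is a hypothetical helper, not
 * part of DRBD, showing how these masks decode rq_state per the bit-pattern
 * tables above.  A request with a network part may be freed once the barrier
 * ack arrived (RQ_NET_DONE set) and it is no longer on the worker queue
 * (RQ_NET_QUEUED clear):
 *
 *	static inline bool req_net_may_be_freed(unsigned long rq_state)
 *	{
 *		return (rq_state & (RQ_NET_DONE | RQ_NET_QUEUED))
 *			== RQ_NET_DONE;
 *	}
 *
 * while (rq_state & RQ_LOCAL_MASK) isolates the local-disk substate. */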
/* For waking up the frozen transfer log, __req_mod() has to return whether
   the request should be counted in the epoch object. */
#define MR_WRITE_SHIFT 0
#define MR_WRITE       (1 << MR_WRITE_SHIFT)
#define MR_READ_SHIFT  1
#define MR_READ        (1 << MR_READ_SHIFT)
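/* Sketch of the intended use (simplified, not verbatim DRBD code): when the
 * frozen transfer log is restarted, the caller derives from each request's
 * _req_mod() return value what the current epoch still contains:
 *
 *	rv = _req_mod(req, resend);
 *	n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
 *	n_reads  += (rv & MR_READ)  >> MR_READ_SHIFT;
 */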
/* epoch entries */
static inline
struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->ee_hash_s == 0);
	return mdev->ee_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->ee_hash_s);
}
/* transfer log (drbd_request objects) */
static inline
struct hlist_head *tl_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	BUG_ON(mdev->tl_hash_s == 0);
	return mdev->tl_hash +
		((unsigned int)(sector>>HT_SHIFT) % mdev->tl_hash_s);
}
/* application reads (drbd_request objects) */
static struct hlist_head *ar_hash_slot(struct drbd_conf *mdev, sector_t sector)
{
	return mdev->app_reads_hash
		+ ((unsigned int)(sector) % APP_R_HSIZE);
}
/* when we receive the answer for a read request,
 * verify that we actually know about it */
static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = ar_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, collision) {
		if ((unsigned long)req == (unsigned long)id) {
			D_ASSERT(req->sector == sector);
			return req;
		}
	}
	return NULL;
}
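/* Usage sketch (receiver side, simplified): the peer echoes the request
 * pointer back as the block id of its reply, so the answer to a remote
 * read can be matched to the pending request.  p->block_id and p->sector
 * here name fields of an assumed reply packet, not a specific DRBD struct:
 *
 *	req = _ar_id_to_req(mdev, p->block_id, be64_to_cpu(p->sector));
 *	if (unlikely(!req))
 *		... protocol error: reply matches no pending read ...
 */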
static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src)
{
	struct bio *bio;
	bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */

	req->private_bio = bio;

	bio->bi_private = req;
	bio->bi_end_io  = drbd_endio_pri;
	bio->bi_next    = NULL;
}
static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
	struct bio *bio_src)
{
	struct drbd_request *req =
		mempool_alloc(drbd_request_mempool, GFP_NOIO);
	if (likely(req)) {
		drbd_req_make_private_bio(req, bio_src);

		req->rq_state   = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
		req->mdev       = mdev;
		req->master_bio = bio_src;
		req->epoch      = 0;
		req->sector     = bio_src->bi_sector;
		req->size       = bio_src->bi_size;
		INIT_HLIST_NODE(&req->collision);
		INIT_LIST_HEAD(&req->tl_requests);
		INIT_LIST_HEAD(&req->w.list);
	}
	return req;
}
static inline void drbd_req_free(struct drbd_request *req)
{
	mempool_free(req, drbd_request_mempool);
}
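/* Note on overlaps() below: the lengths l1/l2 are in bytes and the sectors
 * are 512-byte units, hence the >>9.  Two ranges overlap iff neither ends
 * before the other starts; e.g. overlaps(req->sector, req->size, sector,
 * size) being non-zero flags a conflicting request on the same area. */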
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
{
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
}
/* Short lived temporary struct on the stack.
 * We could squirrel the error to be returned into
 * bio->bi_size, or similar. But that would be too ugly. */
struct bio_and_error {
	struct bio *bio;
	int error;
};
extern void _req_may_be_done(struct drbd_request *req,
		struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
		struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
/* use this if you don't want to deal with calling complete_master_bio()
 * outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	/* __req_mod possibly frees req, do not touch req after that! */
	rv = __req_mod(req, what, &m);
	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
/* completion of master bio is outside of our spinlock.
 * We still may or may not be inside some irqs disabled section
 * of the lower level driver completion callback, so we need to
 * spin_lock_irqsave here. */
static inline int req_mod(struct drbd_request *req,
		enum drbd_req_event what)
{
	unsigned long flags;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	int rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	rv = __req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);

	return rv;
}
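/* Sketch of the division of labour (simplified, assumed callers): a
 * completion callback that does not hold req_lock reports an event through
 * the locked variant,
 *
 *	req_mod(req, completed_ok);
 *
 * whereas cleanup code already iterating a list under mdev->req_lock must
 * use _req_mod() and rely on the lock it already holds. */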
static inline bool drbd_should_do_remote(union drbd_state s)
{
	return s.pdsk == D_UP_TO_DATE ||
		(s.pdsk >= D_INCONSISTENT &&
		 s.conn >= C_WF_BITMAP_T &&
		 s.conn < C_AHEAD);
	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
	   That is equivalent since before 96 IO was frozen in the
	   C_WF_BITMAP* states. */
}
static inline bool drbd_should_send_oos(union drbd_state s)
{
	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
	/* pdsk = D_INCONSISTENT as a consequence.  Protocol 96 check is not
	   necessary since we enter state C_AHEAD only if proto >= 96 */
}
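/* Taken together (illustrative only, not verbatim submit-path code): a write
 * is shipped to the peer when drbd_should_do_remote() holds for the current
 * state, while drbd_should_send_oos() means we only mark the range as
 * out-of-sync on the peer instead of sending the data, roughly
 *
 *	remote = drbd_should_do_remote(mdev->state);
 *	send_oos = !remote && drbd_should_send_oos(mdev->state);
 */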
#endif