4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
27 #include <linux/version.h>
28 #include <linux/drbd.h>
29 #include <linux/sched.h>
30 #include <linux/smp_lock.h>
31 #include <linux/wait.h>
33 #include <linux/memcontrol.h>
34 #include <linux/mm_inline.h>
35 #include <linux/slab.h>
36 #include <linux/random.h>
37 #include <linux/string.h>
38 #include <linux/scatterlist.h>
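/* Granularity of the resync/online-verify request generators: resync_timer_fn()
 * and the mod_timer() calls below re-arm the work every SLEEP_TIME jiffies,
 * i.e. roughly every 100ms. */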
43 #define SLEEP_TIME (HZ/10)
45 static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
55 * more endio handlers:
56 atodb_endio in drbd_actlog.c
57 drbd_bm_async_io_complete in drbd_bitmap.c
59 * For all these callbacks, note the following:
60 * The callbacks will be called in irq context by the IDE drivers,
61 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
62 * Try to get the locking right :)
67 /* About the global_state_lock
68 Each state transition on a device holds a read lock. In case we have
69 to evaluate the sync after dependencies, we grab a write lock, because
70 we need stable states on all devices for that. */
71 rwlock_t global_state_lock;
73 /* used for synchronous meta data and bitmap IO
74 * submitted by drbd_md_sync_page_io()
76 void drbd_md_io_complete(struct bio *bio, int error)
78 struct drbd_md_io *md_io;
80 md_io = (struct drbd_md_io *)bio->bi_private;
83 complete(&md_io->event);
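/* Wake the submitter. drbd_md_sync_page_io() is synchronous: it typically
 * blocks in wait_for_completion() on md_io->event until this handler has run. */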
86 /* reads on behalf of the partner,
87 * "submitted" by the receiver
89 void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
91 unsigned long flags = 0;
92 struct drbd_epoch_entry *e = NULL;
93 struct drbd_conf *mdev;
94 int uptodate = bio_flagged(bio, BIO_UPTODATE);
100 dev_warn(DEV, "read: error=%d s=%llus\n", error,
101 (unsigned long long)e->sector);
102 if (!error && !uptodate) {
103 dev_warn(DEV, "read: setting error to -EIO s=%llus\n",
104 (unsigned long long)e->sector);
105 /* strange behavior of some lower level drivers...
106 * fail the request by clearing the uptodate flag,
107 * but do not return any error?! */
111 D_ASSERT(e->block_id != ID_VACANT);
113 spin_lock_irqsave(&mdev->req_lock, flags);
114 mdev->read_cnt += e->size >> 9;
115 list_del(&e->w.list);
116 if (list_empty(&mdev->read_ee))
117 wake_up(&mdev->ee_wait);
118 spin_unlock_irqrestore(&mdev->req_lock, flags);
120 drbd_chk_io_error(mdev, error, FALSE);
121 drbd_queue_work(&mdev->data.work, &e->w);
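/* e->w.cb (e.g. w_e_end_data_req or w_e_send_csum) now runs in worker
 * (process) context, where it is safe to block and to use the data socket. */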
125 /* writes on behalf of the partner, or resync writes,
126 * "submitted" by the receiver.
128 void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
130 unsigned long flags = 0;
131 struct drbd_epoch_entry *e = NULL;
132 struct drbd_conf *mdev;
136 int do_al_complete_io;
137 int uptodate = bio_flagged(bio, BIO_UPTODATE);
138 int is_barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
144 dev_warn(DEV, "write: error=%d s=%llus\n", error,
145 (unsigned long long)e->sector);
146 if (!error && !uptodate) {
147 dev_warn(DEV, "write: setting error to -EIO s=%llus\n",
148 (unsigned long long)e->sector);
149 /* strange behavior of some lower level drivers...
150 * fail the request by clearing the uptodate flag,
151 * but do not return any error?! */
155 /* error == -ENOTSUPP would be a better test,
156 * alas it is not reliable */
157 if (error && is_barrier && e->flags & EE_IS_BARRIER) {
158 drbd_bump_write_ordering(mdev, WO_bdev_flush);
159 spin_lock_irqsave(&mdev->req_lock, flags);
160 list_del(&e->w.list);
161 e->w.cb = w_e_reissue;
162 /* put_ldev actually happens below, once we come here again. */
164 spin_unlock_irqrestore(&mdev->req_lock, flags);
165 drbd_queue_work(&mdev->data.work, &e->w);
169 D_ASSERT(e->block_id != ID_VACANT);
171 spin_lock_irqsave(&mdev->req_lock, flags);
172 mdev->writ_cnt += e->size >> 9;
173 is_syncer_req = is_syncer_block_id(e->block_id);
175 /* after we moved e to done_ee,
176 * we may no longer access it,
177 * it may be freed/reused already!
178 * (as soon as we release the req_lock) */
179 e_sector = e->sector;
180 do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
182 list_del(&e->w.list); /* has been on active_ee or sync_ee */
183 list_add_tail(&e->w.list, &mdev->done_ee);
185 /* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
186 * neither did we wake possibly waiting conflicting requests.
187 * done from "drbd_process_done_ee" within the appropriate w.cb
188 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
190 do_wake = is_syncer_req
191 ? list_empty(&mdev->sync_ee)
192 : list_empty(&mdev->active_ee);
195 __drbd_chk_io_error(mdev, FALSE);
196 spin_unlock_irqrestore(&mdev->req_lock, flags);
199 drbd_rs_complete_io(mdev, e_sector);
202 wake_up(&mdev->ee_wait);
204 if (do_al_complete_io)
205 drbd_al_complete_io(mdev, e_sector);
212 /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
214 void drbd_endio_pri(struct bio *bio, int error)
217 struct drbd_request *req = bio->bi_private;
218 struct drbd_conf *mdev = req->mdev;
219 struct bio_and_error m;
220 enum drbd_req_event what;
221 int uptodate = bio_flagged(bio, BIO_UPTODATE);
224 dev_warn(DEV, "p %s: error=%d\n",
225 bio_data_dir(bio) == WRITE ? "write" : "read", error);
226 if (!error && !uptodate) {
227 dev_warn(DEV, "p %s: setting error to -EIO\n",
228 bio_data_dir(bio) == WRITE ? "write" : "read");
229 /* strange behavior of some lower level drivers...
230 * fail the request by clearing the uptodate flag,
231 * but do not return any error?! */
235 /* to avoid recursion in __req_mod */
236 if (unlikely(error)) {
237 what = (bio_data_dir(bio) == WRITE)
238 ? write_completed_with_error
239 : (bio_rw(bio) == READA)
240 ? read_ahead_completed_with_error
241 : read_completed_with_error;
245 bio_put(req->private_bio);
246 req->private_bio = ERR_PTR(error);
248 spin_lock_irqsave(&mdev->req_lock, flags);
249 __req_mod(req, what, &m);
250 spin_unlock_irqrestore(&mdev->req_lock, flags);
253 complete_master_bio(mdev, &m);
256 int w_io_error(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
258 struct drbd_request *req = container_of(w, struct drbd_request, w);
260 /* NOTE: mdev->ldev can be NULL by the time we get here! */
261 /* D_ASSERT(mdev->ldev->dc.on_io_error != EP_PASS_ON); */
263 /* the only way this callback is scheduled is from _req_may_be_done,
264 * when it is done and had a local write error, see comments there */
270 int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
272 struct drbd_request *req = container_of(w, struct drbd_request, w);
274 /* We should not detach for read io-error,
275 * but try to WRITE the P_DATA_REPLY to the failed location,
276 * to give the disk the chance to relocate that block */
278 spin_lock_irq(&mdev->req_lock);
280 mdev->state.conn < C_CONNECTED ||
281 mdev->state.pdsk <= D_INCONSISTENT) {
282 _req_mod(req, send_canceled);
283 spin_unlock_irq(&mdev->req_lock);
284 dev_alert(DEV, "WE ARE LOST. Local IO failure, no peer.\n");
287 spin_unlock_irq(&mdev->req_lock);
289 return w_send_read_req(mdev, w, 0);
292 int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
294 ERR_IF(cancel) return 1;
295 dev_err(DEV, "resync inactive, but callback triggered??\n");
296 return 1; /* Simply ignore this! */
299 void drbd_csum(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
301 struct hash_desc desc;
302 struct scatterlist sg;
303 struct bio_vec *bvec;
309 sg_init_table(&sg, 1);
310 crypto_hash_init(&desc);
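/* Hash the bio one segment at a time, reusing the single scatterlist entry
 * for each page; this avoids allocating a scatterlist as large as the bio. */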
312 __bio_for_each_segment(bvec, bio, i, 0) {
313 sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
314 crypto_hash_update(&desc, &sg, sg.length);
316 crypto_hash_final(&desc, digest);
319 static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
321 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
326 D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
328 if (unlikely(cancel)) {
329 drbd_free_ee(mdev, e);
333 if (likely(drbd_bio_uptodate(e->private_bio))) {
334 digest_size = crypto_hash_digestsize(mdev->csums_tfm);
335 digest = kmalloc(digest_size, GFP_NOIO);
337 drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
339 inc_rs_pending(mdev);
340 ok = drbd_send_drequest_csum(mdev,
348 dev_err(DEV, "kmalloc() of digest failed.\n");
354 drbd_free_ee(mdev, e);
357 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
361 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
363 static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
365 struct drbd_epoch_entry *e;
370 /* GFP_TRY, because if there is no memory available right now, this may
371 * be rescheduled for later. It is "only" background resync, after all. */
372 e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
378 spin_lock_irq(&mdev->req_lock);
379 list_add(&e->w.list, &mdev->read_ee);
380 spin_unlock_irq(&mdev->req_lock);
382 e->private_bio->bi_end_io = drbd_endio_read_sec;
383 e->private_bio->bi_rw = READ;
384 e->w.cb = w_e_send_csum;
386 mdev->read_cnt += size >> 9;
387 drbd_generic_make_request(mdev, DRBD_FAULT_RS_RD, e->private_bio);
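/* The read completes in drbd_endio_read_sec(), which queues e->w; the digest
 * is then computed and sent by w_e_send_csum() in worker context. */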
392 void resync_timer_fn(unsigned long data)
395 struct drbd_conf *mdev = (struct drbd_conf *) data;
398 spin_lock_irqsave(&mdev->req_lock, flags);
400 if (likely(!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))) {
402 if (mdev->state.conn == C_VERIFY_S)
403 mdev->resync_work.cb = w_make_ov_request;
405 mdev->resync_work.cb = w_make_resync_request;
408 mdev->resync_work.cb = w_resync_inactive;
411 spin_unlock_irqrestore(&mdev->req_lock, flags);
413 /* harmless race: list_empty outside data.work.q_lock */
414 if (list_empty(&mdev->resync_work.list) && queue)
415 drbd_queue_work(&mdev->data.work, &mdev->resync_work);
418 int w_make_resync_request(struct drbd_conf *mdev,
419 struct drbd_work *w, int cancel)
423 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
424 int max_segment_size = queue_max_segment_size(mdev->rq_queue);
425 int number, i, size, pe, mx;
426 int align, queued, sndbuf;
428 if (unlikely(cancel))
431 if (unlikely(mdev->state.conn < C_CONNECTED)) {
432 dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected\n");
436 if (mdev->state.conn != C_SYNC_TARGET)
437 dev_err(DEV, "%s in w_make_resync_request\n",
438 drbd_conn_str(mdev->state.conn));
440 if (!get_ldev(mdev)) {
441 /* Since we only need to access mdev->resync a
442 get_ldev_if_state(mdev, D_FAILED) would be sufficient, but
443 to continue resync with a broken disk makes no sense at all.
445 dev_err(DEV, "Disk broke down during resync!\n");
446 mdev->resync_work.cb = w_resync_inactive;
450 number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
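/* Worked example (assuming BM_BLOCK_SIZE of 4 KiB and a rate in KiB/s):
 * with SLEEP_TIME = HZ/10 this reduces to number = rate/40, e.g. a
 * configured rate of 10000 KiB/s yields 250 requests (1000 KiB) per
 * 100ms interval. */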
451 pe = atomic_read(&mdev->rs_pending_cnt);
453 mutex_lock(&mdev->data.mutex);
454 if (mdev->data.socket)
455 mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
458 mutex_unlock(&mdev->data.mutex);
460 /* For resync rates >160MB/sec, allow more pending RS requests */
464 /* Limit the number of pending RS requests to no more than the peer's receive buffer */
465 if ((pe + number) > mx) {
469 for (i = 0; i < number; i++) {
470 /* Stop generating RS requests, when half of the send buffer is filled */
471 mutex_lock(&mdev->data.mutex);
472 if (mdev->data.socket) {
473 queued = mdev->data.socket->sk->sk_wmem_queued;
474 sndbuf = mdev->data.socket->sk->sk_sndbuf;
479 mutex_unlock(&mdev->data.mutex);
480 if (queued > sndbuf / 2)
484 size = BM_BLOCK_SIZE;
485 bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
488 mdev->bm_resync_fo = drbd_bm_bits(mdev);
489 mdev->resync_work.cb = w_resync_inactive;
494 sector = BM_BIT_TO_SECT(bit);
496 if (drbd_try_rs_begin_io(mdev, sector)) {
497 mdev->bm_resync_fo = bit;
500 mdev->bm_resync_fo = bit + 1;
502 if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
503 drbd_rs_complete_io(mdev, sector);
507 #if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
508 /* try to find some adjacent bits.
509 * we stop if we have already the maximum req size.
511 * Additionally always align bigger requests, in order to
512 * be prepared for all stripe sizes of software RAIDs.
514 * we _do_ care about the agreed-upon q->max_segment_size
515 * here, as splitting up the requests on the other side is more
516 * difficult. the consequence is, that on lvm and md and other
517 * "indirect" devices, this is dead code, since
518 * q->max_segment_size will be PAGE_SIZE.
522 if (size + BM_BLOCK_SIZE > max_segment_size)
525 /* Be always aligned */
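/* One bitmap bit covers BM_BLOCK_SIZE (4 KiB), i.e. 8 sectors, so
 * (1 << (align+3)) is the next candidate request size expressed in
 * 512-byte sectors; only grow the request if its start sector is
 * aligned to that size. */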
526 if (sector & ((1<<(align+3))-1))
529 /* do not cross extent boundaries */
530 if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
532 /* now, is it actually dirty, after all?
533 * caution, drbd_bm_test_bit is tri-state for some
534 * obscure reason; ( b == 0 ) would get the out-of-band
535 * only accidentally right because of the "oddly sized"
536 * adjustment below */
537 if (drbd_bm_test_bit(mdev, bit+1) != 1)
540 size += BM_BLOCK_SIZE;
541 if ((BM_BLOCK_SIZE << align) <= size)
545 /* if we merged some,
546 * reset the offset to start the next drbd_bm_find_next from */
547 if (size > BM_BLOCK_SIZE)
548 mdev->bm_resync_fo = bit + 1;
551 /* adjust very last sectors, in case we are oddly sized */
552 if (sector + (size>>9) > capacity)
553 size = (capacity-sector)<<9;
554 if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
555 switch (read_for_csum(mdev, sector, size)) {
556 case 0: /* Disk failure */
559 case 2: /* Allocation failed */
560 drbd_rs_complete_io(mdev, sector);
561 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
563 /* case 1: everything ok */
566 inc_rs_pending(mdev);
567 if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
568 sector, size, ID_SYNCER)) {
569 dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
570 dec_rs_pending(mdev);
577 if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
578 /* last syncer _request_ was sent,
579 * but the P_RS_DATA_REPLY not yet received. sync will end (and
580 * next sync group will resume), as soon as we receive the last
581 * resync data block, and the last bit is cleared.
582 * until then resync "work" is "inactive" ...
584 mdev->resync_work.cb = w_resync_inactive;
590 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
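/* re-arm the timer; resync_timer_fn() will queue w_make_resync_request()
 * again in roughly 100ms */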
595 static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
599 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
601 if (unlikely(cancel))
604 if (unlikely(mdev->state.conn < C_CONNECTED)) {
605 dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected\n");
609 number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
610 if (atomic_read(&mdev->rs_pending_cnt) > number)
613 number -= atomic_read(&mdev->rs_pending_cnt);
615 sector = mdev->ov_position;
616 for (i = 0; i < number; i++) {
617 if (sector >= capacity) {
618 mdev->resync_work.cb = w_resync_inactive;
622 size = BM_BLOCK_SIZE;
624 if (drbd_try_rs_begin_io(mdev, sector)) {
625 mdev->ov_position = sector;
629 if (sector + (size>>9) > capacity)
630 size = (capacity-sector)<<9;
632 inc_rs_pending(mdev);
633 if (!drbd_send_ov_request(mdev, sector, size)) {
634 dec_rs_pending(mdev);
637 sector += BM_SECT_PER_BIT;
639 mdev->ov_position = sector;
642 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
647 int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
651 drbd_resync_finished(mdev);
656 static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
660 drbd_resync_finished(mdev);
665 int drbd_resync_finished(struct drbd_conf *mdev)
667 unsigned long db, dt, dbdt;
669 union drbd_state os, ns;
671 char *khelper_cmd = NULL;
673 /* Remove all elements from the resync LRU. Since future actions
674 * might set bits in the (main) bitmap, then the entries in the
675 * resync LRU would be wrong. */
676 if (drbd_rs_del_all(mdev)) {
677 /* In case this is not possible now, most probably because
678 * there are P_RS_DATA_REPLY packets lingering on the worker's
679 * queue (or even the read operations for those packets
680 * are not finished by now). Retry in 100ms. */
683 __set_current_state(TASK_INTERRUPTIBLE);
684 schedule_timeout(HZ / 10);
685 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
687 w->cb = w_resync_finished;
688 drbd_queue_work(&mdev->data.work, w);
691 dev_err(DEV, "Warning: both drbd_rs_del_all() and kmalloc(w) failed.\n");
694 dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
698 dbdt = Bit2KB(db/dt);
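/* db counts the bitmap bits (4 KiB blocks) brought in sync, dt the elapsed
 * seconds with paused time excluded, so dbdt is the average throughput in
 * KiB/s printed below. */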
699 mdev->rs_paused /= HZ;
704 spin_lock_irq(&mdev->req_lock);
707 /* This protects us against multiple calls (that can happen in the presence
708 of application IO), and against connectivity loss just before we arrive here. */
709 if (os.conn <= C_CONNECTED)
713 ns.conn = C_CONNECTED;
715 dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
716 (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ?
717 "Online verify " : "Resync",
718 dt + mdev->rs_paused, mdev->rs_paused, dbdt);
720 n_oos = drbd_bm_total_weight(mdev);
722 if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
724 dev_alert(DEV, "Online verify found %lu %dk blocks out of sync!\n",
726 khelper_cmd = "out-of-sync";
729 D_ASSERT((n_oos - mdev->rs_failed) == 0);
731 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
732 khelper_cmd = "after-resync-target";
734 if (mdev->csums_tfm && mdev->rs_total) {
735 const unsigned long s = mdev->rs_same_csum;
736 const unsigned long t = mdev->rs_total;
739 (t < 100000) ? ((s*100)/t) : (s/(t/100));
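/* Two variants to keep the percentage within unsigned long arithmetic:
 * for small totals use (s*100)/t for precision (t/100 could even be 0),
 * for large totals use s/(t/100) so s*100 cannot overflow on 32 bit. */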
740 dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; "
741 "transferred %luK total %luK\n",
743 Bit2KB(mdev->rs_same_csum),
744 Bit2KB(mdev->rs_total - mdev->rs_same_csum),
745 Bit2KB(mdev->rs_total));
749 if (mdev->rs_failed) {
750 dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
752 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
753 ns.disk = D_INCONSISTENT;
754 ns.pdsk = D_UP_TO_DATE;
756 ns.disk = D_UP_TO_DATE;
757 ns.pdsk = D_INCONSISTENT;
760 ns.disk = D_UP_TO_DATE;
761 ns.pdsk = D_UP_TO_DATE;
763 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
766 for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
767 _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
768 drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
769 _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
771 dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
775 drbd_uuid_set_bm(mdev, 0UL);
778 /* Now the two UUID sets are equal, update what we
779 * know of the peer. */
781 for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
782 mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
786 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
788 spin_unlock_irq(&mdev->req_lock);
794 mdev->ov_start_sector = 0;
796 if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
797 dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
798 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
802 drbd_khelper(mdev, khelper_cmd);
808 static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
810 if (drbd_bio_has_active_page(e->private_bio)) {
811 /* This might happen if sendpage() has not finished */
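/* i.e. the network layer may still hold references on pages handed over via
 * sendpage(); park the entry on net_ee instead of freeing pages that may
 * still be in flight. */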
812 spin_lock_irq(&mdev->req_lock);
813 list_add_tail(&e->w.list, &mdev->net_ee);
814 spin_unlock_irq(&mdev->req_lock);
816 drbd_free_ee(mdev, e);
820 * w_e_end_data_req() - Worker callback to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
821 * @mdev: DRBD device.
823 * @cancel: The connection will be closed anyway
825 int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
827 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
830 if (unlikely(cancel)) {
831 drbd_free_ee(mdev, e);
836 if (likely(drbd_bio_uptodate(e->private_bio))) {
837 ok = drbd_send_block(mdev, P_DATA_REPLY, e);
839 if (__ratelimit(&drbd_ratelimit_state))
840 dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
841 (unsigned long long)e->sector);
843 ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
848 move_to_net_ee_or_free(mdev, e);
851 dev_err(DEV, "drbd_send_block() failed\n");
856 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
857 * @mdev: DRBD device.
859 * @cancel: The connection will be closed anyway
861 int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
863 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
866 if (unlikely(cancel)) {
867 drbd_free_ee(mdev, e);
872 if (get_ldev_if_state(mdev, D_FAILED)) {
873 drbd_rs_complete_io(mdev, e->sector);
877 if (likely(drbd_bio_uptodate(e->private_bio))) {
878 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
879 inc_rs_pending(mdev);
880 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
882 if (__ratelimit(&drbd_ratelimit_state))
883 dev_err(DEV, "Not sending RSDataReply, "
884 "partner DISKLESS!\n");
888 if (__ratelimit(&drbd_ratelimit_state))
889 dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
890 (unsigned long long)e->sector);
892 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
894 /* update resync data with failure */
895 drbd_rs_failed_io(mdev, e->sector, e->size);
900 move_to_net_ee_or_free(mdev, e);
903 dev_err(DEV, "drbd_send_block() failed\n");
907 int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
909 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
910 struct digest_info *di;
915 if (unlikely(cancel)) {
916 drbd_free_ee(mdev, e);
921 drbd_rs_complete_io(mdev, e->sector);
923 di = (struct digest_info *)(unsigned long)e->block_id;
925 if (likely(drbd_bio_uptodate(e->private_bio))) {
926 /* quick hack to try to avoid a race against reconfiguration.
927 * a real fix would be much more involved,
928 * introducing more locking mechanisms */
929 if (mdev->csums_tfm) {
930 digest_size = crypto_hash_digestsize(mdev->csums_tfm);
931 D_ASSERT(digest_size == di->digest_size);
932 digest = kmalloc(digest_size, GFP_NOIO);
935 drbd_csum(mdev, mdev->csums_tfm, e->private_bio, digest);
936 eq = !memcmp(digest, di->digest, digest_size);
941 drbd_set_in_sync(mdev, e->sector, e->size);
942 mdev->rs_same_csum++;
943 ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
945 inc_rs_pending(mdev);
946 e->block_id = ID_SYNCER;
947 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
950 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
951 if (__ratelimit(&drbd_ratelimit_state))
952 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
959 move_to_net_ee_or_free(mdev, e);
962 dev_err(DEV, "drbd_send_block/ack() failed\n");
966 int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
968 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
973 if (unlikely(cancel))
976 if (unlikely(!drbd_bio_uptodate(e->private_bio)))
979 digest_size = crypto_hash_digestsize(mdev->verify_tfm);
980 /* FIXME if this allocation fails, online verify will not terminate! */
981 digest = kmalloc(digest_size, GFP_NOIO);
983 drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
984 inc_rs_pending(mdev);
985 ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
986 digest, digest_size, P_OV_REPLY);
988 dec_rs_pending(mdev);
993 drbd_free_ee(mdev, e);
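/* drbd_ov_oos_found() - note a block that online verify found out of sync.
 * Adjacent blocks are coalesced into one range (ov_last_oos_start/_size) so
 * they can be reported as a single region; the block is also marked out of
 * sync in the bitmap, and the bitmap is flagged for write-out afterwards. */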
1000 void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
1002 if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
1003 mdev->ov_last_oos_size += size>>9;
1005 mdev->ov_last_oos_start = sector;
1006 mdev->ov_last_oos_size = size>>9;
1008 drbd_set_out_of_sync(mdev, sector, size);
1009 set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
1012 int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1014 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1015 struct digest_info *di;
1020 if (unlikely(cancel)) {
1021 drbd_free_ee(mdev, e);
1026 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1027 * the resync lru has been cleaned up already */
1028 drbd_rs_complete_io(mdev, e->sector);
1030 di = (struct digest_info *)(unsigned long)e->block_id;
1032 if (likely(drbd_bio_uptodate(e->private_bio))) {
1033 digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1034 digest = kmalloc(digest_size, GFP_NOIO);
1036 drbd_csum(mdev, mdev->verify_tfm, e->private_bio, digest);
1038 D_ASSERT(digest_size == di->digest_size);
1039 eq = !memcmp(digest, di->digest, digest_size);
1043 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1044 if (__ratelimit(&drbd_ratelimit_state))
1045 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1053 drbd_ov_oos_found(mdev, e->sector, e->size);
1057 ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size,
1058 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1060 drbd_free_ee(mdev, e);
1062 if (--mdev->ov_left == 0) {
1064 drbd_resync_finished(mdev);
1070 int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1072 struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
1077 int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1079 struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
1080 struct p_barrier *p = &mdev->data.sbuf.barrier;
1083 /* really avoid racing with tl_clear. w.cb may have been referenced
1084 * just before it was reassigned and re-queued, so double check that.
1085 * actually, this race was harmless, since we only try to send the
1086 * barrier packet here, and otherwise do nothing with the object.
1087 * but compare with the head of w_clear_epoch */
1088 spin_lock_irq(&mdev->req_lock);
1089 if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
1091 spin_unlock_irq(&mdev->req_lock);
1095 if (!drbd_get_data_sock(mdev))
1097 p->barrier = b->br_number;
1098 /* inc_ap_pending was done where this was queued.
1099 * dec_ap_pending will be done in got_BarrierAck
1100 * or (on connection loss) in w_clear_epoch. */
1101 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
1102 (struct p_header *)p, sizeof(*p), 0);
1103 drbd_put_data_sock(mdev);
1108 int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1112 return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
1116 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1117 * @mdev: DRBD device.
1119 * @cancel: The connection will be closed anyway
1121 int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1123 struct drbd_request *req = container_of(w, struct drbd_request, w);
1126 if (unlikely(cancel)) {
1127 req_mod(req, send_canceled);
1131 ok = drbd_send_dblock(mdev, req);
1132 req_mod(req, ok ? handed_over_to_network : send_failed);
1138 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1139 * @mdev: DRBD device.
1141 * @cancel: The connection will be closed anyway
1143 int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1145 struct drbd_request *req = container_of(w, struct drbd_request, w);
1148 if (unlikely(cancel)) {
1149 req_mod(req, send_canceled);
1153 ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
1154 (unsigned long)req);
1157 /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
1158 * so this is probably redundant */
1159 if (mdev->state.conn >= C_CONNECTED)
1160 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
1162 req_mod(req, ok ? handed_over_to_network : send_failed);
1167 static int _drbd_may_sync_now(struct drbd_conf *mdev)
1169 struct drbd_conf *odev = mdev;
1172 if (odev->sync_conf.after == -1)
1174 odev = minor_to_mdev(odev->sync_conf.after);
1175 ERR_IF(!odev) return 1;
1176 if ((odev->state.conn >= C_SYNC_SOURCE &&
1177 odev->state.conn <= C_PAUSED_SYNC_T) ||
1178 odev->state.aftr_isp || odev->state.peer_isp ||
1179 odev->state.user_isp)
1185 * _drbd_pause_after() - Pause resync on all devices that may not resync now
1186 * @mdev: DRBD device.
1188 * Called from process context only (admin command and after_state_ch).
1190 static int _drbd_pause_after(struct drbd_conf *mdev)
1192 struct drbd_conf *odev;
1195 for (i = 0; i < minor_count; i++) {
1196 odev = minor_to_mdev(i);
1199 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1201 if (!_drbd_may_sync_now(odev))
1202 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
1203 != SS_NOTHING_TO_DO);
1210 * _drbd_resume_next() - Resume resync on all devices that may resync now
1211 * @mdev: DRBD device.
1213 * Called from process context only (admin command and worker).
1215 static int _drbd_resume_next(struct drbd_conf *mdev)
1217 struct drbd_conf *odev;
1220 for (i = 0; i < minor_count; i++) {
1221 odev = minor_to_mdev(i);
1224 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1226 if (odev->state.aftr_isp) {
1227 if (_drbd_may_sync_now(odev))
1228 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
1230 != SS_NOTHING_TO_DO) ;
1236 void resume_next_sg(struct drbd_conf *mdev)
1238 write_lock_irq(&global_state_lock);
1239 _drbd_resume_next(mdev);
1240 write_unlock_irq(&global_state_lock);
1243 void suspend_other_sg(struct drbd_conf *mdev)
1245 write_lock_irq(&global_state_lock);
1246 _drbd_pause_after(mdev);
1247 write_unlock_irq(&global_state_lock);
1250 static int sync_after_error(struct drbd_conf *mdev, int o_minor)
1252 struct drbd_conf *odev;
1256 if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
1257 return ERR_SYNC_AFTER;
1259 /* check for loops */
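/* walk the sync-after dependency chain starting at the proposed minor; if it
 * ever leads back to mdev itself, the new setting would create a cycle. */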
1260 odev = minor_to_mdev(o_minor);
1263 return ERR_SYNC_AFTER_CYCLE;
1265 /* dependency chain ends here, no cycles. */
1266 if (odev->sync_conf.after == -1)
1269 /* follow the dependency chain */
1270 odev = minor_to_mdev(odev->sync_conf.after);
1274 int drbd_alter_sa(struct drbd_conf *mdev, int na)
1279 write_lock_irq(&global_state_lock);
1280 retcode = sync_after_error(mdev, na);
1281 if (retcode == NO_ERROR) {
1282 mdev->sync_conf.after = na;
1284 changes = _drbd_pause_after(mdev);
1285 changes |= _drbd_resume_next(mdev);
1288 write_unlock_irq(&global_state_lock);
1293 * drbd_start_resync() - Start the resync process
1294 * @mdev: DRBD device.
1295 * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
1297 * This function might bring you directly into one of the
1298 * C_PAUSED_SYNC_* states.
1300 void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1302 union drbd_state ns;
1305 if (mdev->state.conn >= C_SYNC_SOURCE) {
1306 dev_err(DEV, "Resync already running!\n");
1310 /* In case a previous resync run was aborted by an IO error/detach on the peer. */
1311 drbd_rs_cancel_all(mdev);
1313 if (side == C_SYNC_TARGET) {
1314 /* Since application IO was locked out during C_WF_BITMAP_T and
1315 C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1316 we ask the before-resync-target handler whether we may make the data inconsistent. */
1317 r = drbd_khelper(mdev, "before-resync-target");
1318 r = (r >> 8) & 0xff;
1320 dev_info(DEV, "before-resync-target handler returned %d, "
1321 "dropping connection.\n", r);
1322 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
1327 drbd_state_lock(mdev);
1329 if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
1330 drbd_state_unlock(mdev);
1334 if (side == C_SYNC_TARGET) {
1335 mdev->bm_resync_fo = 0;
1336 } else /* side == C_SYNC_SOURCE */ {
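/* As sync source, rotate a fresh random UUID into the bitmap slot and send it
 * to the peer, so that both nodes can later relate the target's (temporarily
 * inconsistent) data to this particular sync run. */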
1339 get_random_bytes(&uuid, sizeof(u64));
1340 drbd_uuid_set(mdev, UI_BITMAP, uuid);
1341 drbd_send_sync_uuid(mdev, uuid);
1343 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
1346 write_lock_irq(&global_state_lock);
1349 ns.aftr_isp = !_drbd_may_sync_now(mdev);
1353 if (side == C_SYNC_TARGET)
1354 ns.disk = D_INCONSISTENT;
1355 else /* side == C_SYNC_SOURCE */
1356 ns.pdsk = D_INCONSISTENT;
1358 r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1361 if (ns.conn < C_CONNECTED)
1362 r = SS_UNKNOWN_ERROR;
1364 if (r == SS_SUCCESS) {
1366 mdev->rs_mark_left = drbd_bm_total_weight(mdev);
1367 mdev->rs_failed = 0;
1368 mdev->rs_paused = 0;
1370 mdev->rs_mark_time = jiffies;
1371 mdev->rs_same_csum = 0;
1372 _drbd_pause_after(mdev);
1374 write_unlock_irq(&global_state_lock);
1375 drbd_state_unlock(mdev);
1378 if (r == SS_SUCCESS) {
1379 dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1380 drbd_conn_str(ns.conn),
1381 (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
1382 (unsigned long) mdev->rs_total);
1384 if (mdev->rs_total == 0) {
1385 /* Peer still reachable? Beware of failing before-resync-target handlers! */
1387 __set_current_state(TASK_INTERRUPTIBLE);
1388 schedule_timeout(mdev->net_conf->ping_timeo*HZ/9); /* 9 instead of 10 */
1389 drbd_resync_finished(mdev);
1393 /* ns.conn may already be != mdev->state.conn,
1394 * we may have been paused in between, or become paused until
1395 * the timer triggers.
1396 * No matter, that is handled in resync_timer_fn() */
1397 if (ns.conn == C_SYNC_TARGET)
1398 mod_timer(&mdev->resync_timer, jiffies);
1404 int drbd_worker(struct drbd_thread *thi)
1406 struct drbd_conf *mdev = thi->mdev;
1407 struct drbd_work *w = NULL;
1408 LIST_HEAD(work_list);
1411 sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
1413 while (get_t_state(thi) == Running) {
1414 drbd_thread_current_set_cpu(mdev);
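/* If no work is pending right now, uncork the data socket so everything
 * queued so far gets sent, then sleep until new work (or a signal) arrives
 * and cork it again before processing the next batch. */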
1416 if (down_trylock(&mdev->data.work.s)) {
1417 mutex_lock(&mdev->data.mutex);
1418 if (mdev->data.socket && !mdev->net_conf->no_cork)
1419 drbd_tcp_uncork(mdev->data.socket);
1420 mutex_unlock(&mdev->data.mutex);
1422 intr = down_interruptible(&mdev->data.work.s);
1424 mutex_lock(&mdev->data.mutex);
1425 if (mdev->data.socket && !mdev->net_conf->no_cork)
1426 drbd_tcp_cork(mdev->data.socket);
1427 mutex_unlock(&mdev->data.mutex);
1431 D_ASSERT(intr == -EINTR);
1432 flush_signals(current);
1433 ERR_IF (get_t_state(thi) == Running)
1438 if (get_t_state(thi) != Running)
1440 /* With this break, we have done a down() but not consumed
1441 the entry from the list. The cleanup code takes care of
1445 spin_lock_irq(&mdev->data.work.q_lock);
1446 ERR_IF(list_empty(&mdev->data.work.q)) {
1447 /* something terribly wrong in our logic.
1448 * we were able to down() the semaphore,
1449 * but the list is empty... doh.
1451 * what is the best thing to do now?
1452 * try again from scratch, restarting the receiver,
1453 * asender, whatnot? could break even more ugly,
1454 * e.g. when we are primary, but no good local data.
1456 * I'll try to get away just starting over this loop.
1458 spin_unlock_irq(&mdev->data.work.q_lock);
1461 w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
1462 list_del_init(&w->list);
1463 spin_unlock_irq(&mdev->data.work.q_lock);
1465 if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
1466 /* dev_warn(DEV, "worker: a callback failed! \n"); */
1467 if (mdev->state.conn >= C_CONNECTED)
1468 drbd_force_state(mdev,
1469 NS(conn, C_NETWORK_FAILURE));
1472 D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
1473 D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
1475 spin_lock_irq(&mdev->data.work.q_lock);
1477 while (!list_empty(&mdev->data.work.q)) {
1478 list_splice_init(&mdev->data.work.q, &work_list);
1479 spin_unlock_irq(&mdev->data.work.q_lock);
1481 while (!list_empty(&work_list)) {
1482 w = list_entry(work_list.next, struct drbd_work, list);
1483 list_del_init(&w->list);
1485 i++; /* dead debugging code */
1488 spin_lock_irq(&mdev->data.work.q_lock);
1490 sema_init(&mdev->data.work.s, 0);
1491 /* DANGEROUS race: if someone did queue his work within the spinlock,
1492 * but up() ed outside the spinlock, we could get an up() on the
1493 * semaphore without corresponding list entry.
1496 spin_unlock_irq(&mdev->data.work.q_lock);
1498 D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
1499 /* _drbd_set_state only uses stop_nowait.
1500 * wait here for the Exiting receiver. */
1501 drbd_thread_stop(&mdev->receiver);
1502 drbd_mdev_cleanup(mdev);
1504 dev_info(DEV, "worker terminated\n");
1506 clear_bit(DEVICE_DYING, &mdev->flags);
1507 clear_bit(CONFIG_PENDING, &mdev->flags);
1508 wake_up(&mdev->state_wait);