4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/sched.h>
29 #include <linux/wait.h>
31 #include <linux/memcontrol.h>
32 #include <linux/mm_inline.h>
33 #include <linux/slab.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 #include <linux/scatterlist.h>
41 static int w_make_ov_request(struct drbd_work *w, int cancel);
42 static int w_make_resync_request(struct drbd_work *w, int cancel);
47 * drbd_md_io_complete (defined here)
48 * drbd_request_endio (defined here)
49 * drbd_peer_request_endio (defined here)
50 * bm_async_io_complete (defined in drbd_bitmap.c)
52 * For all these callbacks, note the following:
53 * The callbacks will be called in irq context by the IDE drivers,
54 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
55 * Try to get the locking right :)
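/*
 * In short: everything these completion handlers touch must use the
 * irq-safe locking variants; see for example the
 * spin_lock_irqsave(&mdev->tconn->req_lock, ...) calls in
 * drbd_endio_read_sec_final() and drbd_endio_write_sec_final() below.
 */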
60 /* About the global_state_lock
61 Each state transition on a device holds a read lock. In case we have
62 to evaluate the sync-after dependencies, we grab a write lock, because
63 we need stable states on all devices for that. */
64 rwlock_t global_state_lock;
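/*
 * Rough usage sketch: as the comment above says, each individual state
 * transition takes the read side of this lock, while code that must look
 * at all devices at once to re-evaluate the sync-after dependencies
 * (e.g. resume_next_sg(), suspend_other_sg() and drbd_start_resync()
 * below) takes write_lock_irq(&global_state_lock).
 */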
66 /* used for synchronous meta data and bitmap IO
67 * submitted by drbd_md_sync_page_io() */
69 void drbd_md_io_complete(struct bio *bio, int error)
71 struct drbd_md_io *md_io;
73 md_io = (struct drbd_md_io *)bio->bi_private;
76 complete(&md_io->event);
79 /* reads on behalf of the partner,
80 * "submitted" by the receiver
82 void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
84 unsigned long flags = 0;
85 struct drbd_conf *mdev = peer_req->w.mdev;
87 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
88 mdev->read_cnt += peer_req->i.size >> 9;
89 list_del(&peer_req->w.list);
90 if (list_empty(&mdev->read_ee))
91 wake_up(&mdev->ee_wait);
92 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
93 __drbd_chk_io_error(mdev, false);
94 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
96 drbd_queue_work(&mdev->tconn->data.work, &peer_req->w);
100 /* writes on behalf of the partner, or resync writes,
101 * "submitted" by the receiver, final stage. */
102 static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
104 unsigned long flags = 0;
105 struct drbd_conf *mdev = peer_req->w.mdev;
109 int do_al_complete_io;
111 /* after we moved peer_req to done_ee,
112 * we may no longer access it,
113 * it may be freed/reused already!
114 * (as soon as we release the req_lock) */
115 e_sector = peer_req->i.sector;
116 do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
117 block_id = peer_req->block_id;
119 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
120 mdev->writ_cnt += peer_req->i.size >> 9;
121 list_del(&peer_req->w.list); /* has been on active_ee or sync_ee */
122 list_add_tail(&peer_req->w.list, &mdev->done_ee);
125 * Do not remove from the write_requests tree here: we did not send the
126 * Ack yet and did not wake possibly waiting conflicting requests.
127 * Removed from the tree from "drbd_process_done_ee" within the
128 * appropriate w.cb (e_end_block/e_end_resync_block) or from
129 * _drbd_clear_done_ee.
132 do_wake = list_empty(block_id == ID_SYNCER ? &mdev->sync_ee : &mdev->active_ee);
134 if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
135 __drbd_chk_io_error(mdev, false);
136 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
138 if (block_id == ID_SYNCER)
139 drbd_rs_complete_io(mdev, e_sector);
142 wake_up(&mdev->ee_wait);
144 if (do_al_complete_io)
145 drbd_al_complete_io(mdev, e_sector);
147 wake_asender(mdev->tconn);
151 /* writes on behalf of the partner, or resync writes,
152 * "submitted" by the receiver.
154 void drbd_peer_request_endio(struct bio *bio, int error)
156 struct drbd_peer_request *peer_req = bio->bi_private;
157 struct drbd_conf *mdev = peer_req->w.mdev;
158 int uptodate = bio_flagged(bio, BIO_UPTODATE);
159 int is_write = bio_data_dir(bio) == WRITE;
161 if (error && __ratelimit(&drbd_ratelimit_state))
162 dev_warn(DEV, "%s: error=%d s=%llus\n",
163 is_write ? "write" : "read", error,
164 (unsigned long long)peer_req->i.sector);
165 if (!error && !uptodate) {
166 if (__ratelimit(&drbd_ratelimit_state))
167 dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
168 is_write ? "write" : "read",
169 (unsigned long long)peer_req->i.sector);
170 /* strange behavior of some lower level drivers...
171 * fail the request by clearing the uptodate flag,
172 * but do not return any error?! */
177 set_bit(__EE_WAS_ERROR, &peer_req->flags);
179 bio_put(bio); /* no need for the bio anymore */
180 if (atomic_dec_and_test(&peer_req->pending_bios)) {
182 drbd_endio_write_sec_final(peer_req);
184 drbd_endio_read_sec_final(peer_req);
188 /* read, readA or write requests on R_PRIMARY coming from drbd_make_request */
190 void drbd_request_endio(struct bio *bio, int error)
193 struct drbd_request *req = bio->bi_private;
194 struct drbd_conf *mdev = req->w.mdev;
195 struct bio_and_error m;
196 enum drbd_req_event what;
197 int uptodate = bio_flagged(bio, BIO_UPTODATE);
199 if (!error && !uptodate) {
200 dev_warn(DEV, "p %s: setting error to -EIO\n",
201 bio_data_dir(bio) == WRITE ? "write" : "read");
202 /* strange behavior of some lower level drivers...
203 * fail the request by clearing the uptodate flag,
204 * but do not return any error?! */
208 /* to avoid recursion in __req_mod */
209 if (unlikely(error)) {
210 what = (bio_data_dir(bio) == WRITE)
211 ? WRITE_COMPLETED_WITH_ERROR
212 : (bio_rw(bio) == READ)
213 ? READ_COMPLETED_WITH_ERROR
214 : READ_AHEAD_COMPLETED_WITH_ERROR;
218 bio_put(req->private_bio);
219 req->private_bio = ERR_PTR(error);
221 /* not req_mod(), we need irqsave here! */
222 spin_lock_irqsave(&mdev->tconn->req_lock, flags);
223 __req_mod(req, what, &m);
224 spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
227 complete_master_bio(mdev, &m);
230 int w_read_retry_remote(struct drbd_work *w, int cancel)
232 struct drbd_request *req = container_of(w, struct drbd_request, w);
233 struct drbd_conf *mdev = w->mdev;
235 /* We should not detach for read io-error,
236 * but try to WRITE the P_DATA_REPLY to the failed location,
237 * to give the disk the chance to relocate that block */
239 spin_lock_irq(&mdev->tconn->req_lock);
240 if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
241 _req_mod(req, READ_RETRY_REMOTE_CANCELED);
242 spin_unlock_irq(&mdev->tconn->req_lock);
245 spin_unlock_irq(&mdev->tconn->req_lock);
247 return w_send_read_req(w, 0);
250 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
251 struct drbd_peer_request *peer_req, void *digest)
253 struct hash_desc desc;
254 struct scatterlist sg;
255 struct page *page = peer_req->pages;
262 sg_init_table(&sg, 1);
263 crypto_hash_init(&desc);
265 while ((tmp = page_chain_next(page))) {
266 /* all but the last page will be fully used */
267 sg_set_page(&sg, page, PAGE_SIZE, 0);
268 crypto_hash_update(&desc, &sg, sg.length);
271 /* and now the last, possibly only partially used page */
272 len = peer_req->i.size & (PAGE_SIZE - 1);
273 sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
274 crypto_hash_update(&desc, &sg, sg.length);
275 crypto_hash_final(&desc, digest);
278 void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
280 struct hash_desc desc;
281 struct scatterlist sg;
282 struct bio_vec *bvec;
288 sg_init_table(&sg, 1);
289 crypto_hash_init(&desc);
291 __bio_for_each_segment(bvec, bio, i, 0) {
292 sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
293 crypto_hash_update(&desc, &sg, sg.length);
295 crypto_hash_final(&desc, digest);
298 /* MAYBE merge common code with w_e_end_ov_req */
299 static int w_e_send_csum(struct drbd_work *w, int cancel)
301 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
302 struct drbd_conf *mdev = w->mdev;
307 if (unlikely(cancel))
310 if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
313 digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
314 digest = kmalloc(digest_size, GFP_NOIO);
316 sector_t sector = peer_req->i.sector;
317 unsigned int size = peer_req->i.size;
318 drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
319 /* Free peer_req and pages before send.
320 * In case we block on congestion, we could otherwise run into
321 * some distributed deadlock, if the other side blocks on
322 * congestion as well, because our receiver blocks in
323 * drbd_pp_alloc due to pp_in_use > max_buffers. */
324 drbd_free_ee(mdev, peer_req);
326 inc_rs_pending(mdev);
327 ok = drbd_send_drequest_csum(mdev, sector, size,
332 dev_err(DEV, "kmalloc() of digest failed.\n");
338 drbd_free_ee(mdev, peer_req);
341 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
345 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
347 static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
349 struct drbd_peer_request *peer_req;
354 if (drbd_rs_should_slow_down(mdev, sector))
357 /* GFP_TRY, because if there is no memory available right now, this may
358 * be rescheduled for later. It is "only" background resync, after all. */
359 peer_req = drbd_alloc_ee(mdev, ID_SYNCER /* unused */, sector, size, GFP_TRY);
363 peer_req->w.cb = w_e_send_csum;
364 spin_lock_irq(&mdev->tconn->req_lock);
365 list_add(&peer_req->w.list, &mdev->read_ee);
366 spin_unlock_irq(&mdev->tconn->req_lock);
368 atomic_add(size >> 9, &mdev->rs_sect_ev);
369 if (drbd_submit_peer_request(mdev, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
372 /* If it failed because of ENOMEM, retry should help. If it failed
373 * because bio_add_page failed (probably broken lower level driver),
374 * retry may or may not help.
375 * If it does not, you may need to force disconnect. */
376 spin_lock_irq(&mdev->tconn->req_lock);
377 list_del(&peer_req->w.list);
378 spin_unlock_irq(&mdev->tconn->req_lock);
380 drbd_free_ee(mdev, peer_req);
386 int w_resync_timer(struct drbd_work *w, int cancel)
388 struct drbd_conf *mdev = w->mdev;
389 switch (mdev->state.conn) {
391 w_make_ov_request(w, cancel);
394 w_make_resync_request(w, cancel);
401 void resync_timer_fn(unsigned long data)
403 struct drbd_conf *mdev = (struct drbd_conf *) data;
405 if (list_empty(&mdev->resync_work.list))
406 drbd_queue_work(&mdev->tconn->data.work, &mdev->resync_work);
409 static void fifo_set(struct fifo_buffer *fb, int value)
413 for (i = 0; i < fb->size; i++)
414 fb->values[i] = value;
417 static int fifo_push(struct fifo_buffer *fb, int value)
421 ov = fb->values[fb->head_index];
422 fb->values[fb->head_index++] = value;
424 if (fb->head_index >= fb->size)
430 static void fifo_add_val(struct fifo_buffer *fb, int value)
434 for (i = 0; i < fb->size; i++)
435 fb->values[i] += value;
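/*
 * The three helpers above implement the plan-ahead FIFO (rs_plan_s) used
 * by the dynamic resync controller below: fifo_push() returns the oldest
 * planned correction while queueing a new one, and fifo_add_val() spreads
 * an additional correction evenly over all plan slots.  Roughly, each
 * slot corresponds to one SLEEP_TIME tick of drbd_rs_controller().
 */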
438 static int drbd_rs_controller(struct drbd_conf *mdev)
440 unsigned int sect_in; /* Number of sectors that came in since the last turn */
441 unsigned int want; /* The number of sectors we want in the proxy */
442 int req_sect; /* Number of sectors to request in this turn */
443 int correction; /* Number of sectors more we need in the proxy */
444 int cps; /* correction per invocation of drbd_rs_controller() */
445 int steps; /* Number of time steps to plan ahead */
449 sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
450 mdev->rs_in_flight -= sect_in;
452 spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
454 steps = mdev->rs_plan_s.size; /* (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
456 if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
457 want = ((mdev->ldev->dc.resync_rate * 2 * SLEEP_TIME) / HZ) * steps;
458 } else { /* normal path */
459 want = mdev->ldev->dc.c_fill_target ? mdev->ldev->dc.c_fill_target :
460 sect_in * mdev->ldev->dc.c_delay_target * HZ / (SLEEP_TIME * 10);
463 correction = want - mdev->rs_in_flight - mdev->rs_planed;
466 cps = correction / steps;
467 fifo_add_val(&mdev->rs_plan_s, cps);
468 mdev->rs_planed += cps * steps;
470 /* What we do in this step */
471 curr_corr = fifo_push(&mdev->rs_plan_s, 0);
472 spin_unlock(&mdev->peer_seq_lock);
473 mdev->rs_planed -= curr_corr;
475 req_sect = sect_in + curr_corr;
479 max_sect = (mdev->ldev->dc.c_max_rate * 2 * SLEEP_TIME) / HZ;
480 if (req_sect > max_sect)
484 dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
485 sect_in, mdev->rs_in_flight, want, correction,
486 steps, cps, mdev->rs_planed, curr_corr, req_sect);
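/*
 * Illustrative arithmetic (values chosen only to give round numbers,
 * assuming SLEEP_TIME is HZ/10, i.e. 100 ms): with c_fill_target unset,
 * c_delay_target = 10 and sect_in = 2048 sectors received during the
 * last interval,
 *	want = 2048 * 10 * HZ / ((HZ/10) * 10) = 20480 sectors.
 * The difference between that and rs_in_flight + rs_planed is spread as
 * cps over the plan-ahead FIFO; fifo_push() then yields this step's share.
 */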
492 static int drbd_rs_number_requests(struct drbd_conf *mdev)
495 if (mdev->rs_plan_s.size) { /* mdev->ldev->dc.c_plan_ahead */
496 number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
497 mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
499 mdev->c_sync_rate = mdev->ldev->dc.resync_rate;
500 number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
503 /* ignore the amount of pending requests; the resync controller should
504 * throttle down to the incoming reply rate soon enough anyway. */
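/*
 * Unit sketch for the fixed-rate branch above, assuming BM_BLOCK_SIZE is
 * 4 KiB and SLEEP_TIME is HZ/10 (100 ms): c_sync_rate is in KiB/s, so
 *	number = SLEEP_TIME * c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ)
 *	       = c_sync_rate / 40,
 * e.g. a resync_rate of 10240 KiB/s maps to 256 bitmap-block sized
 * requests per timer tick, i.e. 10 MiB/s.
 */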
508 static int w_make_resync_request(struct drbd_work *w, int cancel)
510 struct drbd_conf *mdev = w->mdev;
513 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
515 int number, rollback_i, size;
516 int align, queued, sndbuf;
519 if (unlikely(cancel))
522 if (mdev->rs_total == 0) {
524 drbd_resync_finished(mdev);
528 if (!get_ldev(mdev)) {
529 /* Since we only need to access mdev->rsync a
530 get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
531 to continue resync with a broken disk makes no sense at all. */
533 dev_err(DEV, "Disk broke down during resync!\n");
537 max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
538 number = drbd_rs_number_requests(mdev);
542 for (i = 0; i < number; i++) {
543 /* Stop generating RS requests when half of the send buffer is filled */
544 mutex_lock(&mdev->tconn->data.mutex);
545 if (mdev->tconn->data.socket) {
546 queued = mdev->tconn->data.socket->sk->sk_wmem_queued;
547 sndbuf = mdev->tconn->data.socket->sk->sk_sndbuf;
552 mutex_unlock(&mdev->tconn->data.mutex);
553 if (queued > sndbuf / 2)
557 size = BM_BLOCK_SIZE;
558 bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
560 if (bit == DRBD_END_OF_BITMAP) {
561 mdev->bm_resync_fo = drbd_bm_bits(mdev);
566 sector = BM_BIT_TO_SECT(bit);
568 if (drbd_rs_should_slow_down(mdev, sector) ||
569 drbd_try_rs_begin_io(mdev, sector)) {
570 mdev->bm_resync_fo = bit;
573 mdev->bm_resync_fo = bit + 1;
575 if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
576 drbd_rs_complete_io(mdev, sector);
580 #if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
581 /* try to find some adjacent bits.
582 * we stop if we already have the maximum req size.
584 * Additionally always align bigger requests, in order to
585 * be prepared for all stripe sizes of software RAIDs. */
590 if (size + BM_BLOCK_SIZE > max_bio_size)
593 /* Always be aligned */
594 if (sector & ((1<<(align+3))-1))
597 /* do not cross extent boundaries */
598 if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
600 /* now, is it actually dirty, after all?
601 * caution, drbd_bm_test_bit is tri-state for some
602 * obscure reason; ( b == 0 ) would get the out-of-band
603 * only accidentally right because of the "oddly sized"
604 * adjustment below */
605 if (drbd_bm_test_bit(mdev, bit+1) != 1)
608 size += BM_BLOCK_SIZE;
609 if ((BM_BLOCK_SIZE << align) <= size)
613 /* if we merged some,
614 * reset the offset to start the next drbd_bm_find_next from */
615 if (size > BM_BLOCK_SIZE)
616 mdev->bm_resync_fo = bit + 1;
619 /* adjust very last sectors, in case we are oddly sized */
620 if (sector + (size>>9) > capacity)
621 size = (capacity-sector)<<9;
622 if (mdev->tconn->agreed_pro_version >= 89 && mdev->tconn->csums_tfm) {
623 switch (read_for_csum(mdev, sector, size)) {
624 case -EIO: /* Disk failure */
627 case -EAGAIN: /* allocation failed, or ldev busy */
628 drbd_rs_complete_io(mdev, sector);
629 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
639 inc_rs_pending(mdev);
640 if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
641 sector, size, ID_SYNCER)) {
642 dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
643 dec_rs_pending(mdev);
650 if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
651 /* last syncer _request_ was sent,
652 * but the P_RS_DATA_REPLY not yet received. sync will end (and
653 * next sync group will resume), as soon as we receive the last
654 * resync data block, and the last bit is cleared.
655 * until then resync "work" is "inactive" ... */
662 mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
663 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
668 static int w_make_ov_request(struct drbd_work *w, int cancel)
670 struct drbd_conf *mdev = w->mdev;
673 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
675 if (unlikely(cancel))
678 number = drbd_rs_number_requests(mdev);
680 sector = mdev->ov_position;
681 for (i = 0; i < number; i++) {
682 if (sector >= capacity) {
686 size = BM_BLOCK_SIZE;
688 if (drbd_rs_should_slow_down(mdev, sector) ||
689 drbd_try_rs_begin_io(mdev, sector)) {
690 mdev->ov_position = sector;
694 if (sector + (size>>9) > capacity)
695 size = (capacity-sector)<<9;
697 inc_rs_pending(mdev);
698 if (!drbd_send_ov_request(mdev, sector, size)) {
699 dec_rs_pending(mdev);
702 sector += BM_SECT_PER_BIT;
704 mdev->ov_position = sector;
707 mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
708 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
712 int w_ov_finished(struct drbd_work *w, int cancel)
714 struct drbd_conf *mdev = w->mdev;
717 drbd_resync_finished(mdev);
722 static int w_resync_finished(struct drbd_work *w, int cancel)
724 struct drbd_conf *mdev = w->mdev;
727 drbd_resync_finished(mdev);
732 static void ping_peer(struct drbd_conf *mdev)
734 struct drbd_tconn *tconn = mdev->tconn;
736 clear_bit(GOT_PING_ACK, &tconn->flags);
738 wait_event(tconn->ping_wait,
739 test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
742 int drbd_resync_finished(struct drbd_conf *mdev)
744 unsigned long db, dt, dbdt;
746 union drbd_state os, ns;
748 char *khelper_cmd = NULL;
751 /* Remove all elements from the resync LRU. Since future actions
752 * might set bits in the (main) bitmap, the entries in the
753 * resync LRU would otherwise be wrong. */
754 if (drbd_rs_del_all(mdev)) {
755 /* In case this is not possible now, most probably because
756 * there are P_RS_DATA_REPLY packets lingering on the worker's
757 * queue (or even the read operations for those packets
758 * are not finished by now). Retry in 100ms. */
760 schedule_timeout_interruptible(HZ / 10);
761 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
763 w->cb = w_resync_finished;
764 drbd_queue_work(&mdev->tconn->data.work, w);
767 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
770 dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
774 dbdt = Bit2KB(db/dt);
775 mdev->rs_paused /= HZ;
782 spin_lock_irq(&mdev->tconn->req_lock);
785 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
787 /* This protects us against multiple calls (that can happen in the presence
788 of application IO), and against connectivity loss just before we arrive here. */
789 if (os.conn <= C_CONNECTED)
793 ns.conn = C_CONNECTED;
795 dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
796 verify_done ? "Online verify " : "Resync",
797 dt + mdev->rs_paused, mdev->rs_paused, dbdt);
799 n_oos = drbd_bm_total_weight(mdev);
801 if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
803 dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
805 khelper_cmd = "out-of-sync";
808 D_ASSERT((n_oos - mdev->rs_failed) == 0);
810 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
811 khelper_cmd = "after-resync-target";
813 if (mdev->tconn->csums_tfm && mdev->rs_total) {
814 const unsigned long s = mdev->rs_same_csum;
815 const unsigned long t = mdev->rs_total;
818 (t < 100000) ? ((s*100)/t) : (s/(t/100));
819 dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; "
820 "transferred %luK total %luK\n",
822 Bit2KB(mdev->rs_same_csum),
823 Bit2KB(mdev->rs_total - mdev->rs_same_csum),
824 Bit2KB(mdev->rs_total));
828 if (mdev->rs_failed) {
829 dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
831 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
832 ns.disk = D_INCONSISTENT;
833 ns.pdsk = D_UP_TO_DATE;
835 ns.disk = D_UP_TO_DATE;
836 ns.pdsk = D_INCONSISTENT;
839 ns.disk = D_UP_TO_DATE;
840 ns.pdsk = D_UP_TO_DATE;
842 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
845 for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
846 _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
847 drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
848 _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
850 dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
854 if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
855 /* for verify runs, we don't update uuids here,
856 * so there would be nothing to report. */
857 drbd_uuid_set_bm(mdev, 0UL);
858 drbd_print_uuids(mdev, "updated UUIDs");
860 /* Now the two UUID sets are equal, update what we
861 * know of the peer. */
863 for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
864 mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
869 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
871 spin_unlock_irq(&mdev->tconn->req_lock);
878 mdev->ov_start_sector = 0;
883 drbd_khelper(mdev, khelper_cmd);
889 static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
891 if (drbd_ee_has_active_page(peer_req)) {
892 /* This might happen if sendpage() has not finished */
893 int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
894 atomic_add(i, &mdev->pp_in_use_by_net);
895 atomic_sub(i, &mdev->pp_in_use);
896 spin_lock_irq(&mdev->tconn->req_lock);
897 list_add_tail(&peer_req->w.list, &mdev->net_ee);
898 spin_unlock_irq(&mdev->tconn->req_lock);
899 wake_up(&drbd_pp_wait);
901 drbd_free_ee(mdev, peer_req);
905 * w_e_end_data_req() - Worker callback to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
906 * @mdev: DRBD device.
908 * @cancel: The connection will be closed anyways
910 int w_e_end_data_req(struct drbd_work *w, int cancel)
912 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
913 struct drbd_conf *mdev = w->mdev;
916 if (unlikely(cancel)) {
917 drbd_free_ee(mdev, peer_req);
922 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
923 ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
925 if (__ratelimit(&drbd_ratelimit_state))
926 dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
927 (unsigned long long)peer_req->i.sector);
929 ok = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
934 move_to_net_ee_or_free(mdev, peer_req);
937 dev_err(DEV, "drbd_send_block() failed\n");
942 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
943 * @mdev: DRBD device.
945 * @cancel: The connection will be closed anyways
947 int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
949 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
950 struct drbd_conf *mdev = w->mdev;
953 if (unlikely(cancel)) {
954 drbd_free_ee(mdev, peer_req);
959 if (get_ldev_if_state(mdev, D_FAILED)) {
960 drbd_rs_complete_io(mdev, peer_req->i.sector);
964 if (mdev->state.conn == C_AHEAD) {
965 ok = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
966 } else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
967 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
968 inc_rs_pending(mdev);
969 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
971 if (__ratelimit(&drbd_ratelimit_state))
972 dev_err(DEV, "Not sending RSDataReply, "
973 "partner DISKLESS!\n");
977 if (__ratelimit(&drbd_ratelimit_state))
978 dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
979 (unsigned long long)peer_req->i.sector);
981 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
983 /* update resync data with failure */
984 drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
989 move_to_net_ee_or_free(mdev, peer_req);
992 dev_err(DEV, "drbd_send_block() failed\n");
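/*
 * Checksum-based resync, SyncSource side: the request answered here
 * carried a digest of the peer's (SyncTarget's) version of the block.
 * We hash our local copy with the same csums algorithm; if the digests
 * match we only acknowledge with P_RS_IS_IN_SYNC, otherwise we transmit
 * the full block as P_RS_DATA_REPLY.  (The SyncTarget side of this is
 * read_for_csum()/w_e_send_csum() above.)
 */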
996 int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
998 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
999 struct drbd_conf *mdev = w->mdev;
1000 struct digest_info *di;
1002 void *digest = NULL;
1005 if (unlikely(cancel)) {
1006 drbd_free_ee(mdev, peer_req);
1011 if (get_ldev(mdev)) {
1012 drbd_rs_complete_io(mdev, peer_req->i.sector);
1016 di = peer_req->digest;
1018 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1019 /* quick hack to try to avoid a race against reconfiguration.
1020 * a real fix would be much more involved,
1021 * introducing more locking mechanisms */
1022 if (mdev->tconn->csums_tfm) {
1023 digest_size = crypto_hash_digestsize(mdev->tconn->csums_tfm);
1024 D_ASSERT(digest_size == di->digest_size);
1025 digest = kmalloc(digest_size, GFP_NOIO);
1028 drbd_csum_ee(mdev, mdev->tconn->csums_tfm, peer_req, digest);
1029 eq = !memcmp(digest, di->digest, digest_size);
1034 drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
1035 /* rs_same_csums unit is BM_BLOCK_SIZE */
1036 mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
1037 ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
1039 inc_rs_pending(mdev);
1040 peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1041 peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
1043 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
1046 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
1047 if (__ratelimit(&drbd_ratelimit_state))
1048 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1052 move_to_net_ee_or_free(mdev, peer_req);
1055 dev_err(DEV, "drbd_send_block/ack() failed\n");
1059 int w_e_end_ov_req(struct drbd_work *w, int cancel)
1061 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1062 struct drbd_conf *mdev = w->mdev;
1063 sector_t sector = peer_req->i.sector;
1064 unsigned int size = peer_req->i.size;
1069 if (unlikely(cancel))
1072 digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
1073 digest = kmalloc(digest_size, GFP_NOIO);
1075 ok = 0; /* terminate the connection in case the allocation failed */
1079 if (likely(!(peer_req->flags & EE_WAS_ERROR)))
1080 drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
1082 memset(digest, 0, digest_size);
1084 /* Free peer_req and pages before send.
1085 * In case we block on congestion, we could otherwise run into
1086 * some distributed deadlock, if the other side blocks on
1087 * congestion as well, because our receiver blocks in
1088 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1089 drbd_free_ee(mdev, peer_req);
1091 inc_rs_pending(mdev);
1092 ok = drbd_send_drequest_csum(mdev, sector, size,
1093 digest, digest_size,
1096 dec_rs_pending(mdev);
1101 drbd_free_ee(mdev, peer_req);
1106 void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
1108 if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
1109 mdev->ov_last_oos_size += size>>9;
1111 mdev->ov_last_oos_start = sector;
1112 mdev->ov_last_oos_size = size>>9;
1114 drbd_set_out_of_sync(mdev, sector, size);
1117 int w_e_end_ov_reply(struct drbd_work *w, int cancel)
1119 struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
1120 struct drbd_conf *mdev = w->mdev;
1121 struct digest_info *di;
1123 sector_t sector = peer_req->i.sector;
1124 unsigned int size = peer_req->i.size;
1128 if (unlikely(cancel)) {
1129 drbd_free_ee(mdev, peer_req);
1134 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1135 * the resync lru has been cleaned up already */
1136 if (get_ldev(mdev)) {
1137 drbd_rs_complete_io(mdev, peer_req->i.sector);
1141 di = peer_req->digest;
1143 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1144 digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
1145 digest = kmalloc(digest_size, GFP_NOIO);
1147 drbd_csum_ee(mdev, mdev->tconn->verify_tfm, peer_req, digest);
1149 D_ASSERT(digest_size == di->digest_size);
1150 eq = !memcmp(digest, di->digest, digest_size);
1155 /* Free peer_req and pages before send.
1156 * In case we block on congestion, we could otherwise run into
1157 * some distributed deadlock, if the other side blocks on
1158 * congestion as well, because our receiver blocks in
1159 * drbd_pp_alloc due to pp_in_use > max_buffers. */
1160 drbd_free_ee(mdev, peer_req);
1162 drbd_ov_oos_found(mdev, sector, size);
1166 ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
1167 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1173 /* let's advance progress step marks only for every other megabyte */
1174 if ((mdev->ov_left & 0x200) == 0x200)
1175 drbd_advance_rs_marks(mdev, mdev->ov_left);
1177 if (mdev->ov_left == 0) {
1179 drbd_resync_finished(mdev);
1185 int w_prev_work_done(struct drbd_work *w, int cancel)
1187 struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
1193 int w_send_barrier(struct drbd_work *w, int cancel)
1195 struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
1196 struct drbd_conf *mdev = w->mdev;
1197 struct p_barrier *p = &mdev->tconn->data.sbuf.barrier;
1200 /* really avoid racing with tl_clear. w.cb may have been referenced
1201 * just before it was reassigned and re-queued, so double check that.
1202 * actually, this race was harmless, since we only try to send the
1203 * barrier packet here, and otherwise do nothing with the object.
1204 * but compare with the head of w_clear_epoch */
1205 spin_lock_irq(&mdev->tconn->req_lock);
1206 if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
1208 spin_unlock_irq(&mdev->tconn->req_lock);
1212 if (!drbd_get_data_sock(mdev->tconn))
1214 p->barrier = b->br_number;
1215 /* inc_ap_pending was done where this was queued.
1216 * dec_ap_pending will be done in got_BarrierAck
1217 * or (on connection loss) in w_clear_epoch. */
1218 ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
1219 &p->head, sizeof(*p), 0);
1220 drbd_put_data_sock(mdev->tconn);
1225 int w_send_write_hint(struct drbd_work *w, int cancel)
1227 struct drbd_conf *mdev = w->mdev;
1230 return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
1233 int w_send_oos(struct drbd_work *w, int cancel)
1235 struct drbd_request *req = container_of(w, struct drbd_request, w);
1236 struct drbd_conf *mdev = w->mdev;
1239 if (unlikely(cancel)) {
1240 req_mod(req, SEND_CANCELED);
1244 ok = drbd_send_oos(mdev, req);
1245 req_mod(req, OOS_HANDED_TO_NETWORK);
1251 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1252 * @mdev: DRBD device.
1254 * @cancel: The connection will be closed anyways
1256 int w_send_dblock(struct drbd_work *w, int cancel)
1258 struct drbd_request *req = container_of(w, struct drbd_request, w);
1259 struct drbd_conf *mdev = w->mdev;
1262 if (unlikely(cancel)) {
1263 req_mod(req, SEND_CANCELED);
1267 ok = drbd_send_dblock(mdev, req);
1268 req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
1274 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1275 * @mdev: DRBD device.
1277 * @cancel: The connection will be closed anyways
1279 int w_send_read_req(struct drbd_work *w, int cancel)
1281 struct drbd_request *req = container_of(w, struct drbd_request, w);
1282 struct drbd_conf *mdev = w->mdev;
1285 if (unlikely(cancel)) {
1286 req_mod(req, SEND_CANCELED);
1290 ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
1291 (unsigned long)req);
1293 req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
1298 int w_restart_disk_io(struct drbd_work *w, int cancel)
1300 struct drbd_request *req = container_of(w, struct drbd_request, w);
1301 struct drbd_conf *mdev = w->mdev;
1303 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
1304 drbd_al_begin_io(mdev, req->i.sector);
1305 /* Calling drbd_al_begin_io() out of the worker might deadlock
1306 theoretically. Practically it cannot deadlock, since this is
1307 only used when unfreezing IOs. All the extents of the requests
1308 that made it into the TL are already active */
1310 drbd_req_make_private_bio(req, req->master_bio);
1311 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
1312 generic_make_request(req->private_bio);
1317 static int _drbd_may_sync_now(struct drbd_conf *mdev)
1319 struct drbd_conf *odev = mdev;
1322 if (odev->ldev->dc.resync_after == -1)
1324 odev = minor_to_mdev(odev->ldev->dc.resync_after);
1327 if ((odev->state.conn >= C_SYNC_SOURCE &&
1328 odev->state.conn <= C_PAUSED_SYNC_T) ||
1329 odev->state.aftr_isp || odev->state.peer_isp ||
1330 odev->state.user_isp)
1336 * _drbd_pause_after() - Pause resync on all devices that may not resync now
1337 * @mdev: DRBD device.
1339 * Called from process context only (admin command and after_state_ch).
1341 static int _drbd_pause_after(struct drbd_conf *mdev)
1343 struct drbd_conf *odev;
1346 idr_for_each_entry(&minors, odev, i) {
1347 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1349 if (!_drbd_may_sync_now(odev))
1350 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
1351 != SS_NOTHING_TO_DO);
1358 * _drbd_resume_next() - Resume resync on all devices that may resync now
1359 * @mdev: DRBD device.
1361 * Called from process context only (admin command and worker).
1363 static int _drbd_resume_next(struct drbd_conf *mdev)
1365 struct drbd_conf *odev;
1368 idr_for_each_entry(&minors, odev, i) {
1369 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1371 if (odev->state.aftr_isp) {
1372 if (_drbd_may_sync_now(odev))
1373 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
1375 != SS_NOTHING_TO_DO) ;
1381 void resume_next_sg(struct drbd_conf *mdev)
1383 write_lock_irq(&global_state_lock);
1384 _drbd_resume_next(mdev);
1385 write_unlock_irq(&global_state_lock);
1388 void suspend_other_sg(struct drbd_conf *mdev)
1390 write_lock_irq(&global_state_lock);
1391 _drbd_pause_after(mdev);
1392 write_unlock_irq(&global_state_lock);
1395 static int sync_after_error(struct drbd_conf *mdev, int o_minor)
1397 struct drbd_conf *odev;
1401 if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
1402 return ERR_SYNC_AFTER;
1404 /* check for loops */
1405 odev = minor_to_mdev(o_minor);
1408 return ERR_SYNC_AFTER_CYCLE;
1410 /* dependency chain ends here, no cycles. */
1411 if (odev->ldev->dc.resync_after == -1)
1414 /* follow the dependency chain */
1415 odev = minor_to_mdev(odev->ldev->dc.resync_after);
1419 int drbd_alter_sa(struct drbd_conf *mdev, int na)
1424 write_lock_irq(&global_state_lock);
1425 retcode = sync_after_error(mdev, na);
1426 if (retcode == NO_ERROR) {
1427 mdev->ldev->dc.resync_after = na;
1429 changes = _drbd_pause_after(mdev);
1430 changes |= _drbd_resume_next(mdev);
1433 write_unlock_irq(&global_state_lock);
1437 void drbd_rs_controller_reset(struct drbd_conf *mdev)
1439 atomic_set(&mdev->rs_sect_in, 0);
1440 atomic_set(&mdev->rs_sect_ev, 0);
1441 mdev->rs_in_flight = 0;
1442 mdev->rs_planed = 0;
1443 spin_lock(&mdev->peer_seq_lock);
1444 fifo_set(&mdev->rs_plan_s, 0);
1445 spin_unlock(&mdev->peer_seq_lock);
1448 void start_resync_timer_fn(unsigned long data)
1450 struct drbd_conf *mdev = (struct drbd_conf *) data;
1452 drbd_queue_work(&mdev->tconn->data.work, &mdev->start_resync_work);
1455 int w_start_resync(struct drbd_work *w, int cancel)
1457 struct drbd_conf *mdev = w->mdev;
1459 if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
1460 dev_warn(DEV, "w_start_resync later...\n");
1461 mdev->start_resync_timer.expires = jiffies + HZ/10;
1462 add_timer(&mdev->start_resync_timer);
1466 drbd_start_resync(mdev, C_SYNC_SOURCE);
1467 clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
1472 * drbd_start_resync() - Start the resync process
1473 * @mdev: DRBD device.
1474 * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
1476 * This function might bring you directly into one of the
1477 * C_PAUSED_SYNC_* states.
1479 void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1481 union drbd_state ns;
1484 if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
1485 dev_err(DEV, "Resync already running!\n");
1489 if (mdev->state.conn < C_AHEAD) {
1490 /* In case a previous resync run was aborted by an IO error/detach on the peer. */
1491 drbd_rs_cancel_all(mdev);
1492 /* This should be done when we abort the resync. We definitely do not
1493 want to have this for connections going back and forth between
1494 Ahead/Behind and SyncSource/SyncTarget */
1497 if (!test_bit(B_RS_H_DONE, &mdev->flags)) {
1498 if (side == C_SYNC_TARGET) {
1499 /* Since application IO was locked out during C_WF_BITMAP_T and
1500 C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1501 we check whether we may make the data inconsistent. */
1502 r = drbd_khelper(mdev, "before-resync-target");
1503 r = (r >> 8) & 0xff;
1505 dev_info(DEV, "before-resync-target handler returned %d, "
1506 "dropping connection.\n", r);
1507 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1510 } else /* C_SYNC_SOURCE */ {
1511 r = drbd_khelper(mdev, "before-resync-source");
1512 r = (r >> 8) & 0xff;
1515 dev_info(DEV, "before-resync-source handler returned %d, "
1516 "ignoring. Old userland tools?", r);
1518 dev_info(DEV, "before-resync-source handler returned %d, "
1519 "dropping connection.\n", r);
1520 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1527 if (current == mdev->tconn->worker.task) {
1528 /* The worker should not sleep waiting for state_mutex,
1529 that can take a long time */
1530 if (!mutex_trylock(mdev->state_mutex)) {
1531 set_bit(B_RS_H_DONE, &mdev->flags);
1532 mdev->start_resync_timer.expires = jiffies + HZ/5;
1533 add_timer(&mdev->start_resync_timer);
1537 mutex_lock(mdev->state_mutex);
1539 clear_bit(B_RS_H_DONE, &mdev->flags);
1541 if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
1542 mutex_unlock(mdev->state_mutex);
1546 write_lock_irq(&global_state_lock);
1549 ns.aftr_isp = !_drbd_may_sync_now(mdev);
1553 if (side == C_SYNC_TARGET)
1554 ns.disk = D_INCONSISTENT;
1555 else /* side == C_SYNC_SOURCE */
1556 ns.pdsk = D_INCONSISTENT;
1558 r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1561 if (ns.conn < C_CONNECTED)
1562 r = SS_UNKNOWN_ERROR;
1564 if (r == SS_SUCCESS) {
1565 unsigned long tw = drbd_bm_total_weight(mdev);
1566 unsigned long now = jiffies;
1569 mdev->rs_failed = 0;
1570 mdev->rs_paused = 0;
1571 mdev->rs_same_csum = 0;
1572 mdev->rs_last_events = 0;
1573 mdev->rs_last_sect_ev = 0;
1574 mdev->rs_total = tw;
1575 mdev->rs_start = now;
1576 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1577 mdev->rs_mark_left[i] = tw;
1578 mdev->rs_mark_time[i] = now;
1580 _drbd_pause_after(mdev);
1582 write_unlock_irq(&global_state_lock);
1584 if (r == SS_SUCCESS) {
1585 dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1586 drbd_conn_str(ns.conn),
1587 (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
1588 (unsigned long) mdev->rs_total);
1589 if (side == C_SYNC_TARGET)
1590 mdev->bm_resync_fo = 0;
1592 /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
1593 * with w_send_oos, or the sync target will get confused as to
1594 * how many bits to resync. We cannot do that always, because for an
1595 * empty resync and protocol < 95, we need to do it here, as we call
1596 * drbd_resync_finished from here in that case.
1597 * We call drbd_gen_and_send_sync_uuid() here for protocol < 96,
1598 * and from after_state_ch otherwise. */
1599 if (side == C_SYNC_SOURCE && mdev->tconn->agreed_pro_version < 96)
1600 drbd_gen_and_send_sync_uuid(mdev);
1602 if (mdev->tconn->agreed_pro_version < 95 && mdev->rs_total == 0) {
1603 /* This still has a race (about when exactly the peers
1604 * detect connection loss) that can lead to a full sync
1605 * on next handshake. In 8.3.9 we fixed this with explicit
1606 * resync-finished notifications, but the fix
1607 * introduces a protocol change. Sleeping for some
1608 * time longer than the ping interval + timeout on the
1609 * SyncSource, to give the SyncTarget the chance to
1610 * detect connection loss, then waiting for a ping
1611 * response (implicit in drbd_resync_finished) reduces
1612 * the race considerably, but does not solve it. */
1613 if (side == C_SYNC_SOURCE)
1614 schedule_timeout_interruptible(
1615 mdev->tconn->net_conf->ping_int * HZ +
1616 mdev->tconn->net_conf->ping_timeo*HZ/9);
1617 drbd_resync_finished(mdev);
1620 drbd_rs_controller_reset(mdev);
1621 /* ns.conn may already be != mdev->state.conn,
1622 * we may have been paused in between, or become paused until
1623 * the timer triggers.
1624 * No matter, that is handled in resync_timer_fn() */
1625 if (ns.conn == C_SYNC_TARGET)
1626 mod_timer(&mdev->resync_timer, jiffies);
1631 mutex_unlock(mdev->state_mutex);
1634 int drbd_worker(struct drbd_thread *thi)
1636 struct drbd_tconn *tconn = thi->tconn;
1637 struct drbd_work *w = NULL;
1638 struct drbd_conf *mdev;
1639 LIST_HEAD(work_list);
1642 while (get_t_state(thi) == RUNNING) {
1643 drbd_thread_current_set_cpu(thi);
1645 if (down_trylock(&tconn->data.work.s)) {
1646 mutex_lock(&tconn->data.mutex);
1647 if (tconn->data.socket && !tconn->net_conf->no_cork)
1648 drbd_tcp_uncork(tconn->data.socket);
1649 mutex_unlock(&tconn->data.mutex);
1651 intr = down_interruptible(&tconn->data.work.s);
1653 mutex_lock(&tconn->data.mutex);
1654 if (tconn->data.socket && !tconn->net_conf->no_cork)
1655 drbd_tcp_cork(tconn->data.socket);
1656 mutex_unlock(&tconn->data.mutex);
1660 flush_signals(current);
1661 if (get_t_state(thi) == RUNNING) {
1662 conn_warn(tconn, "Worker got an unexpected signal\n");
1668 if (get_t_state(thi) != RUNNING)
1670 /* With this break, we have done a down() but not consumed
1671 the entry from the list. The cleanup code takes care of this. */
1675 spin_lock_irq(&tconn->data.work.q_lock);
1676 if (list_empty(&tconn->data.work.q)) {
1677 /* something terribly wrong in our logic.
1678 * we were able to down() the semaphore,
1679 * but the list is empty... doh.
1681 * what is the best thing to do now?
1682 * try again from scratch, restarting the receiver,
1683 * asender, whatnot? could break even more ugly,
1684 * e.g. when we are primary, but no good local data.
1686 * I'll try to get away just starting over this loop. */
1688 conn_warn(tconn, "Work list unexpectedly empty\n");
1689 spin_unlock_irq(&tconn->data.work.q_lock);
1692 w = list_entry(tconn->data.work.q.next, struct drbd_work, list);
1693 list_del_init(&w->list);
1694 spin_unlock_irq(&tconn->data.work.q_lock);
1696 if (!w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS)) {
1697 /* dev_warn(DEV, "worker: a callback failed! \n"); */
1698 if (tconn->cstate >= C_WF_REPORT_PARAMS)
1699 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
1703 spin_lock_irq(&tconn->data.work.q_lock);
1704 while (!list_empty(&tconn->data.work.q)) {
1705 list_splice_init(&tconn->data.work.q, &work_list);
1706 spin_unlock_irq(&tconn->data.work.q_lock);
1708 while (!list_empty(&work_list)) {
1709 w = list_entry(work_list.next, struct drbd_work, list);
1710 list_del_init(&w->list);
1714 spin_lock_irq(&tconn->data.work.q_lock);
1716 sema_init(&tconn->data.work.s, 0);
1717 /* DANGEROUS race: if someone did queue his work within the spinlock,
1718 * but up() ed outside the spinlock, we could get an up() on the
1719 * semaphore without corresponding list entry. */
1722 spin_unlock_irq(&tconn->data.work.q_lock);
1724 drbd_thread_stop(&tconn->receiver);
1725 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1726 D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
1727 /* _drbd_set_state only uses stop_nowait.
1728 * wait here for the exiting receiver. */
1729 drbd_mdev_cleanup(mdev);
1731 clear_bit(OBJECT_DYING, &tconn->flags);
1732 clear_bit(CONFIG_PENDING, &tconn->flags);
1733 wake_up(&tconn->ping_wait);