#include "drbd_req.h"
static int w_make_ov_request(struct drbd_work *w, int cancel);
-static int w_make_resync_request(struct drbd_work *w, int cancel);
-
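+ /* Worker callbacks and the drbd_send_*() helpers return 0 on success
+ * and nonzero (usually a negative errno) on failure. */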
/* endio handlers:
{
unsigned long flags = 0;
struct drbd_conf *mdev = peer_req->w.mdev;
- sector_t e_sector;
+ struct drbd_interval i;
int do_wake;
u64 block_id;
int do_al_complete_io;
* we may no longer access it,
* it may be freed/reused already!
* (as soon as we release the req_lock) */
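+ /* drbd_al_complete_io() needs the full interval (sector and size),
+ * so save a copy of the whole struct, not just the sector. */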
- e_sector = peer_req->i.sector;
+ i = peer_req->i;
do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
block_id = peer_req->block_id;
spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
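+ /* ID_SYNCER marks resync and online-verify requests */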
if (block_id == ID_SYNCER)
- drbd_rs_complete_io(mdev, e_sector);
+ drbd_rs_complete_io(mdev, i.sector);
if (do_wake)
wake_up(&mdev->ee_wait);
if (do_al_complete_io)
- drbd_al_complete_io(mdev, e_sector);
+ drbd_al_complete_io(mdev, &i);
wake_asender(mdev->tconn);
put_ldev(mdev);
if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
_req_mod(req, READ_RETRY_REMOTE_CANCELED);
spin_unlock_irq(&mdev->tconn->req_lock);
- return 1;
+ return 0;
}
spin_unlock_irq(&mdev->tconn->req_lock);
struct drbd_conf *mdev = w->mdev;
int digest_size;
void *digest;
- int ok = 1;
+ int err = 0;
if (unlikely(cancel))
goto out;
drbd_free_ee(mdev, peer_req);
peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_CSUM_RS_REQUEST);
+ err = drbd_send_drequest_csum(mdev, sector, size,
+ digest, digest_size,
+ P_CSUM_RS_REQUEST);
kfree(digest);
} else {
dev_err(DEV, "kmalloc() of digest failed.\n");
- ok = 0;
+ err = -ENOMEM;
}
out:
if (peer_req)
drbd_free_ee(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
- return ok;
+ return err;
}
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
break;
}
- return 1;
+ return 0;
}
void resync_timer_fn(unsigned long data)
return number;
}
-static int w_make_resync_request(struct drbd_work *w, int cancel)
+int w_make_resync_request(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
unsigned long bit;
int i = 0;
if (unlikely(cancel))
- return 1;
+ return 0;
if (mdev->rs_total == 0) {
/* empty resync? */
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
if (!get_ldev(mdev)) {
to continue resync with a broken disk makes no sense at
all */
dev_err(DEV, "Disk broke down during resync!\n");
- return 1;
+ return 0;
}
max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
if (bit == DRBD_END_OF_BITMAP) {
mdev->bm_resync_fo = drbd_bm_bits(mdev);
put_ldev(mdev);
- return 1;
+ return 0;
}
sector = BM_BIT_TO_SECT(bit);
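+ /* read_for_csum() returns 0 on success, -EIO on disk failure,
+ * and -EAGAIN when the request must be retried later */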
switch (read_for_csum(mdev, sector, size)) {
case -EIO: /* Disk failure */
put_ldev(mdev);
- return 0;
+ return -EIO;
case -EAGAIN: /* allocation failed, or ldev busy */
drbd_rs_complete_io(mdev, sector);
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
BUG();
}
} else {
+ int err;
+
inc_rs_pending(mdev);
- if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
- sector, size, ID_SYNCER)) {
+ err = drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
+ sector, size, ID_SYNCER);
+ if (err) {
dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
dec_rs_pending(mdev);
put_ldev(mdev);
- return 0;
+ return err;
}
}
}
* until then resync "work" is "inactive" ...
*/
put_ldev(mdev);
- return 1;
+ return 0;
}
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
put_ldev(mdev);
- return 1;
+ return 0;
}
static int w_make_ov_request(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
kfree(w);
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
static int w_resync_finished(struct drbd_work *w, int cancel)
drbd_resync_finished(mdev);
- return 1;
+ return 0;
}
static void ping_peer(struct drbd_conf *mdev)
ping_peer(mdev);
spin_lock_irq(&mdev->tconn->req_lock);
- os = mdev->state;
+ os = drbd_read_state(mdev);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
- int ok;
+ int err;
if (unlikely(cancel)) {
drbd_free_ee(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- ok = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
+ err = drbd_send_block(mdev, P_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
(unsigned long long)peer_req->i.sector);
- ok = !drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
+ err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req);
}
dec_unacked(mdev);
move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
/**
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
- int ok;
+ int err;
if (unlikely(cancel)) {
drbd_free_ee(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev_if_state(mdev, D_FAILED)) {
}
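+ /* in Ahead mode we do not serve resync requests, but cancel them */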
if (mdev->state.conn == C_AHEAD) {
- ok = !drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
+ err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req);
} else if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
inc_rs_pending(mdev);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Not sending RSDataReply, "
"partner DISKLESS!\n");
- ok = 1;
+ err = 0;
}
} else {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
(unsigned long long)peer_req->i.sector);
- ok = !drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
/* update resync data with failure */
drbd_rs_failed_io(mdev, peer_req->i.sector, peer_req->i.size);
move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block() failed\n");
- return ok;
+ return err;
}
int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
struct digest_info *di;
int digest_size;
void *digest = NULL;
- int ok, eq = 0;
+ int err, eq = 0;
if (unlikely(cancel)) {
drbd_free_ee(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
if (get_ldev(mdev)) {
drbd_set_in_sync(mdev, peer_req->i.sector, peer_req->i.size);
/* rs_same_csums unit is BM_BLOCK_SIZE */
mdev->rs_same_csum += peer_req->i.size >> BM_BLOCK_SHIFT;
- ok = !drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
+ err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req);
} else {
inc_rs_pending(mdev);
peer_req->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
peer_req->flags &= ~EE_HAS_DIGEST; /* This peer request no longer has a digest pointer */
kfree(di);
- ok = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
+ err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req);
}
} else {
- ok = !drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
+ err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req);
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
}
dec_unacked(mdev);
move_to_net_ee_or_free(mdev, peer_req);
- if (unlikely(!ok))
+ if (unlikely(err))
dev_err(DEV, "drbd_send_block/ack() failed\n");
- return ok;
+ return err;
}
int w_e_end_ov_req(struct drbd_work *w, int cancel)
unsigned int size = peer_req->i.size;
int digest_size;
void *digest;
- int ok = 1;
+ int err = 0;
if (unlikely(cancel))
goto out;
digest_size = crypto_hash_digestsize(mdev->tconn->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
- ok = 0; /* terminate the connection in case the allocation failed */
+ err = -ENOMEM; /* terminate the connection in case the allocation failed */
goto out;
}
drbd_free_ee(mdev, peer_req);
peer_req = NULL;
inc_rs_pending(mdev);
- ok = drbd_send_drequest_csum(mdev, sector, size,
- digest, digest_size,
- P_OV_REPLY);
- if (!ok)
+ err = drbd_send_drequest_csum(mdev, sector, size, digest, digest_size, P_OV_REPLY);
+ if (err)
dec_rs_pending(mdev);
kfree(digest);
if (peer_req)
drbd_free_ee(mdev, peer_req);
dec_unacked(mdev);
- return ok;
+ return err;
}
-void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
+void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
{
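+ /* collect contiguous out-of-sync sectors into one reported range */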
if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
mdev->ov_last_oos_size += size>>9;
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
int digest_size;
- int ok, eq = 0;
+ int err, eq = 0;
if (unlikely(cancel)) {
drbd_free_ee(mdev, peer_req);
dec_unacked(mdev);
- return 1;
+ return 0;
}
/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
* drbd_pp_alloc due to pp_in_use > max_buffers. */
drbd_free_ee(mdev, peer_req);
if (!eq)
- drbd_ov_oos_found(mdev, sector, size);
+ drbd_ov_out_of_sync_found(mdev, sector, size);
else
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
- ok = !drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
+ err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size,
eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
dec_unacked(mdev);
drbd_advance_rs_marks(mdev, mdev->ov_left);
if (mdev->ov_left == 0) {
- ov_oos_print(mdev);
+ ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
}
- return ok;
+ return err;
}
int w_prev_work_done(struct drbd_work *w, int cancel)
struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
complete(&b->done);
- return 1;
+ return 0;
}
int w_send_barrier(struct drbd_work *w, int cancel)
{
struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
struct drbd_conf *mdev = w->mdev;
- struct p_barrier *p = &mdev->tconn->data.sbuf.barrier;
- int ok = 1;
+ struct p_barrier *p = mdev->tconn->data.sbuf;
+ int err = 0;
/* really avoid racing with tl_clear. w.cb may have been referenced
* just before it was reassigned and re-queued, so double check that.
cancel = 1;
spin_unlock_irq(&mdev->tconn->req_lock);
if (cancel)
- return 1;
-
- if (drbd_get_data_sock(mdev->tconn))
return 0;
+
+ err = drbd_get_data_sock(mdev->tconn);
+ if (err)
+ return err;
p->barrier = b->br_number;
/* inc_ap_pending was done where this was queued.
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in w_clear_epoch. */
- ok = !_drbd_send_cmd(mdev, mdev->tconn->data.socket, P_BARRIER,
+ err = _drbd_send_cmd(mdev, &mdev->tconn->data, P_BARRIER,
&p->head, sizeof(*p), 0);
drbd_put_data_sock(mdev->tconn);
- return ok;
+ return err;
}
int w_send_write_hint(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
if (cancel)
- return 1;
+ return 0;
return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}
-int w_send_oos(struct drbd_work *w, int cancel)
+int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_conf *mdev = w->mdev;
- int ok;
+ int err;
if (unlikely(cancel)) {
req_mod(req, SEND_CANCELED);
- return 1;
+ return 0;
}
- ok = drbd_send_oos(mdev, req);
+ err = drbd_send_out_of_sync(mdev, req);
req_mod(req, OOS_HANDED_TO_NETWORK);
- return ok;
+ return err;
}
/**
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_conf *mdev = w->mdev;
- int ok;
+ int err;
if (unlikely(cancel)) {
req_mod(req, SEND_CANCELED);
- return 1;
+ return 0;
}
- ok = drbd_send_dblock(mdev, req);
- req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
+ err = drbd_send_dblock(mdev, req);
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
/**
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_conf *mdev = w->mdev;
- int ok;
+ int err;
if (unlikely(cancel)) {
req_mod(req, SEND_CANCELED);
- return 1;
+ return 0;
}
- ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
- (unsigned long)req);
+ err = drbd_send_drequest(mdev, P_DATA_REQUEST, req->i.sector, req->i.size,
+ (unsigned long)req);
- req_mod(req, ok ? HANDED_OVER_TO_NETWORK : SEND_FAILED);
+ req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
- return ok;
+ return err;
}
int w_restart_disk_io(struct drbd_work *w, int cancel)
struct drbd_conf *mdev = w->mdev;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
- drbd_al_begin_io(mdev, req->i.sector);
+ drbd_al_begin_io(mdev, &req->i);
/* Calling drbd_al_begin_io() out of the worker might deadlock
theoretically. Practically it cannot deadlock, since this is
only used when unfreezing IOs. All the extents of the requests
that made it into the TL are already active */
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
generic_make_request(req->private_bio);
- return 1;
+ return 0;
}
static int _drbd_may_sync_now(struct drbd_conf *mdev)
struct drbd_conf *odev = mdev;
while (1) {
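+ /* if the dependency has no disk attached, there is nothing to wait for */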
+ if (!odev->ldev)
+ return 1;
if (odev->ldev->dc.resync_after == -1)
return 1;
odev = minor_to_mdev(odev->ldev->dc.resync_after);
dev_warn(DEV, "w_start_resync later...\n");
mdev->start_resync_timer.expires = jiffies + HZ/10;
add_timer(&mdev->start_resync_timer);
- return 1;
+ return 0;
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
- return 1;
+ return 0;
}
/**
}
write_lock_irq(&global_state_lock);
- ns = mdev->state;
+ ns = drbd_read_state(mdev);
ns.aftr_isp = !_drbd_may_sync_now(mdev);
ns.pdsk = D_INCONSISTENT;
r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
- ns = mdev->state;
+ ns = drbd_read_state(mdev);
if (ns.conn < C_CONNECTED)
r = SS_UNKNOWN_ERROR;
list_del_init(&w->list);
spin_unlock_irq(&tconn->data.work.q_lock);
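+ /* callbacks return 0 on success; treat any nonzero result as failure */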
- if (!w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS)) {
+ if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS)) {
/* dev_warn(DEV, "worker: a callback failed! \n"); */
if (tconn->cstate >= C_WF_REPORT_PARAMS)
conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);