LIST_HEAD(reclaimed);
struct drbd_epoch_entry *e, *t;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
reclaim_net_ee(mdev, &reclaimed);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(e, t, &reclaimed, w.list)
drbd_free_net_ee(mdev, e);
}
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
- * Is also used from inside an other spin_lock_irq(&mdev->req_lock);
+ * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
* Either links the page chain back to the global pool,
* or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
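drbd_pp_free() either re-chains the page chain into a bounded global pool or hands the pages back to the allocator. A minimal sketch of that idea, under the stated no-irq constraint and with hypothetical names (example_pool, example_pool_lock, EXAMPLE_POOL_MAX) standing in for DRBD's real pool state:

static struct page *example_pool;	/* head of the spare-page chain (hypothetical) */
static int example_pool_count;
static DEFINE_SPINLOCK(example_pool_lock);
#define EXAMPLE_POOL_MAX 128

static void example_pp_free(struct page *chain)
{
	struct page *page, *next;

	/* plain spin_lock is fine: this path is never entered from irq context */
	spin_lock(&example_pool_lock);
	for (page = chain; page; page = next) {
		next = (struct page *)page_private(page);	/* save link before re-chaining */
		if (example_pool_count < EXAMPLE_POOL_MAX) {
			set_page_private(page, (unsigned long)example_pool);
			example_pool = page;
			example_pool_count++;
		} else {
			spin_unlock(&example_pool_lock);
			__free_page(page);	/* pool full: return to the system */
			spin_lock(&example_pool_lock);
		}
	}
	spin_unlock(&example_pool_lock);
}

The unlock/relock around __free_page() keeps the lock hold time short; the real drbd_pp_free() additionally wakes allocators blocked in drbd_pp_alloc().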
int count = 0;
int is_net = list == &mdev->net_ee;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_splice_init(list, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(e, t, &work_list, w.list) {
drbd_free_some_ee(mdev, e, is_net);
struct drbd_epoch_entry *e, *t;
int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
reclaim_net_ee(mdev, &reclaimed);
list_splice_init(&mdev->done_ee, &work_list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
list_for_each_entry_safe(e, t, &reclaimed, w.list)
drbd_free_net_ee(mdev, e);
* and calling prepare_to_wait in the fast path */
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
io_schedule();
finish_wait(&mdev->ee_wait, &wait);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
}
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, head);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
}
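The pair above is the classic "check under the lock, sleep with the lock dropped" idiom. A generic, self-contained sketch of the same pattern (hypothetical names):

static void example_wait_list_empty(spinlock_t *lock, struct list_head *head,
				    wait_queue_head_t *wq)
{
	DEFINE_WAIT(wait);

	spin_lock_irq(lock);
	while (!list_empty(head)) {
		/* register on the waitqueue while still holding the lock */
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(lock);
		io_schedule();		/* sleep; the emptier does wake_up() */
		finish_wait(wq, &wait);
		spin_lock_irq(lock);
	}
	spin_unlock_irq(lock);
}

Because prepare_to_wait() runs under the lock and the waker must take the same lock to remove list entries, no wake_up() can be lost between the emptiness check and the sleep.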
/* see also kernel_accept, which is only present since 2.6.18.
set_fs(KERNEL_DS);
for (;;) {
- rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
+ rv = sock_recvmsg(mdev->tconn->data.socket, &msg, size, msg.msg_flags);
if (rv == size)
break;
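drbd_recv_short() keeps calling sock_recvmsg() until the requested byte count has arrived or an error occurs. The same full-receive loop can be sketched with the kernel_recvmsg() convenience wrapper, avoiding the set_fs(KERNEL_DS) dance (hypothetical helper):

static int example_recv_all(struct socket *sock, void *buf, size_t size)
{
	size_t received = 0;

	while (received < size) {
		struct msghdr msg = { .msg_flags = MSG_WAITALL | MSG_NOSIGNAL };
		struct kvec iov = {
			.iov_base = (char *)buf + received,
			.iov_len  = size - received,
		};
		int rv = kernel_recvmsg(sock, &msg, &iov, 1,
					iov.iov_len, msg.msg_flags);
		if (rv <= 0)
			return rv;	/* error, signal, or orderly shutdown */
		received += rv;
	}
	return received;
}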
static int drbd_send_fp(struct drbd_conf *mdev,
struct socket *sock, enum drbd_packets cmd)
{
- struct p_header80 *h = &mdev->data.sbuf.header.h80;
+ struct p_header80 *h = &mdev->tconn->data.sbuf.header.h80;
return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}
static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
+ struct p_header80 *h = &mdev->tconn->data.rbuf.header.h80;
int rr;
rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
struct socket *s, *sock, *msock;
int try, h, ok;
- D_ASSERT(!mdev->data.socket);
+ D_ASSERT(!mdev->tconn->data.socket);
if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
return -2;
if (signal_pending(current)) {
flush_signals(current);
smp_rmb();
- if (get_t_state(&mdev->receiver) == EXITING)
+ if (get_t_state(&mdev->tconn->receiver) == EXITING)
goto out_release_sockets;
}
drbd_tcp_nodelay(sock);
drbd_tcp_nodelay(msock);
- mdev->data.socket = sock;
- mdev->meta.socket = msock;
- mdev->last_received = jiffies;
+ mdev->tconn->data.socket = sock;
+ mdev->tconn->meta.socket = msock;
+ mdev->tconn->last_received = jiffies;
- D_ASSERT(mdev->asender.task == NULL);
+ D_ASSERT(mdev->tconn->asender.task == NULL);
h = drbd_do_handshake(mdev);
if (h <= 0)
atomic_set(&mdev->packet_seq, 0);
mdev->peer_seq = 0;
- drbd_thread_start(&mdev->asender);
+ drbd_thread_start(&mdev->tconn->asender);
if (drbd_send_protocol(mdev) == -1)
return -1;
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
- union p_header *h = &mdev->data.rbuf.header;
+ union p_header *h = &mdev->tconn->data.rbuf.header;
int r;
r = drbd_recv(mdev, h, sizeof(*h));
be16_to_cpu(h->h80.length));
return false;
}
- mdev->last_received = jiffies;
+ mdev->tconn->last_received = jiffies;
return true;
}
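drbd_recv_header() essentially validates the magic and byte-swaps command and length from the wire. A sketch of the h80 branch only (the real function also accepts the p_header95 variant):

static bool example_decode_header80(union p_header *h, enum drbd_packets *cmd,
				    unsigned int *packet_size)
{
	if (h->h80.magic != cpu_to_be32(DRBD_MAGIC))
		return false;		/* garbage header: caller tears the connection down */
	*cmd = be16_to_cpu(h->h80.command);
	*packet_size = be16_to_cpu(h->h80.length);
	return true;
}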
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
int rv;
- struct p_barrier *p = &mdev->data.rbuf.barrier;
+ struct p_barrier *p = &mdev->tconn->data.rbuf.barrier;
struct drbd_epoch *epoch;
inc_unacked(mdev);
void *dig_vv = mdev->int_dig_vv;
unsigned long *data;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
+ dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
if (dgs) {
void *dig_in = mdev->int_dig_in;
void *dig_vv = mdev->int_dig_vv;
- dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
+ dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
if (dgs) {
e->w.cb = e_end_resync_block;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_add(&e->w.list, &mdev->sync_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
atomic_add(data_size >> 9, &mdev->rs_sect_ev);
if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
drbd_free_ee(mdev, e);
fail:
struct drbd_request *req;
sector_t sector;
int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ struct p_data *p = &mdev->tconn->data.rbuf.data;
sector = be64_to_cpu(p->sector);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (unlikely(!req))
return false;
{
sector_t sector;
int ok;
- struct p_data *p = &mdev->data.rbuf.data;
+ struct p_data *p = &mdev->tconn->data.rbuf.data;
sector = be64_to_cpu(p->sector);
D_ASSERT(p->block_id == ID_SYNCER);
/* we delete from the conflict detection hash _after_ we sent out the
* P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
if (mdev->tconn->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
D_ASSERT(!drbd_interval_empty(&e->i));
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
} else
D_ASSERT(drbd_interval_empty(&e->i));
D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
D_ASSERT(!drbd_interval_empty(&e->i));
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dec_unacked(mdev);
{
sector_t sector;
struct drbd_epoch_entry *e;
- struct p_data *p = &mdev->data.rbuf.data;
+ struct p_data *p = &mdev->tconn->data.rbuf.data;
int rw = WRITE;
u32 dp_flags;
/* I'm the receiver, I do hold a net_cnt reference. */
if (!mdev->tconn->net_conf->two_primaries) {
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
} else {
/* don't get the req_lock yet,
* we may sleep in drbd_wait_peer_seq */
if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
goto out_interrupted;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
drbd_insert_interval(&mdev->epoch_entries, &e->i);
e->w.cb = e_send_discard_ack;
list_add_tail(&e->w.list, &mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* we could probably send that P_DISCARD_ACK ourselves,
* but I don't like the receiver using the msock */
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
finish_wait(&mdev->misc_wait, &wait);
goto out_interrupted;
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (first) {
first = 0;
dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
D_ASSERT(have_unacked == 0);
}
schedule();
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
}
finish_wait(&mdev->misc_wait, &wait);
}
list_add(&e->w.list, &mdev->active_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
switch (mdev->tconn->net_conf->wire_protocol) {
case DRBD_PROT_C:
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
drbd_remove_interval(&mdev->epoch_entries, &e->i);
drbd_clear_interval(&e->i);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (e->flags & EE_CALL_AL_COMPLETE_IO)
drbd_al_complete_io(mdev, e->i.sector);
struct digest_info *di = NULL;
int size, verb;
unsigned int fault_type;
- struct p_block_req *p = &mdev->data.rbuf.block_req;
+ struct p_block_req *p = &mdev->tconn->data.rbuf.block_req;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
goto out_free_e;
if (cmd == P_CSUM_RS_REQUEST) {
- D_ASSERT(mdev->agreed_pro_version >= 89);
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
e->w.cb = w_e_end_csum_rs_req;
/* used in the sector offset progress display */
mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
case P_OV_REQUEST:
if (mdev->ov_start_sector == ~(sector_t)0 &&
- mdev->agreed_pro_version >= 90) {
+ mdev->tconn->agreed_pro_version >= 90) {
unsigned long now = jiffies;
int i;
mdev->ov_start_sector = sector;
submit:
inc_unacked(mdev);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_add_tail(&e->w.list, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
return true;
/* don't care for the reason here */
dev_err(DEV, "submit failed, triggering re-connect\n");
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
list_del(&e->w.list);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* no drbd_rs_complete_io(), we are dropping the connection anyways */
out_free_e:
if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
*rule_nr = 51;
peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the modifications
   the peer made to its UUIDs when it last started a resync as sync source. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
*rule_nr = 71;
self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
if (self == peer) {
- if (mdev->agreed_pro_version < 96 ?
+ if (mdev->tconn->agreed_pro_version < 96 ?
(mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
(mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
/* The last P_SYNC_UUID did not get through. Undo the modifications
   we made to our UUIDs when we last started a resync as sync source. */
- if (mdev->agreed_pro_version < 91)
+ if (mdev->tconn->agreed_pro_version < 91)
return -1091;
_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- struct p_protocol *p = &mdev->data.rbuf.protocol;
+ struct p_protocol *p = &mdev->tconn->data.rbuf.protocol;
int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
int p_want_lose, p_two_primaries, cf;
char p_integrity_alg[SHARED_SECRET_MAX] = "";
goto disconnect;
}
- if (mdev->agreed_pro_version >= 87) {
+ if (mdev->tconn->agreed_pro_version >= 87) {
unsigned char *my_alg = mdev->tconn->net_conf->integrity_alg;
if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
int ok = true;
- struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
+ struct p_rs_param_95 *p = &mdev->tconn->data.rbuf.rs_param_95;
unsigned int header_size, data_size, exp_max_sz;
struct crypto_hash *verify_tfm = NULL;
struct crypto_hash *csums_tfm = NULL;
- const int apv = mdev->agreed_pro_version;
+ const int apv = mdev->tconn->agreed_pro_version;
int *rs_plan_s = NULL;
int fifo_size = 0;
static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- struct p_sizes *p = &mdev->data.rbuf.sizes;
+ struct p_sizes *p = &mdev->tconn->data.rbuf.sizes;
enum determine_dev_size dd = unchanged;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- struct p_uuids *p = &mdev->data.rbuf.uuids;
+ struct p_uuids *p = &mdev->tconn->data.rbuf.uuids;
u64 *p_uuid;
int i, updated_uuids = 0;
if (get_ldev(mdev)) {
int skip_initial_sync =
mdev->state.conn == C_CONNECTED &&
- mdev->agreed_pro_version >= 90 &&
+ mdev->tconn->agreed_pro_version >= 90 &&
mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
(p_uuid[UI_FLAGS] & 8);
if (skip_initial_sync) {
static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- struct p_req_state *p = &mdev->data.rbuf.req_state;
+ struct p_req_state *p = &mdev->tconn->data.rbuf.req_state;
union drbd_state mask, val;
enum drbd_state_rv rv;
static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- struct p_state *p = &mdev->data.rbuf.state;
+ struct p_state *p = &mdev->tconn->data.rbuf.state;
union drbd_state os, ns, peer_state;
enum drbd_disk_state real_peer_disk;
enum chg_state_flags cs_flags;
dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
retry:
os = ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* peer says his disk is uptodate, while we think it is inconsistent,
* and this happens while we think we have a sync going on. */
}
}
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
if (mdev->state.i != os.i)
goto retry;
clear_bit(CONSIDER_RESYNC, &mdev->flags);
test_bit(NEW_CUR_UUID, &mdev->flags)) {
/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
for temporary network outages! */
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
tl_clear(mdev);
drbd_uuid_new_current(mdev);
}
rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
ns = mdev->state;
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (rv < SS_SUCCESS) {
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
+ struct p_rs_uuid *p = &mdev->tconn->data.rbuf.rs_uuid;
wait_event(mdev->misc_wait,
mdev->state.conn == C_WF_SYNC_UUID ||
void *buffer;
int err;
int ok = false;
- struct p_header80 *h = &mdev->data.rbuf.header.h80;
+ struct p_header80 *h = &mdev->tconn->data.rbuf.header.h80;
drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
/* you are supposed to send additional out-of-sync information
{
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
- drbd_tcp_quickack(mdev->data.socket);
+ drbd_tcp_quickack(mdev->tconn->data.socket);
return true;
}
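drbd_tcp_quickack() asks the stack to ACK immediately instead of waiting for the delayed-ACK timer. A sketch of what such a wrapper plausibly does (hypothetical; the exact option value DRBD uses may differ):

static void example_tcp_quickack(struct socket *sock)
{
	int val = 1;	/* request an immediate ACK rather than a delayed one */

	kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			  (char *)&val, sizeof(val));
}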
static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- struct p_block_desc *p = &mdev->data.rbuf.block_desc;
+ struct p_block_desc *p = &mdev->tconn->data.rbuf.block_desc;
switch (mdev->state.conn) {
case C_WF_SYNC_UUID:
};
/* All handler functions that expect a sub-header get that sub-header in
- mdev->data.rbuf.header.head.payload.
+ mdev->tconn->data.rbuf.header.head.payload.
- Usually in mdev->data.rbuf.header.head the callback can find the usual
+ Usually in mdev->tconn->data.rbuf.header.head the callback can find the usual
   p_header, but they may not rely on that, since there is also p_header95.
*/
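For orientation, a sketch of the 8.x header layout this comment refers to (recalled from drbd_int.h; illustrative rather than authoritative):

struct p_header80 {
	u32	magic;		/* DRBD_MAGIC */
	u16	command;
	u16	length;		/* bytes of payload after this header */
	u8	payload[0];
} __packed;

union p_header {
	struct p_header80 h80;
	struct p_header95 h95;	/* 16-bit magic, 32-bit length variant */
};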
static void drbdd(struct drbd_conf *mdev)
{
- union p_header *header = &mdev->data.rbuf.header;
+ union p_header *header = &mdev->tconn->data.rbuf.header;
unsigned int packet_size;
enum drbd_packets cmd;
size_t shs; /* sub header size */
int rv;
- while (get_t_state(&mdev->receiver) == RUNNING) {
+ while (get_t_state(&mdev->tconn->receiver) == RUNNING) {
drbd_thread_current_set_cpu(mdev);
if (!drbd_recv_header(mdev, &cmd, &packet_size))
goto err_out;
barr.w.cb = w_prev_work_done;
init_completion(&barr.done);
- drbd_queue_work(&mdev->data.work, &barr.w);
+ drbd_queue_work(&mdev->tconn->data.work, &barr.w);
wait_for_completion(&barr.done);
}
return;
/* asender does not clean up anything. it must not interfere, either */
- drbd_thread_stop(&mdev->asender);
+ drbd_thread_stop(&mdev->tconn->asender);
drbd_free_sock(mdev);
/* wait for current activity to cease. */
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* We do not have data structures that would allow us to
* get the rs_pending_cnt down to 0 again.
if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
drbd_try_outdate_peer_async(mdev);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
os = mdev->state;
if (os.conn >= C_UNCONNECTED) {
/* Do not restart in case we are C_DISCONNECTING */
ns.conn = C_UNCONNECTED;
rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (os.conn == C_DISCONNECTING) {
wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0);
*/
static int drbd_send_handshake(struct drbd_conf *mdev)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.sbuf.handshake;
+ /* ASSERT current == mdev->tconn->receiver ... */
+ struct p_handshake *p = &mdev->tconn->data.sbuf.handshake;
int ok;
- if (mutex_lock_interruptible(&mdev->data.mutex)) {
+ if (mutex_lock_interruptible(&mdev->tconn->data.mutex)) {
dev_err(DEV, "interrupted during initial handshake\n");
return 0; /* interrupted. not ok. */
}
- if (mdev->data.socket == NULL) {
- mutex_unlock(&mdev->data.mutex);
+ if (mdev->tconn->data.socket == NULL) {
+ mutex_unlock(&mdev->tconn->data.mutex);
return 0;
}
memset(p, 0, sizeof(*p));
p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
- ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
+ ok = _drbd_send_cmd(mdev, mdev->tconn->data.socket, P_HAND_SHAKE,
(struct p_header80 *)p, sizeof(*p), 0);
- mutex_unlock(&mdev->data.mutex);
+ mutex_unlock(&mdev->tconn->data.mutex);
return ok;
}
*/
static int drbd_do_handshake(struct drbd_conf *mdev)
{
- /* ASSERT current == mdev->receiver ... */
- struct p_handshake *p = &mdev->data.rbuf.handshake;
+ /* ASSERT current == mdev->tconn->receiver ... */
+ struct p_handshake *p = &mdev->tconn->data.rbuf.handshake;
const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
unsigned int length;
enum drbd_packets cmd;
PRO_VERSION_MIN > p->protocol_max)
goto incompat;
- mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
+ mdev->tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
dev_info(DEV, "Handshake successful: "
- "Agreed network protocol version %d\n", mdev->agreed_pro_version);
+ "Agreed network protocol version %d\n", mdev->tconn->agreed_pro_version);
return 1;
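The negotiation rule is simply "highest protocol version both sides support". A self-contained sketch of that rule (hypothetical helper):

static int example_agree_version(int my_min, int my_max,
				 int peer_min, int peer_max)
{
	if (peer_max < my_min || my_max < peer_min)
		return -1;			/* no overlap: incompatible */
	return min(my_max, peer_max);		/* best version both speak */
}

With PRO_VERSION_MAX = 96 locally and a peer advertising 86..95, this yields 95, which is exactly what the min_t() above computes once the range check has passed.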
static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
/* restore idle timeout */
- mdev->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
+ mdev->tconn->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;
if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
wake_up(&mdev->misc_wait);
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
- D_ASSERT(mdev->agreed_pro_version >= 89);
+ D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
update_peer_seq(mdev, be32_to_cpu(p->seq_num));
struct drbd_request *req;
struct bio_and_error m;
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
req = find_request(mdev, root, id, sector, missing_ok, func);
if (unlikely(!req)) {
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
return false;
}
__req_mod(req, what, &m);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
if (m.bio)
complete_master_bio(mdev, &m);
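Note the shape here: __req_mod() runs under req_lock, but the master bio is completed only after the lock is dropped, because completion can re-enter the block layer. A generic sketch of that idiom (hypothetical names):

static void example_finish_request(spinlock_t *lock, struct bio *master_bio,
				   int error)
{
	spin_lock_irq(lock);
	/* ... look up the request and apply the state change ... */
	spin_unlock_irq(lock);

	/* never complete a bio while holding the spinlock */
	if (master_bio)
		bio_endio(master_bio, error);
}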
w = kmalloc(sizeof(*w), GFP_NOIO);
if (w) {
w->cb = w_ov_finished;
- drbd_queue_work_front(&mdev->data.work, w);
+ drbd_queue_work_front(&mdev->tconn->data.work, w);
} else {
dev_err(DEV, "kmalloc(w) failed.");
ov_oos_print(mdev);
int drbd_asender(struct drbd_thread *thi)
{
struct drbd_conf *mdev = thi->mdev;
- struct p_header80 *h = &mdev->meta.rbuf.header.h80;
+ struct p_header80 *h = &mdev->tconn->meta.rbuf.header.h80;
struct asender_cmd *cmd = NULL;
int rv, len;
dev_err(DEV, "drbd_send_ping has failed\n");
goto reconnect;
}
- mdev->meta.socket->sk->sk_rcvtimeo =
+ mdev->tconn->meta.socket->sk->sk_rcvtimeo =
mdev->tconn->net_conf->ping_timeo*HZ/10;
ping_timeout_active = 1;
}
* it may hurt latency if we cork without much to send */
if (!mdev->tconn->net_conf->no_cork &&
3 < atomic_read(&mdev->unacked_cnt))
- drbd_tcp_cork(mdev->meta.socket);
+ drbd_tcp_cork(mdev->tconn->meta.socket);
while (1) {
clear_bit(SIGNAL_ASENDER, &mdev->flags);
flush_signals(current);
goto reconnect;
/* to avoid race with newly queued ACKs */
set_bit(SIGNAL_ASENDER, &mdev->flags);
- spin_lock_irq(&mdev->req_lock);
+ spin_lock_irq(&mdev->tconn->req_lock);
empty = list_empty(&mdev->done_ee);
- spin_unlock_irq(&mdev->req_lock);
+ spin_unlock_irq(&mdev->tconn->req_lock);
/* new ack may have been queued right here,
* but then there is also a signal pending,
* and we start over... */
}
/* but unconditionally uncork unless disabled */
if (!mdev->tconn->net_conf->no_cork)
- drbd_tcp_uncork(mdev->meta.socket);
+ drbd_tcp_uncork(mdev->tconn->meta.socket);
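Corking holds back partial TCP frames so that many small ACK packets coalesce into fewer segments; uncorking flushes whatever has queued. A sketch of such a wrapper (hypothetical; DRBD's real drbd_tcp_cork()/drbd_tcp_uncork() helpers live in its socket code):

static void example_tcp_set_cork(struct socket *sock, bool cork)
{
	int val = cork;

	/* TCP_CORK=1 batches partial frames; clearing it flushes them */
	kernel_setsockopt(sock, SOL_TCP, TCP_CORK, (char *)&val, sizeof(val));
}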
/* short circuit, recv_msg would return EINTR anyways. */
if (signal_pending(current))
continue;
- rv = drbd_recv_short(mdev, mdev->meta.socket,
+ rv = drbd_recv_short(mdev, mdev->tconn->meta.socket,
buf, expect-received, 0);
clear_bit(SIGNAL_ASENDER, &mdev->flags);
} else if (rv == -EAGAIN) {
/* If the data socket received something meanwhile,
* that is good enough: peer is still alive. */
- if (time_after(mdev->last_received,
- jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ if (time_after(mdev->tconn->last_received,
+ jiffies - mdev->tconn->meta.socket->sk->sk_rcvtimeo))
continue;
if (ping_timeout_active) {
dev_err(DEV, "PingAck did not arrive in time.\n");
goto reconnect;
}
if (received == expect) {
- mdev->last_received = jiffies;
+ mdev->tconn->last_received = jiffies;
D_ASSERT(cmd != NULL);
if (!cmd->process(mdev, h))
goto reconnect;