/* If the start for which we're re-enabling locking (seq) has been superseded
- by a newer stop (ls_recover_seq), we need to leave locking disabled. */
+ by a newer stop (ls_recover_seq), we need to leave locking disabled.
+
+ We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
+ locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
+ enables locking and clears the requestqueue between a and b. */
static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
int error = -EINTR;
+ down_write(&ls->ls_recv_active);
+
spin_lock(&ls->ls_recover_lock);
if (ls->ls_recover_seq == seq) {
set_bit(LSFL_RUNNING, &ls->ls_flags);
+ /* unblocks processes waiting to enter the dlm */
up_write(&ls->ls_in_recovery);
error = 0;
}
spin_unlock(&ls->ls_recover_lock);
+
+ up_write(&ls->ls_recv_active);
return error;
}
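For context, here is a minimal sketch of the receive side this write lock pairs with: dlm_recv holds ls_recv_active for read across both the locking-stopped check and the requestqueue add, so enable_locking() cannot enable locking and clear the requestqueue between the two. The helpers (dlm_locking_stopped, dlm_add_requestqueue, dlm_receive_message) follow fs/dlm, but the exact signatures here are illustrative rather than verbatim kernel code.

static void receive_message_sketch(struct dlm_ls *ls,
				   struct dlm_message *ms, int nodeid)
{
	down_read(&ls->ls_recv_active);

	if (dlm_locking_stopped(ls))
		/* recovery in progress; queue the message for replay */
		dlm_add_requestqueue(ls, nodeid, ms);
	else
		dlm_receive_message(ls, ms, nodeid);

	up_read(&ls->ls_recv_active);
}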
dlm_astd_resume();
/*
- * This list of root rsb's will be the basis of most of the recovery
- * routines.
+ * Free non-master tossed rsb's. Master rsb's are kept on the toss
+ * list and put on the root list so they are included in resdir recovery.
*/
- dlm_create_root_list(ls);
+ dlm_clear_toss_list(ls);
/*
- * Free all the tossed rsb's so we don't have to recover them.
+ * This list of root rsb's will be the basis of most of the recovery
+ * routines.
*/
- dlm_clear_toss_list(ls);
+ dlm_create_root_list(ls);
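The swap in ordering matters because the toss-list sweep now frees only the rsb's this node does not master; master rsb's stay on the toss list so dlm_create_root_list() can pick them up for resdir recovery. A sketch of that sweep, assuming the rsb hash-table layout and the is_master()/free_rsb() helpers from fs/dlm:

static void clear_toss_list_sketch(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		write_lock(&ls->ls_rsbtbl[i].lock);
		list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
					 res_hashchain) {
			/* keep master rsb's for dlm_create_root_list() */
			if (!is_master(r)) {
				list_del(&r->res_hashchain);
				free_rsb(r);
			}
		}
		write_unlock(&ls->ls_rsbtbl[i].lock);
	}
}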
/*
* Add or remove nodes from the lockspace's ls_nodes list.
*/
if (rv) {
ls_recover(ls, rv);
kfree(rv->nodeids);
+ kfree(rv->new);
kfree(rv);
}
}
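The added kfree(rv->new) plugs a leak: the recovery event carries two kmalloc'd nodeid arrays, the full member list and the newly added members, and previously only rv->nodeids was freed. A sketch of the allocation side that hands both arrays to do_ls_recovery(); the struct dlm_recover fields follow fs/dlm, while the function shape and hand-off details are illustrative:

static int start_recovery_sketch(struct dlm_ls *ls, int *ids, int ids_count,
				 int *new_ids, int new_count)
{
	struct dlm_recover *rv;

	rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
	if (!rv)
		return -ENOMEM;

	rv->nodeids = ids;		/* freed in do_ls_recovery() */
	rv->node_count = ids_count;
	rv->new = new_ids;		/* leaked before the kfree() added above */
	rv->new_count = new_count;

	spin_lock(&ls->ls_recover_lock);
	rv->seq = ++ls->ls_recover_seq;	/* supersedes any older start */
	ls->ls_recover_args = rv;	/* picked up by dlm_recoverd */
	spin_unlock(&ls->ls_recover_lock);
	return 0;
}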