struct interval_node_extent *e2)
{
int rc;
+
if (e1->start == e2->start) {
if (e1->end < e2->end)
rc = -1;
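Every hunk in this patch makes the same checkpatch.pl fix, "WARNING: Missing a blank line after declarations": kernel coding style wants one blank line between a block's local variable declarations and its first statement. A minimal sketch of the rule on a hypothetical helper (not code from this patch):

/* Hypothetical example of the style rule being enforced. */
static int extent_length(const struct interval_node_extent *e)
{
	int len;			/* declarations come first ... */

	len = e->end - e->start;	/* ... then a blank line, then code */
	return len;
}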
/* Parent is RED, so gparent must not be NULL */
if (node_is_left_child(parent)) {
struct interval_node *uncle;
+
uncle = gparent->in_right;
if (uncle && node_is_red(uncle)) {
uncle->in_color = INTERVAL_BLACK;
__rotate_right(gparent, root);
} else {
struct interval_node *uncle;
+
uncle = gparent->in_left;
if (uncle && node_is_red(uncle)) {
uncle->in_color = INTERVAL_BLACK;
} else {
if (node_is_black_or_0(tmp->in_right)) {
struct interval_node *o_left;
+
o_left = tmp->in_left;
if (o_left)
o_left->in_color = INTERVAL_BLACK;
} else {
if (node_is_black_or_0(tmp->in_left)) {
struct interval_node *o_right;
+
o_right = tmp->in_right;
if (o_right)
o_right->in_color = INTERVAL_BLACK;
root = &res->lr_itree[idx].lit_root;
found = interval_insert(&node->li_node, root);
if (found) { /* The policy group was found. */
- struct ldlm_interval *tmp = ldlm_interval_detach(lock);
+ struct ldlm_interval *tmp;
+
+ tmp = ldlm_interval_detach(lock);
LASSERT(tmp != NULL);
ldlm_interval_free(tmp);
ldlm_interval_attach(to_ldlm_interval(found), lock);
}
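This is the only hunk that moves code as well as inserting a blank line: with tmp initialized in its declaration there would be nothing to put the blank line after, so the side-effecting ldlm_interval_detach() call is split out into the first statement of the block. The same transformation on a made-up call site, for comparison:

/* Before: the declaration hides a call with side effects. */
struct foo *f = detach_foo(lock);

/* After: declarations, a blank line, then statements. */
struct foo *f;

f = detach_foo(lock);

Splitting the call out also keeps the declaration block free of side effects, which is the usual justification for this variant of the cleanup.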
} else {
int reprocess_failed = 0;
+
lockmode_verify(mode);
/* This loop determines if there are existing locks
ldlm_interval_extent(struct ldlm_interval *node)
{
struct ldlm_lock *lock;
+
LASSERT(!list_empty(&node->li_group));
lock = list_entry(node->li_group.next, struct ldlm_lock,
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
{
int rc = 0;
+
if (!list_empty(&lock->l_lru)) {
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
+
LASSERTF(lock != NULL, "Non-existent lock: %#llx\n", lockh->cookie);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
+
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
LDLM_FL_WAIT_NOREPROC,
nid = libcfs_nid2str(exp->exp_connection->c_peer.nid);
} else if (exp && exp->exp_obd != NULL) {
struct obd_import *imp = exp->exp_obd->u.cli.cl_import;
+
nid = libcfs_nid2str(imp->imp_connection->c_peer.nid);
}
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
int to = cfs_time_seconds(1);
+
while (to > 0) {
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
mutex_lock(&ldlm_ref_mutex);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
+
if (rc)
CERROR("ldlm_cleanup failed: %d\n", rc);
else
int ldlm_init_export(struct obd_export *exp)
{
int rc;
+
exp->exp_lock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
spin_unlock(&pl->pl_lock);
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
__u64 slv;
+
spin_lock(&pl->pl_lock);
slv = pl->pl_client_lock_volume;
spin_unlock(&pl->pl_lock);
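Both pool getters follow the same locked-snapshot pattern: the 64-bit volume is copied out under pl_lock, presumably so readers serialize with updaters and a __u64 read cannot tear on 32-bit machines. (Note the local in ldlm_pool_get_clv() is still named slv although it snapshots pl_client_lock_volume; renaming it would be a separate cleanup.) A sketch of the pattern with a hypothetical counter type:

/* Hypothetical illustration of the locked-snapshot read used by
 * ldlm_pool_get_slv() and ldlm_pool_get_clv(). */
struct locked_counter {
	spinlock_t lock;	/* initialize with spin_lock_init() */
	u64 value;
};

static u64 locked_counter_read(struct locked_counter *c)
{
	u64 v;

	spin_lock(&c->lock);
	v = c->value;
	spin_unlock(&c->lock);
	return v;
}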
int ldlm_get_enq_timeout(struct ldlm_lock *lock)
{
int timeout = at_get(ldlm_lock_to_ns_at(lock));
+
if (AT_OFF)
return obd_timeout / 2;
/* Since these are non-updating timeouts, we should be conservative.
* again. */
if ((*flags) & LDLM_FL_LOCK_CHANGED) {
int newmode = reply->lock_desc.l_req_mode;
+
LASSERT(!is_replay);
if (newmode && newmode != lock->l_req_mode) {
LDLM_DEBUG(lock, "server returned different mode %s",
rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
if (lock->l_completion_ast != NULL) {
int err = lock->l_completion_ast(lock, *flags, NULL);
+
if (!rc)
rc = err;
if (rc)
int off)
{
int size = req_capsule_msg_size(pill, loc);
+
return ldlm_req_handles_avail(size, off);
}
enum req_location loc, int off)
{
int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
+
return ldlm_req_handles_avail(size, off);
}
{
ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
+
lock_res_and_lock(lock);
/* don't check added & count since we want to process all locks
int flags)
{
int added;
+
added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
if (added <= 0)
return added;
static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
{
struct iter_helper_data *helper = closure;
+
return helper->iter(lock, helper->closure);
}
if (atomic_read(&ns->ns_bref) > 0) {
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
int rc;
+
CDEBUG(D_DLMTRACE,
"dlm namespace %s free waiting on refcount %d\n",
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
+
ns = list_entry(tmp, struct ldlm_namespace, ns_list_chain);
ldlm_namespace_dump(level, ns);
}
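A possible follow-up cleanup for this last hunk, not part of this patch: list_for_each_entry() folds the list_entry() call into the loop header and removes the local declaration (and the newly added blank line) altogether:

/* Sketch of the list_for_each_entry() variant; ns_list_chain is the
 * struct ldlm_namespace member that links namespaces into the list. */
struct ldlm_namespace *ns;

list_for_each_entry(ns, ldlm_namespace_list(client), ns_list_chain)
	ldlm_namespace_dump(level, ns);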