* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
{
- struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
+ struct kern_ipc_perm *ipcp;
+ rcu_read_lock();
+ ipcp = ipc_obtain_object(&sem_ids(ns), id);
if (IS_ERR(ipcp))
- return (struct sem_array *)ipcp;
+ goto err1;
+
+ spin_lock(&ipcp->lock);
+
+ /*
+ * ipc_rmid() may have already freed the ID while sem_obtain_lock()
+ * was spinning: verify that the structure is still valid.
+ */
+ if (ipcp->deleted)
+ goto err0;
return container_of(ipcp, struct sem_array, sem_perm);
+err0:
+ spin_unlock(&ipcp->lock);
+err1:
+ rcu_read_unlock();
+ return ERR_PTR(-EINVAL);
}
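
For context, the control flow above is a common RCU idiom: look the object up under rcu_read_lock(), take its per-object spinlock, then re-check a deleted flag. The re-check is necessary because RCU only guarantees that the memory stays valid across the lookup; it does not guarantee the object is still live by the time the spinlock is acquired. Below is a minimal userspace sketch of the same pattern, assuming liburcu and pthreads; the names obtain_lock, remove_obj, and the single global slot are illustrative stand-ins, not kernel APIs (the kernel side uses ipc_obtain_object() against the id table).

/*
 * Userspace sketch of the lookup/lock/validate pattern, using liburcu
 * and pthreads. Build with: cc -pthread sketch.c -lurcu
 * Each participating thread must call rcu_register_thread() first.
 */
#include <urcu.h>		/* rcu_read_lock(), rcu_dereference(), call_rcu() */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	pthread_spinlock_t lock;	/* init with pthread_spin_init() */
	bool deleted;			/* set under 'lock' before removal */
	struct rcu_head rcu;
};

static struct obj *slot;		/* stand-in for the id table lookup */

/* Analogue of sem_obtain_lock(): returns a locked, live object or NULL. */
static struct obj *obtain_lock(void)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(slot);
	if (!o)
		goto err;

	pthread_spin_lock(&o->lock);

	/*
	 * The remover may have won the race while we spun on the lock:
	 * re-check. RCU keeps the memory valid, not the object live.
	 */
	if (o->deleted) {
		pthread_spin_unlock(&o->lock);
		goto err;
	}
	return o;	/* caller drops the lock, then rcu_read_unlock() */
err:
	rcu_read_unlock();
	return NULL;
}

static void free_obj(struct rcu_head *head)
{
	free(caa_container_of(head, struct obj, rcu));
}

/* Analogue of ipc_rmid(): mark dead under the lock, defer the free. */
static void remove_obj(void)
{
	struct obj *o = slot;

	pthread_spin_lock(&o->lock);
	o->deleted = true;
	rcu_assign_pointer(slot, NULL);
	pthread_spin_unlock(&o->lock);
	call_rcu(&o->rcu, free_obj);	/* freed once all readers are gone */
}

Note that the remover sets deleted while holding the same spinlock before unpublishing the pointer; that ordering is what makes the single re-check in obtain_lock() sufficient.
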
static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
goto out_free;
}
- sma = sem_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.