static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
+DECLARE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
-		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		return 0;
	}
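
The allocation change above is behavior-preserving but worth spelling out: kcalloc(1, size, flags) and kzalloc(size, flags) both return a single zeroed object, and GFP_KERNEL is safe on this path because it runs with kprobe_mutex held (a semaphore, per the declarations above) rather than under a spinlock. A minimal equivalence sketch:

	/* Both calls yield one zeroed struct kprobe; kzalloc() is the
	 * idiomatic single-object form, and GFP_KERNEL may sleep, which
	 * is permitted while holding a semaphore such as kprobe_mutex. */
	ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);	/* before */
	ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);	/* after */
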
-int __kprobes register_kprobe(struct kprobe *p)
+static int __kprobes __register_kprobe(struct kprobe *p,
+	unsigned long called_from)
 {
	int ret = 0;
	struct kprobe *old_p;
-	struct module *mod;
+	struct module *probed_mod;
	if ((!kernel_text_address((unsigned long) p->addr)) ||
		in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;
-	if ((mod = module_text_address((unsigned long) p->addr)) &&
-			(unlikely(!try_module_get(mod))))
-		return -EINVAL;
+	p->mod_refcounted = 0;
+	/* Check if we are probing a module */
+	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
+		struct module *calling_mod = module_text_address(called_from);
+		/* We must allow modules to probe themselves and in this
+		 * case avoid incrementing the module refcount, so as to
+		 * allow unloading of self-probing modules.
+		 */
+		if (calling_mod && (calling_mod != probed_mod)) {
+			if (unlikely(!try_module_get(probed_mod)))
+				return -EINVAL;
+			p->mod_refcounted = 1;
+		} else
+			probed_mod = NULL;
+	}
	p->nmissed = 0;
	down(&kprobe_mutex);
out:
	up(&kprobe_mutex);
-	if (ret && mod)
-		module_put(mod);
+	if (ret && probed_mod)
+		module_put(probed_mod);
	return ret;
}
+int __kprobes register_kprobe(struct kprobe *p)
+{
+	return __register_kprobe(p,
+		(unsigned long)__builtin_return_address(0));
+}
+
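
The out-of-line wrapper is what makes this work: __builtin_return_address(0) inside register_kprobe() is the call site in the registering code, and module_text_address() maps both that address and p->addr to their owning modules, so a module probing itself is detected and left unpinned. A minimal sketch of that self-probing case, assuming an ordinary module build (the my_* names are illustrative, not part of the patch):

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/kprobes.h>

	/* A function inside this module that the module itself probes. */
	static noinline int my_target(int x)
	{
		return x + 1;
	}

	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		printk(KERN_INFO "my_target hit\n");
		return 0;
	}

	static struct kprobe my_kp = {
		.pre_handler = my_pre_handler,
	};

	static int __init my_init(void)
	{
		my_kp.addr = (kprobe_opcode_t *) my_target;
		/*
		 * called_from and my_target both lie in this module's
		 * text, so calling_mod == probed_mod: mod_refcounted
		 * stays 0 and no reference is taken, leaving the module
		 * unloadable as long as it unregisters the probe in its
		 * exit routine.
		 */
		return register_kprobe(&my_kp);
	}

	static void __exit my_exit(void)
	{
		unregister_kprobe(&my_kp);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");

Before this change, try_module_get() in that path pinned the module against its own probe, so a self-probing module could never be unloaded.
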
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
-	struct kprobe *old_p, *cleanup_p;
+	struct kprobe *old_p, *list_p;
+	int cleanup_p;
	down(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
		up(&kprobe_mutex);
		return;
	}
-
-	if ((old_p->pre_handler == aggr_pre_handler) &&
+	if (p != old_p) {
+		list_for_each_entry_rcu(list_p, &old_p->list, list)
+			/* kprobe p is a valid probe */
+			if (list_p == p)
+				goto valid_p;
+		up(&kprobe_mutex);
+		return;
+	}
+valid_p:
+	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		(p->list.next == &old_p->list) &&
-		(p->list.prev == &old_p->list)) {
-		/* Only one element in the aggregate list */
+		(p->list.prev == &old_p->list))) {
+		/* This is the only probe left on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
-		cleanup_p = old_p;
-	} else if (old_p == p) {
-		/* Only one kprobe element in the hash list */
-		arch_disarm_kprobe(p);
-		hlist_del_rcu(&p->hlist);
-		cleanup_p = p;
+		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
-		cleanup_p = NULL;
+		cleanup_p = 0;
	}
	up(&kprobe_mutex);
	synchronize_sched();
-	if ((mod = module_text_address((unsigned long)p->addr)))
+	if (p->mod_refcounted &&
+	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);
	if (cleanup_p) {
-		if (cleanup_p->pre_handler == aggr_pre_handler) {
+		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
-		down(&kprobe_mutex);
		arch_remove_kprobe(p);
-		up(&kprobe_mutex);
	}
}
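
The reworked teardown does two things: the walk before valid_p rejects a kprobe that was never registered at that address instead of corrupting the aggregate list, and cleanup_p separates "last probe at this address" (disarm, unhash, free) from "one member leaving a live aggregate" (RCU list removal only). A sketch of the staged removal for two probes on one address (handler and symbol names are illustrative, and kallsyms_lookup_name() is assumed available for symbol resolution):

	static int h1(struct kprobe *p, struct pt_regs *regs) { return 0; }
	static int h2(struct kprobe *p, struct pt_regs *regs) { return 0; }
	static struct kprobe kp1 = { .pre_handler = h1 };
	static struct kprobe kp2 = { .pre_handler = h2 };

	kp1.addr = kp2.addr =
		(kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	register_kprobe(&kp1);	/* plain probe on the hash list */
	register_kprobe(&kp2);	/* old_p found: an aggregate kprobe is
				 * allocated, kp1 and kp2 hang off its list */

	unregister_kprobe(&kp2);	/* p != old_p, kp2 found on the list:
					 * list_del_rcu() only, cleanup_p = 0,
					 * breakpoint stays armed for kp1 */
	unregister_kprobe(&kp1);	/* last probe: arch_disarm_kprobe(),
					 * hlist_del_rcu(), cleanup_p = 1,
					 * and the aggregate is kfree()d */
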
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;
-	return register_kprobe(&jp->kp);
+	return __register_kprobe(&jp->kp,
+		(unsigned long)__builtin_return_address(0));
}
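
register_jprobe() now records its own return address too, so a module placing a jprobe on its own code gets the same self-probing exemption. For context, typical jprobe usage under this API looks like the sketch below; the handler mirrors the probed function's signature and must end in jprobe_return() (the do_fork signature is from kernels of this vintage, and the my_* names are illustrative):

	static long my_jdo_fork(unsigned long clone_flags,
				unsigned long stack_start,
				struct pt_regs *regs,
				unsigned long stack_size,
				int __user *parent_tidptr,
				int __user *child_tidptr)
	{
		printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
		jprobe_return();	/* mandatory: restores saved context */
		return 0;		/* never reached */
	}

	static struct jprobe my_jprobe = {
		.entry = (kprobe_opcode_t *) my_jdo_fork,
	};

	/* in module init, with boilerplate as in the earlier sketch: */
	my_jprobe.kp.addr =
		(kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!my_jprobe.kp.addr)
		return -ENOENT;
	return register_jprobe(&my_jprobe);
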
void __kprobes unregister_jprobe(struct jprobe *jp)
	rp->nmissed = 0;
	/* Establish function entry probe point */
-	if ((ret = register_kprobe(&rp->kp)) != 0)
+	if ((ret = __register_kprobe(&rp->kp,
+		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
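
register_kretprobe() plants the entry kprobe seen above; the return address is then diverted to a trampoline so rp->handler runs when the probed function returns, with maxactive bounding concurrent activations (overflows are tallied in nmissed, zeroed above). A minimal usage sketch under the same assumptions as the earlier examples:

	static int my_ret_handler(struct kretprobe_instance *ri,
				  struct pt_regs *regs)
	{
		printk(KERN_INFO "probed function returned\n");
		return 0;
	}

	static struct kretprobe my_kretprobe = {
		.handler = my_ret_handler,
		.maxactive = 20,	/* concurrent activations to track */
	};

	/* in module init: */
	my_kretprobe.kp.addr =
		(kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
	if (!my_kretprobe.kp.addr)
		return -ENOENT;
	return register_kretprobe(&my_kretprobe);
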