diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index dbcbf6a33a08d4f3a0d9b50469fa15162d81787b..8a5c7d55ac9fab9bdbd97bb7701d92ed2b3fa310 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -40,6 +40,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/init.h>
+#include <linux/cpu.h>
 #include <linux/smp.h>
 
 #include <linux/hw_breakpoint.h>
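The new <linux/cpu.h> include is for get_online_cpus()/put_online_cpus(), which the final hunk uses to pin the set of online CPUs while one event per CPU is created. A minimal sketch of that pattern (the helper name is hypothetical; current kernels spell these cpus_read_lock()/cpus_read_unlock()):

        #include <linux/cpu.h>
        #include <linux/smp.h>

        /* Hypothetical helper: run fn for every online CPU with hotplug excluded. */
        static void with_hotplug_excluded(void (*fn)(int cpu))
        {
                int cpu;

                get_online_cpus();              /* block CPU hotplug */
                for_each_online_cpu(cpu)
                        fn(cpu);                /* the online set cannot change here */
                put_online_cpus();              /* allow hotplug again */
        }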
@@ -242,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
  *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
        struct bp_busy_slots slots = {0};
-       int ret = 0;
-
-       mutex_lock(&nr_bp_mutex);
 
        fetch_bp_busy_slots(&slots, bp);
 
        /* Flexible counters need to keep at least one slot */
-       if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-               ret = -ENOSPC;
-               goto end;
-       }
+       if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+               return -ENOSPC;
 
        toggle_bp_slot(bp, true);
 
-end:
+       return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+       int ret;
+
+       mutex_lock(&nr_bp_mutex);
+
+       ret = __reserve_bp_slot(bp);
+
        mutex_unlock(&nr_bp_mutex);
 
        return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+       toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
        mutex_lock(&nr_bp_mutex);
 
-       toggle_bp_slot(bp, false);
+       __release_bp_slot(bp);
 
        mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock, using the dbg_* variants of the reserve and
+ * release functions.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+       if (mutex_is_locked(&nr_bp_mutex))
+               return -1;
+
+       return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+       if (mutex_is_locked(&nr_bp_mutex))
+               return -1;
+
+       __release_bp_slot(bp);
+
+       return 0;
+}
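The dbg_* variants exist because the kernel debugger (kgdb) runs with every other CPU stopped and must not sleep, so it cannot block on nr_bp_mutex; it bails out instead when the mutex is contended. The mutex_is_locked() check is only safe in that context: with the other CPUs stopped, nobody can take the mutex concurrently. A hedged sketch of a debugger-side caller (the wrapper name is hypothetical; arch_install_hw_breakpoint() is the arch-level helper that programs the debug registers):

        /* Hypothetical debugger-side caller: runs in atomic context, must not sleep. */
        static int dbg_install_bp(struct perf_event *bp)
        {
                int ret;

                ret = dbg_reserve_bp_slot(bp);
                if (ret)
                        return ret;     /* slot accounting busy: fail fast, never block */

                ret = arch_install_hw_breakpoint(bp);
                if (ret)
                        dbg_release_bp_slot(bp);        /* undo the reservation */

                return ret;
        }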
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
@@ -295,6 +328,10 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
        if (!bp->attr.disabled || !bp->overflow_handler)
                ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
+       /* if arch_validate_hwbkpt_settings() failed, release the bp slot */
+       if (ret)
+               release_bp_slot(bp);
+
        return ret;
 }
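The hunk above plugs a slot leak: reserve_bp_slot() has already succeeded by this point, so a validation failure must give the slot back; otherwise every failed registration permanently consumes one of the HBP_NUM slots. A hypothetical illustration, assuming bad_bp is configured so that arch validation always fails:

        /* Before the fix, each failed call below leaked one reserved slot,
         * so HBP_NUM failed attempts left reserve_bp_slot() returning
         * -ENOSPC forever; after the fix, no slots stay reserved. */
        static void demo_slot_leak(struct perf_event *bad_bp)
        {
                int i;

                for (i = 0; i < HBP_NUM; i++)
                        register_perf_hw_breakpoint(bad_bp);    /* fails each time */
        }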
 
@@ -388,7 +425,8 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
        if (!cpu_events)
                return ERR_PTR(-ENOMEM);
 
-       for_each_possible_cpu(cpu) {
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
 
@@ -399,18 +437,20 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
                        goto fail;
                }
        }
+       put_online_cpus();
 
        return cpu_events;
 
 fail:
-       for_each_possible_cpu(cpu) {
+       for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
+       put_online_cpus();
+
        free_percpu(cpu_events);
-       /* return the error if any */
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
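For reference, a caller of register_wide_hw_breakpoint() in this era looks roughly like the in-tree sample (samples/hw_breakpoint/data_breakpoint.c); the watched symbol and all names below are placeholders, not part of this patch:

        #include <linux/module.h>
        #include <linux/kallsyms.h>
        #include <linux/perf_event.h>
        #include <linux/hw_breakpoint.h>
        #include <linux/err.h>

        static struct perf_event **wide_bp;     /* one event per online CPU */

        static void wide_bp_handler(struct perf_event *bp, int nmi,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
        {
                /* runs on whichever CPU touched the watched address */
                dump_stack();
        }

        static int __init wide_bp_init(void)
        {
                struct perf_event_attr attr;

                hw_breakpoint_init(&attr);
                attr.bp_addr = kallsyms_lookup_name("pid_max"); /* placeholder symbol */
                attr.bp_len  = HW_BREAKPOINT_LEN_4;
                attr.bp_type = HW_BREAKPOINT_W;

                wide_bp = register_wide_hw_breakpoint(&attr, wide_bp_handler);
                return IS_ERR(wide_bp) ? PTR_ERR(wide_bp) : 0;
        }

        static void __exit wide_bp_exit(void)
        {
                unregister_wide_hw_breakpoint(wide_bp);
        }

        module_init(wide_bp_init);
        module_exit(wide_bp_exit);
        MODULE_LICENSE("GPL");

Note one consequence of switching from for_each_possible_cpu() to for_each_online_cpu(): events are created only for CPUs online at registration time, so a CPU hotplugged afterwards is not covered by the wide breakpoint.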