struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
- return pmu->boxes[topology_logical_package_id(cpu)];
+ unsigned int pkgid = topology_logical_package_id(cpu);
+
+ /*
+ * The unsigned check also catches the '-1' return value for
+ * non-existent mappings in the topology map.
+ */
+ return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
}
}
-static int uncore_cpu_dying(unsigned int cpu)
-{
- struct intel_uncore_type *type, **types = uncore_msr_uncores;
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box;
- int i, pkg;
-
- pkg = topology_logical_package_id(cpu);
- for (; *types; types++) {
- type = *types;
- pmu = type->pmus;
- for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
- if (box && atomic_dec_return(&box->refcnt) == 0)
- uncore_box_exit(box);
- }
- }
- return 0;
-}
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
- struct intel_uncore_type *type, **types = uncore_msr_uncores;
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box;
- int i, pkg;
-
- pkg = topology_logical_package_id(cpu);
- for (; *types; types++) {
- type = *types;
- pmu = type->pmus;
- for (i = 0; i < type->num_boxes; i++, pmu++) {
- box = pmu->boxes[pkg];
- if (!box)
- continue;
- /* The first cpu on a package activates the box */
- if (atomic_inc_return(&box->refcnt) == 1)
- uncore_box_init(box);
- }
- }
-
- return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
- struct intel_uncore_type *type, **types = uncore_msr_uncores;
- struct intel_uncore_pmu *pmu;
- struct intel_uncore_box *box;
- int i, pkg;
-
- pkg = topology_logical_package_id(cpu);
- for (; *types; types++) {
- type = *types;
- pmu = type->pmus;
- for (i = 0; i < type->num_boxes; i++, pmu++) {
- if (pmu->boxes[pkg])
- continue;
- /* First cpu of a package allocates the box */
- box = uncore_alloc_box(type, cpu_to_node(cpu));
- if (!box)
- return -ENOMEM;
- box->pmu = pmu;
- box->pkgid = pkg;
- pmu->boxes[pkg] = box;
- }
- }
- return 0;
-}
-
static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
int new_cpu)
{
static int uncore_event_cpu_offline(unsigned int cpu)
{
- int target;
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, pkg, target;
/* Check if exiting cpu is used for collecting uncore events */
if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
- return 0;
-
+ goto unref;
/* Find a new cpu to collect uncore events */
target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
uncore_change_context(uncore_msr_uncores, cpu, target);
uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+ /* Drop the box references; the last cpu of the package exits the box */
+ pkg = topology_logical_package_id(cpu);
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
+ if (box && atomic_dec_return(&box->refcnt) == 0)
+ uncore_box_exit(box);
+ }
+ }
return 0;
}
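+/*
+ * Allocate the boxes in two phases: gather the newly allocated boxes on
+ * a local list first and install them into the pmus only after every
+ * allocation has succeeded, so an allocation failure never leaves a
+ * partially populated pmu->boxes[] array behind.
+ */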
+static int allocate_boxes(struct intel_uncore_type **types,
+ unsigned int pkg, unsigned int cpu)
+{
+ struct intel_uncore_box *box, *tmp;
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ LIST_HEAD(allocated);
+ int i;
+
+ /* Try to allocate all required boxes */
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ if (pmu->boxes[pkg])
+ continue;
+ box = uncore_alloc_box(type, cpu_to_node(cpu));
+ if (!box)
+ goto cleanup;
+ box->pmu = pmu;
+ box->pkgid = pkg;
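+ /* Park the box on a local list until all allocations succeed */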
+ list_add(&box->active_list, &allocated);
+ }
+ }
+ /* Install them in the pmus */
+ list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+ list_del_init(&box->active_list);
+ box->pmu->boxes[pkg] = box;
+ }
+ return 0;
+
+cleanup:
+ list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+ list_del_init(&box->active_list);
+ kfree(box);
+ }
+ return -ENOMEM;
+}
+
static int uncore_event_cpu_online(unsigned int cpu)
{
- int target;
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, ret, pkg, target;
+
+ pkg = topology_logical_package_id(cpu);
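+ /* First cpu of a package allocates the boxes */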
+ ret = allocate_boxes(types, pkg, cpu);
+ if (ret)
+ return ret;
+
+ for (; *types; types++) {
+ type = *types;
+ pmu = type->pmus;
+ for (i = 0; i < type->num_boxes; i++, pmu++) {
+ box = pmu->boxes[pkg];
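+ /* The first cpu on a package activates the box */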
+ if (box && atomic_inc_return(&box->refcnt) == 1)
+ uncore_box_init(box);
+ }
+ }
/*
* Check if there is an online cpu in the package
if (cret && pret)
return -ENODEV;
- /*
- * Install callbacks. Core will call them for each online cpu.
- *
- * The first online cpu of each package allocates and takes
- * the refcounts for all other online cpus in that package.
- * If msrs are not enabled no allocation is required and
- * uncore_cpu_prepare() is not called for each online cpu.
- */
- if (!cret) {
- ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
- "perf/x86/intel/uncore:prepare",
- uncore_cpu_prepare, NULL);
- if (ret)
- goto err;
- } else {
- cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
- "perf/x86/intel/uncore:prepare",
- uncore_cpu_prepare, NULL);
- }
-
- cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
- "perf/x86/uncore:starting",
- uncore_cpu_starting, uncore_cpu_dying);
-
- cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
- "perf/x86/uncore:online",
- uncore_event_cpu_online, uncore_event_cpu_offline);
+ /* Install hotplug callbacks to set up the targets for each package */
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+ "perf/x86/intel/uncore:online",
+ uncore_event_cpu_online,
+ uncore_event_cpu_offline);
+ if (ret)
+ goto err;
return 0;
err:
static void __exit intel_uncore_exit(void)
{
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
- cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
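+ /*
+ * Invokes uncore_event_cpu_offline() on all online CPUs, which
+ * drops the box references and exits the boxes.
+ */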
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
uncore_types_exit(uncore_msr_uncores);
uncore_pci_exit();
}