Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 19 May 2017 20:34:34 +0000 (13:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 19 May 2017 20:34:34 +0000 (13:34 -0700)
Pull arm64 fixes/cleanups from Catalin Marinas:

 - Avoid taking a mutex in the secondary CPU bring-up path when
   interrupts are disabled

 - Ignore perf exclude_hv when the kernel is running in Hyp mode

 - Remove redundant instruction in cmpxchg

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/cpufeature: don't use mutex in bringup path
  arm64: perf: Ignore exclude_hv when kernel is running in HYP
  arm64: Remove redundant mov from LL/SC cmpxchg

arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/perf_event.c

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f819fdcff1accf694cc0ec7428096fe41aef7f89..f5a2d09afb3841bd5ac7d40764ef48b2e108de0d 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -264,7 +264,6 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,            \
        "       st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"     \
        "       cbnz    %w[tmp], 1b\n"                                  \
        "       " #mb "\n"                                              \
-       "       mov     %" #w "[oldval], %" #w "[old]\n"                \
        "2:"                                                            \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
          [v] "+Q" (*(unsigned long *)ptr)                              \
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index e7f84a7b44658d2f93d642e06e013300693d6a82..428ee1f2468c55959d52cf1cffde922399387b9f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -115,6 +115,7 @@ struct arm64_cpu_capabilities {
 
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
+extern struct static_key_false arm64_const_caps_ready;
 
 bool this_cpu_has_cap(unsigned int cap);
 
@@ -124,7 +125,7 @@ static inline bool cpu_have_feature(unsigned int num)
 }
 
 /* System capability check for constant caps */
-static inline bool cpus_have_const_cap(int num)
+static inline bool __cpus_have_const_cap(int num)
 {
        if (num >= ARM64_NCAPS)
                return false;
@@ -138,6 +139,14 @@ static inline bool cpus_have_cap(unsigned int num)
        return test_bit(num, cpu_hwcaps);
 }
 
+static inline bool cpus_have_const_cap(int num)
+{
+       if (static_branch_likely(&arm64_const_caps_ready))
+               return __cpus_have_const_cap(num);
+       else
+               return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
        if (num >= ARM64_NCAPS) {
@@ -145,7 +154,6 @@ static inline void cpus_set_cap(unsigned int num)
                        num, ARM64_NCAPS);
        } else {
                __set_bit(num, cpu_hwcaps);
-               static_branch_enable(&cpu_hwcap_keys[num]);
        }
 }
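
The shape of this fix is a two-phase lookup: until initialization completes, every caller takes a slow but always-safe path; once the global arm64_const_caps_ready key is flipped, all callers switch to the patched-in fast path. A freestanding sketch of the pattern, with plain booleans standing in for static keys and all names illustrative:

    #include <limits.h>
    #include <stdbool.h>

    #define NCAPS           64
    #define BITS_PER_WORD   (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long cap_bitmap[(NCAPS + BITS_PER_WORD - 1) / BITS_PER_WORD];
    static bool cap_keys[NCAPS];   /* stand-in for cpu_hwcap_keys[] */
    static bool caps_ready;        /* stand-in for arm64_const_caps_ready */

    static bool have_cap_slow(int num)     /* mirrors cpus_have_cap() */
    {
            return cap_bitmap[num / BITS_PER_WORD] & (1UL << (num % BITS_PER_WORD));
    }

    static bool have_cap_fast(int num)     /* mirrors __cpus_have_const_cap() */
    {
            return cap_keys[num];
    }

    /* mirrors cpus_have_const_cap(): correct before and after boot finishes */
    static bool have_cap(int num)
    {
            if (num >= NCAPS)
                    return false;
            return caps_ready ? have_cap_fast(num) : have_cap_slow(num);
    }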
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 5e19165c5fa8b86002f0308812a0c49199a91b44..1f252a95bc02979cc8c6f01425e26e44f34c1990 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -24,6 +24,7 @@
 
 #include <linux/types.h>
 #include <linux/kvm_types.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
@@ -355,9 +356,12 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
                                       unsigned long vector_ptr)
 {
        /*
-        * Call initialization code, and switch to the full blown
-        * HYP code.
+        * Call initialization code, and switch to the full blown HYP code.
+        * If the cpucaps haven't been finalized yet, something has gone very
+        * wrong, and hyp will crash and burn when it uses any
+        * cpus_have_const_cap() wrapper.
         */
+       BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
        __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
 }
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 94b8f7fc33100f85db825a372bf36d6f597934ac..817ce3365e200dfb5f7ff205dadabe72c3490e43 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -985,8 +985,16 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  */
 void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
-       for (; caps->matches; caps++)
-               if (caps->enable && cpus_have_cap(caps->capability))
+       for (; caps->matches; caps++) {
+               unsigned int num = caps->capability;
+
+               if (!cpus_have_cap(num))
+                       continue;
+
+               /* Ensure cpus_have_const_cap(num) works */
+               static_branch_enable(&cpu_hwcap_keys[num]);
+
+               if (caps->enable) {
                        /*
                         * Use stop_machine() as it schedules the work allowing
                         * us to modify PSTATE, instead of on_each_cpu() which
@@ -994,6 +1002,8 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
                         * we return.
                         */
                        stop_machine(caps->enable, NULL, cpu_online_mask);
+               }
+       }
 }
 
 /*
@@ -1096,6 +1106,14 @@ static void __init setup_feature_capabilities(void)
        enable_cpu_capabilities(arm64_features);
 }
 
+DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
+EXPORT_SYMBOL(arm64_const_caps_ready);
+
+static void __init mark_const_caps_ready(void)
+{
+       static_branch_enable(&arm64_const_caps_ready);
+}
+
 /*
  * Check if the current CPU has a given feature capability.
  * Should be called from non-preemptible context.
@@ -1131,6 +1149,7 @@ void __init setup_cpu_features(void)
        /* Set the CPU feature capabilies */
        setup_feature_capabilities();
        enable_errata_workarounds();
+       mark_const_caps_ready();
        setup_elf_hwcaps(arm64_elf_hwcaps);
 
        if (system_supports_32bit_el0())
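
Continuing the sketch above, the ordering this hunk establishes is the point of the fix: each detected capability's key is enabled on the boot CPU, in process context where taking the jump-label mutex is legal, and only then is the ready flag flipped. Secondary CPUs brought up later merely read the keys, so the bring-up path never touches a mutex with interrupts disabled. Illustrative only:

    /* Boot CPU, process context: mirrors the setup_cpu_features() ordering. */
    static void finalize_caps(void)
    {
            for (int num = 0; num < NCAPS; num++)
                    if (have_cap_slow(num))
                            cap_keys[num] = true;   /* static_branch_enable() */

            caps_ready = true;                      /* mark_const_caps_ready() */
    }

    /*
     * Secondary CPU bring-up (interrupts disabled) only ever calls
     * have_cap(), a pure read -- no lock is taken on that path.
     */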
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index bcc79471b38e24cff24b5702a182752ef26583cc..83a1b1ad189f51536af8c4c8dd8c509cb70f9a95 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -877,15 +877,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 
        if (attr->exclude_idle)
                return -EPERM;
-       if (is_kernel_in_hyp_mode() &&
-           attr->exclude_kernel != attr->exclude_hv)
-               return -EINVAL;
+
+       /*
+        * If we're running in hyp mode, then we *are* the hypervisor.
+        * Therefore we ignore exclude_hv in this configuration, since
+        * there's no hypervisor to sample anyway. This is consistent
+        * with other architectures (x86 and Power).
+        */
+       if (is_kernel_in_hyp_mode()) {
+               if (!attr->exclude_kernel)
+                       config_base |= ARMV8_PMU_INCLUDE_EL2;
+       } else {
+               if (attr->exclude_kernel)
+                       config_base |= ARMV8_PMU_EXCLUDE_EL1;
+               if (!attr->exclude_hv)
+                       config_base |= ARMV8_PMU_INCLUDE_EL2;
+       }
        if (attr->exclude_user)
                config_base |= ARMV8_PMU_EXCLUDE_EL0;
-       if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
-               config_base |= ARMV8_PMU_EXCLUDE_EL1;
-       if (!attr->exclude_hv)
-               config_base |= ARMV8_PMU_INCLUDE_EL2;
 
        /*
         * Install the filter into config_base as this is used to
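
The rewritten filter reduces to a small decision over the exception levels. A standalone sketch that mirrors it (the ARMV8_PMU_* names are taken from the patch; the bit positions shown match the PMEVTYPER P/U/NSH filter bits, but treat the values as illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define ARMV8_PMU_EXCLUDE_EL1   (1U << 31)   /* P: don't count EL1 */
    #define ARMV8_PMU_EXCLUDE_EL0   (1U << 30)   /* U: don't count EL0 */
    #define ARMV8_PMU_INCLUDE_EL2   (1U << 27)   /* NSH: also count EL2 */

    /* Mirrors the exclude_* handling in armv8pmu_set_event_filter(). */
    static uint32_t filter_bits(bool in_hyp, bool excl_user,
                                bool excl_kernel, bool excl_hv)
    {
            uint32_t config_base = 0;

            if (in_hyp) {
                    /* The kernel *is* the hypervisor (VHE), so exclude_hv
                     * is ignored and "kernel" means EL2, not EL1. */
                    if (!excl_kernel)
                            config_base |= ARMV8_PMU_INCLUDE_EL2;
            } else {
                    if (excl_kernel)
                            config_base |= ARMV8_PMU_EXCLUDE_EL1;
                    if (!excl_hv)
                            config_base |= ARMV8_PMU_INCLUDE_EL2;
            }
            if (excl_user)
                    config_base |= ARMV8_PMU_EXCLUDE_EL0;

            return config_base;
    }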