Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Jul 2017 20:29:36 +0000 (13:29 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Jul 2017 20:29:36 +0000 (13:29 -0700)
Pull arm64 fixes from Will Deacon:
 "I'd been collecting these whilst we debugged a CPU hotplug failure,
  but we ended up diagnosing that one to tglx, who has taken a fix via
  the -tip tree separately.

  We're seeing some NFS issues that we haven't gotten to the bottom of
  yet, and we've uncovered some issues with our backtracing too so there
  might be another fixes pull before we're done.

  Summary:

   - Ensure we have a guard page after the kernel image in vmalloc

   - Fix incorrect prefetch stride in copy_page

   - Ensure irqs are disabled in die()

   - Fix for event group validation in QCOM L2 PMU driver

   - Fix requesting of PMU IRQs on AMD Seattle

   - Minor cleanups and fixes"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: mmu: Place guard page after mapping of kernel image
  drivers/perf: arm_pmu: Request PMU SPIs with IRQF_PER_CPU
  arm64: sysreg: Fix unprotected macro argument in write_sysreg
  perf: qcom_l2: fix column exclusion check
  arm64/lib: copy_page: use consistent prefetch stride
  arm64/numa: Drop duplicate message
  perf: Convert to using %pOF instead of full_name
  arm64: Convert to using %pOF instead of full_name
  arm64: traps: disable irq in die()
  arm64: atomics: Remove '&' from '+&' asm constraint in lse atomics
  arm64: uaccess: Remove redundant __force from addr cast in __range_ok

15 files changed:
arch/arm/mach-ux500/cpu-db8500.c
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/topology.c
arch/arm64/kernel/traps.c
arch/arm64/lib/copy_page.S
arch/arm64/mm/mmu.c
arch/arm64/mm/numa.c
drivers/perf/arm_pmu.c
drivers/perf/arm_pmu_platform.c
drivers/perf/qcom_l2_pmu.c
include/linux/perf/arm_pmu.h

index 28083ef728195816b440b4076d0f1d4a69aa3092..71a34e8c345a5b19b98f42cc368f796118d3214f 100644 (file)
@@ -133,6 +133,7 @@ static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
 
 static struct arm_pmu_platdata db8500_pmu_platdata = {
        .handle_irq             = db8500_pmu_handler,
+       .irq_flags              = IRQF_NOBALANCING | IRQF_NO_THREAD,
 };
 
 static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
index 99fa69c9c3cf3ebf080c7443fb16ac6c11cf234a..9ef0797380cbbdf182a86e934c2eec5aa97d889d 100644 (file)
@@ -435,7 +435,7 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
        "       sub     x30, x30, %[ret]\n"
        "       cbnz    x30, 1b\n"
        "2:")
-       : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+       : [ret] "+r" (x0), [v] "+Q" (v->counter)
        :
        : __LL_SC_CLOBBERS, "cc", "memory");
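
For context, '&' marks an early-clobber output (one that may be written before the inputs are consumed, so it must not share a register with them), while '+' marks a read-write operand. A standalone sketch of the two forms with a hypothetical helper, for illustration only (not kernel code):

    static unsigned long constraint_demo(unsigned long in)
    {
            unsigned long tmp;

            /* "=&r": early-clobber output -- written before the inputs are fully
             * consumed, so it must not share a register with the input "in". */
            asm("mov %0, %1\n\tadd %0, %0, %1" : "=&r" (tmp) : "r" (in));

            /* "+r": read-write operand. In the hunk above the asm has no other
             * register inputs for x0 to clash with, so the extra '&' did nothing. */
            asm("add %0, %0, %0" : "+r" (tmp));

            return tmp;
    }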
 
index 16e44fa9b3b61b0bec9cc8ed92874310cd094176..248339e4aaf5a7a0b6bc09bc2184654bbda72845 100644 (file)
@@ -492,7 +492,7 @@ asm(
  * the "%x0" template means XZR.
  */
 #define write_sysreg(v, r) do {                                        \
-       u64 __val = (u64)v;                                     \
+       u64 __val = (u64)(v);                                   \
        asm volatile("msr " __stringify(r) ", %x0"              \
                     : : "rZ" (__val));                         \
 } while (0)
@@ -508,7 +508,7 @@ asm(
 })
 
 #define write_sysreg_s(v, r) do {                                      \
-       u64 __val = (u64)v;                                             \
+       u64 __val = (u64)(v);                                           \
        asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
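
The extra parentheses matter whenever the macro argument is an expression rather than a plain identifier, because a cast binds more tightly than most operators. A standalone illustration with hypothetical macros (not the kernel ones):

    #define UNSAFE_CAST(v)  ((unsigned long long)v)
    #define SAFE_CAST(v)    ((unsigned long long)(v))

    /* With the argument written as "flag ? hi : lo":
     *
     *   UNSAFE_CAST(flag ? hi : lo)  ->  (unsigned long long)flag ? hi : lo
     *       the cast binds to "flag" alone and the conditional is evaluated
     *       afterwards, which is not what the caller wrote;
     *
     *   SAFE_CAST(flag ? hi : lo)    ->  (unsigned long long)(flag ? hi : lo)
     *       the cast applies to the selected value, as intended.
     */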
 
index 8f0a1de11e4a6cd11fc7af0bd6db587538fab7be..fab46a0ea223d74781464a2a3904a4d2eac0a423 100644 (file)
@@ -69,7 +69,7 @@ static inline void set_fs(mm_segment_t fs)
  */
 #define __range_ok(addr, size)                                         \
 ({                                                                     \
-       unsigned long __addr = (unsigned long __force)(addr);           \
+       unsigned long __addr = (unsigned long)(addr);                   \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"         \
index e137ceaf5016b2d796e6703058d6f62209955059..d16978213c5b332b439205f7c582e1c9a2f65e4d 100644 (file)
@@ -82,8 +82,8 @@ static const char *__init cpu_read_enable_method(int cpu)
                         * Don't warn spuriously.
                         */
                        if (cpu != 0)
-                               pr_err("%s: missing enable-method property\n",
-                                       dn->full_name);
+                               pr_err("%pOF: missing enable-method property\n",
+                                       dn);
                }
        } else {
                enable_method = acpi_get_enable_method(cpu);
index 321119881abfedf93b4df127cb4f8bc74764e20d..dc66e6ec3a99dfe9e91d83f1cde2cec8ff225863 100644 (file)
@@ -469,7 +469,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
         */
        cell = of_get_property(dn, "reg", NULL);
        if (!cell) {
-               pr_err("%s: missing reg property\n", dn->full_name);
+               pr_err("%pOF: missing reg property\n", dn);
                return INVALID_HWID;
        }
 
@@ -478,7 +478,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
         * Non affinity bits must be set to 0 in the DT
         */
        if (hwid & ~MPIDR_HWID_BITMASK) {
-               pr_err("%s: invalid reg property\n", dn->full_name);
+               pr_err("%pOF: invalid reg property\n", dn);
                return INVALID_HWID;
        }
        return hwid;
@@ -627,8 +627,8 @@ static void __init of_parse_and_init_cpus(void)
                        goto next;
 
                if (is_mpidr_duplicate(cpu_count, hwid)) {
-                       pr_err("%s: duplicate cpu reg properties in the DT\n",
-                               dn->full_name);
+                       pr_err("%pOF: duplicate cpu reg properties in the DT\n",
+                               dn);
                        goto next;
                }
 
@@ -640,8 +640,8 @@ static void __init of_parse_and_init_cpus(void)
                 */
                if (hwid == cpu_logical_map(0)) {
                        if (bootcpu_valid) {
-                               pr_err("%s: duplicate boot cpu reg property in DT\n",
-                                       dn->full_name);
+                               pr_err("%pOF: duplicate boot cpu reg property in DT\n",
+                                       dn);
                                goto next;
                        }
 
index 79244c75eaec4fb6f13d3b19a9fd1b8e20346108..8d48b233e6ce5d09cd84db6a21a52f4d0dd68e97 100644 (file)
@@ -45,7 +45,7 @@ static int __init get_cpu_for_node(struct device_node *node)
                }
        }
 
-       pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+       pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
 
        of_node_put(cpu_node);
        return -1;
@@ -71,8 +71,8 @@ static int __init parse_core(struct device_node *core, int cluster_id,
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else {
-                               pr_err("%s: Can't get CPU for thread\n",
-                                      t->full_name);
+                               pr_err("%pOF: Can't get CPU for thread\n",
+                                      t);
                                of_node_put(t);
                                return -EINVAL;
                        }
@@ -84,15 +84,15 @@ static int __init parse_core(struct device_node *core, int cluster_id,
        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
-                       pr_err("%s: Core has both threads and CPU\n",
-                              core->full_name);
+                       pr_err("%pOF: Core has both threads and CPU\n",
+                              core);
                        return -EINVAL;
                }
 
                cpu_topology[cpu].cluster_id = cluster_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf) {
-               pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+               pr_err("%pOF: Can't get CPU for leaf core\n", core);
                return -EINVAL;
        }
 
@@ -137,8 +137,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
                        has_cores = true;
 
                        if (depth == 0) {
-                               pr_err("%s: cpu-map children should be clusters\n",
-                                      c->full_name);
+                               pr_err("%pOF: cpu-map children should be clusters\n",
+                                      c);
                                of_node_put(c);
                                return -EINVAL;
                        }
@@ -146,8 +146,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
                        if (leaf) {
                                ret = parse_core(c, cluster_id, core_id++);
                        } else {
-                               pr_err("%s: Non-leaf cluster with core %s\n",
-                                      cluster->full_name, name);
+                               pr_err("%pOF: Non-leaf cluster with core %s\n",
+                                      cluster, name);
                                ret = -EINVAL;
                        }
 
@@ -159,7 +159,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
        } while (c);
 
        if (leaf && !has_cores)
-               pr_warn("%s: empty cluster\n", cluster->full_name);
+               pr_warn("%pOF: empty cluster\n", cluster);
 
        if (leaf)
                cluster_id++;
index c7c7088097be0c1b8e80d594c3d02f715c03abfa..d48f470802136e17dfc7ec83be568b8e913e6cf4 100644 (file)
@@ -274,10 +274,12 @@ static DEFINE_RAW_SPINLOCK(die_lock);
 void die(const char *str, struct pt_regs *regs, int err)
 {
        int ret;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&die_lock, flags);
 
        oops_enter();
 
-       raw_spin_lock_irq(&die_lock);
        console_verbose();
        bust_spinlocks(1);
        ret = __die(str, err, regs);
@@ -287,13 +289,15 @@ void die(const char *str, struct pt_regs *regs, int err)
 
        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
-       raw_spin_unlock_irq(&die_lock);
        oops_exit();
 
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
+
+       raw_spin_unlock_irqrestore(&die_lock, flags);
+
        if (ret != NOTIFY_STOP)
                do_exit(SIGSEGV);
 }
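
die() can be reached with interrupts either enabled or already disabled, and raw_spin_unlock_irq() would unconditionally re-enable them on the way out; the _irqsave/_irqrestore pair used above records and restores the caller's interrupt state instead (and, as placed here, also keeps interrupts off across the panic() paths). The general pattern, as a hypothetical sketch:

    static void irq_state_demo(raw_spinlock_t *lock)
    {
            unsigned long flags;

            raw_spin_lock_irq(lock);                  /* the unlock below forces IRQs back ON */
            raw_spin_unlock_irq(lock);

            raw_spin_lock_irqsave(lock, flags);       /* remembers whether IRQs were off      */
            raw_spin_unlock_irqrestore(lock, flags);  /* restores the caller's IRQ state      */
    }
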
index c3cd65e3181414bcaf3ec5b50937f95bf895570b..076c43715e64afe7fcc12eabd753d355ca94558f 100644 (file)
  */
 ENTRY(copy_page)
 alternative_if ARM64_HAS_NO_HW_PREFETCH
-       # Prefetch two cache lines ahead.
-       prfm    pldl1strm, [x1, #128]
-       prfm    pldl1strm, [x1, #256]
+       // Prefetch three cache lines ahead.
+       prfm    pldl1strm, [x1, #128]
+       prfm    pldl1strm, [x1, #256]
+       prfm    pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
        ldp     x2, x3, [x1]
@@ -50,7 +51,7 @@ alternative_else_nop_endif
        subs    x18, x18, #128
 
 alternative_if ARM64_HAS_NO_HW_PREFETCH
-       prfm    pldl1strm, [x1, #384]
+       prfm    pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
        stnp    x2, x3, [x0]
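
The change makes the software prefetching consistent: the initial prefetches now cover 128, 256 and 384 bytes ahead, matching the constant 384-byte stride used inside the 128-byte-per-iteration copy loop. Roughly the same pattern in C, as a hypothetical userspace analogy (not the kernel routine):

    #include <string.h>

    /* Copy a 4 KiB page in 128-byte chunks, keeping the software prefetches a
     * constant 384 bytes ahead of the read pointer. */
    static void copy_page_like(char *dst, const char *src)
    {
            size_t off;

            __builtin_prefetch(src + 128);   /* prime the next three chunks    */
            __builtin_prefetch(src + 256);
            __builtin_prefetch(src + 384);

            for (off = 0; off < 4096; off += 128) {
                    __builtin_prefetch(src + off + 384);   /* constant stride */
                    memcpy(dst + off, src + off, 128);
            }
    }
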
index 23c2d89a362e4423f62c732b290015d23211f38e..f1eb15e0e8642d2a74c7d19d18e8e8d3ae5a7062 100644 (file)
@@ -496,7 +496,7 @@ void mark_rodata_ro(void)
 
 static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
                                      pgprot_t prot, struct vm_struct *vma,
-                                     int flags)
+                                     int flags, unsigned long vm_flags)
 {
        phys_addr_t pa_start = __pa_symbol(va_start);
        unsigned long size = va_end - va_start;
@@ -507,10 +507,13 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
        __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
                             early_pgtable_alloc, flags);
 
+       if (!(vm_flags & VM_NO_GUARD))
+               size += PAGE_SIZE;
+
        vma->addr       = va_start;
        vma->phys_addr  = pa_start;
        vma->size       = size;
-       vma->flags      = VM_MAP;
+       vma->flags      = VM_MAP | vm_flags;
        vma->caller     = __builtin_return_address(0);
 
        vm_area_add_early(vma);
@@ -541,14 +544,15 @@ static void __init map_kernel(pgd_t *pgd)
         * Only rodata will be remapped with different permissions later on,
         * all other segments are allowed to use contiguous mappings.
         */
-       map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0);
+       map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+                          VM_NO_GUARD);
        map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
-                          &vmlinux_rodata, NO_CONT_MAPPINGS);
+                          &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
        map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
-                          &vmlinux_inittext, 0);
+                          &vmlinux_inittext, 0, VM_NO_GUARD);
        map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
-                          &vmlinux_initdata, 0);
-       map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0);
+                          &vmlinux_initdata, 0, VM_NO_GUARD);
+       map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
 
        if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
                /*
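
The net effect is that every kernel-image segment except the last keeps VM_NO_GUARD, while the final data segment's vm_struct is registered one page larger than its mapping, so the vmalloc allocator leaves an unmapped hole immediately after the kernel image. The same guard-page idea in a userspace sketch (mmap/mprotect analogy with a hypothetical helper, assuming 4 KiB pages; not the kernel code):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Reserve one extra, inaccessible page after the region so a linear
     * overrun faults instead of touching whatever is mapped next. */
    static void *alloc_with_guard(size_t size)      /* size: page-aligned */
    {
            void *p = mmap(NULL, size + 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return NULL;
            mprotect((char *)p + size, 4096, PROT_NONE);   /* the guard page */
            return p;
    }
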
index b388a99fea7b783e5ab8edcad004f8a7457ee40e..dad128ba98bf8d827a1f7cc750f41c543a8f92f8 100644 (file)
@@ -208,8 +208,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
        }
 
        node_set(nid, numa_nodes_parsed);
-       pr_info("Adding memblock [0x%llx - 0x%llx] on node %d\n",
-                       start, (end - 1), nid);
        return ret;
 }
 
@@ -223,10 +221,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
        void *nd;
        int tnid;
 
-       if (start_pfn < end_pfn)
-               pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
-                       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
-       else
+       if (start_pfn >= end_pfn)
                pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
 
        nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
index dc459eb1246b16e9368dd0091d8b6bf50630eaba..1c5e0f33377936e16d42d9a5ea9c984157f1e8d4 100644 (file)
@@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
                if (irq != other_irq) {
                        pr_warn("mismatched PPIs detected.\n");
                        err = -EINVAL;
+                       goto err_out;
                }
        } else {
-               err = request_irq(irq, handler,
-                                 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+               struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+               unsigned long irq_flags;
+
+               err = irq_force_affinity(irq, cpumask_of(cpu));
+
+               if (err && num_possible_cpus() > 1) {
+                       pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+                               irq, cpu);
+                       goto err_out;
+               }
+
+               if (platdata && platdata->irq_flags) {
+                       irq_flags = platdata->irq_flags;
+               } else {
+                       irq_flags = IRQF_PERCPU |
+                                   IRQF_NOBALANCING |
+                                   IRQF_NO_THREAD;
+               }
+
+               err = request_irq(irq, handler, irq_flags, "arm-pmu",
                                  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
        }
 
-       if (err) {
-               pr_err("unable to request IRQ%d for ARM PMU counters\n",
-                       irq);
-               return err;
-       }
+       if (err)
+               goto err_out;
 
        cpumask_set_cpu(cpu, &armpmu->active_irqs);
-
        return 0;
+
+err_out:
+       pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
+       return err;
 }
 
 int armpmu_request_irqs(struct arm_pmu *armpmu)
@@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
                        enable_percpu_irq(irq, IRQ_TYPE_NONE);
                        return 0;
                }
-
-               if (irq_force_affinity(irq, cpumask_of(cpu)) &&
-                   num_possible_cpus() > 1) {
-                       pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-                               irq, cpu);
-               }
        }
 
        return 0;
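
Condensed, the per-CPU SPI branch after this change behaves roughly as below: pin the interrupt's affinity first, then request it with per-CPU flags unless the platform supplied its own via arm_pmu_platdata. This is a paraphrase of the diff above, not a literal copy; names are as in the diff, with dev_id standing in for the per-CPU hw_events pointer:

    err = irq_force_affinity(irq, cpumask_of(cpu));
    if (err && num_possible_cpus() > 1)
            goto err_out;

    err = request_irq(irq, handler,
                      (platdata && platdata->irq_flags) ?
                              platdata->irq_flags :
                              IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
                      "arm-pmu", dev_id);
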
index 69255f53057a38131e4588313e34b92bffdc2b3d..4eafa7a42e52102f2d02be5c8dac277861995dee 100644 (file)
@@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
        }
 
        if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
-               pr_warn("no interrupt-affinity property for %s, guessing.\n",
-                       of_node_full_name(pdev->dev.of_node));
+               pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
+                       pdev->dev.of_node);
        }
 
        /*
@@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
        }
 
        if (ret) {
-               pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+               pr_info("%pOF: failed to probe PMU!\n", node);
                goto out_free;
        }
 
@@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 out_free_irqs:
        armpmu_free_irqs(pmu);
 out_free:
-       pr_info("%s: failed to register PMU devices!\n",
-               of_node_full_name(node));
+       pr_info("%pOF: failed to register PMU devices!\n", node);
        armpmu_free(pmu);
        return ret;
 }
index c259848228b4c337cab8e50ef1d0f6673fcb9b93..b242cce104686944efb68bfb4e8347358ffa135a 100644 (file)
@@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event)
        }
 
        if ((event != event->group_leader) &&
+           !is_software_event(event->group_leader) &&
            (L2_EVT_GROUP(event->group_leader->attr.config) ==
             L2_EVT_GROUP(event->attr.config))) {
                dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event)
        list_for_each_entry(sibling, &event->group_leader->sibling_list,
                            group_entry) {
                if ((sibling != event) &&
+                   !is_software_event(sibling) &&
                    (L2_EVT_GROUP(sibling->attr.config) ==
                     L2_EVT_GROUP(event->attr.config))) {
                        dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
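
Both hunks add the same skip: software events (context switches, page faults and the like) never occupy an L2 PMU counter column, so they must not be counted as column-exclusion conflicts when grouped with L2 cache events. The shape of the check in isolation, as a hypothetical helper (L2_EVT_GROUP() as in the driver):

    static bool column_conflict(struct perf_event *ev, struct perf_event *other)
    {
            if (is_software_event(other))
                    return false;   /* software events use no counter column */
            return L2_EVT_GROUP(other->attr.config) ==
                   L2_EVT_GROUP(ev->attr.config);
    }
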
index 1360dd6d5e617a0fee746a0d530129e66530a0fb..af0f44effd44abc067d7f31e498c433fd061725e 100644 (file)
  *     interrupt and passed the address of the low level handler,
  *     and can be used to implement any platform specific handling
  *     before or after calling it.
+ *
+ * @irq_flags: if non-zero, these flags will be passed to request_irq
+ *             when requesting interrupts for this PMU device.
  */
 struct arm_pmu_platdata {
        irqreturn_t (*handle_irq)(int irq, void *dev,
                                  irq_handler_t pmu_handler);
+       unsigned long irq_flags;
 };
 
 #ifdef CONFIG_ARM_PMU