Merge tag 'v4.13-rc1' into fixes
author Michael Ellerman <mpe@ellerman.id.au>
Mon, 31 Jul 2017 10:20:29 +0000 (20:20 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Mon, 31 Jul 2017 10:20:29 +0000 (20:20 +1000)
The fixes branch is based off a random pre-rc1 commit, because we had
some fixes that needed to go in before rc1 was released.

However, we now need to fix some code that went in after that point, but
before rc1, so merge rc1 to get that code into fixes so we can fix it!

arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/smp.c

diff --cc arch/powerpc/kernel/exceptions-64s.S
index 124091d306ff50c99df6328a60fa78d4947589d2,e6d8354d79ef25a34ece7766683813a65ffde57c..9029afd1fa2ab2ce231045659abf11ec9eaa32f6
@@@ -824,7 -824,7 +824,7 @@@ EXC_COMMON(trap_0b_common, 0xb00, unkno
   * r3 volatile parameter and return value for status
   * r4-r10 volatile input and output value
   * r11 volatile hypercall number and output value
 - * r12 volatile
 + * r12 volatile input and output value
   * r13-r31 nonvolatile
   * LR nonvolatile
   * CTR volatile
   * Other registers nonvolatile
   *
   * The intersection of volatile registers that don't contain possible
 - * inputs is: r12, cr0, xer, ctr. We may use these as scratch regs
 - * upon entry without saving.
 + * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 + * without saving, though xer is not a good idea to use, as hardware may
 + * interpret some bits so it may be costly to change them.
   */
  #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        /*
         * There is a little bit of juggling to get syscall and hcall
 -       * working well. Save r10 in ctr to be restored in case it is a
 -       * hcall.
 +       * working well. Save r13 in ctr to avoid using SPRG scratch
 +       * register.
         *
         * Userspace syscalls have already saved the PPR, hcalls must save
         * it before setting HMT_MEDIUM.
         */
  #define SYSCALL_KVMTEST                                                       \
 -      mr      r12,r13;                                                \
 +      mtctr   r13;                                                    \
        GET_PACA(r13);                                                  \
 -      mtctr   r10;                                                    \
 +      std     r10,PACA_EXGEN+EX_R10(r13);                             \
        KVMTEST_PR(0xc00); /* uses r10, branch to do_kvm_0xc00_system_call */ \
        HMT_MEDIUM;                                                     \
 -      mr      r9,r12;                                                 \
 +      mfctr   r9;
  
  #else
  #define SYSCALL_KVMTEST                                                       \
@@@ -936,8 -935,8 +936,8 @@@ EXC_VIRT_END(system_call, 0x4c00, 0x100
         * This is a hcall, so register convention is as above, with these
         * differences:
         * r13 = PACA
 -       * r12 = orig r13
 -       * ctr = orig r10
 +       * ctr = orig r13
 +       * orig r10 saved in PACA
         */
  TRAMP_KVM_BEGIN(do_kvm_0xc00)
         /*
          * HMT_MEDIUM. That allows the KVM code to save that value into the
          * guest state (it is the guest's PPR value).
          */
 -      OPT_GET_SPR(r0, SPRN_PPR, CPU_FTR_HAS_PPR)
 +      OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
        HMT_MEDIUM
 -      OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r0, CPU_FTR_HAS_PPR)
 +      OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
        mfctr   r10
 -      SET_SCRATCH0(r12)
 +      SET_SCRATCH0(r10)
        std     r9,PACA_EXGEN+EX_R9(r13)
        mfcr    r9
 -      std     r10,PACA_EXGEN+EX_R10(r13)
        KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
  #endif
  
@@@ -1314,6 -1314,31 +1314,31 @@@ EXC_REAL_NONE(0x1800, 0x100
  EXC_VIRT_NONE(0x5800, 0x100)
  #endif
  
+ #if defined(CONFIG_HARDLOCKUP_DETECTOR) && defined(CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH)
+ #define MASKED_DEC_HANDLER_LABEL 3f
+ #define MASKED_DEC_HANDLER(_H)                                \
+ 3: /* soft-nmi */                                     \
+       std     r12,PACA_EXGEN+EX_R12(r13);             \
+       GET_SCRATCH0(r10);                              \
+       std     r10,PACA_EXGEN+EX_R13(r13);             \
+       EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H)
+ EXC_COMMON_BEGIN(soft_nmi_common)
+       mr      r10,r1
+       ld      r1,PACAEMERGSP(r13)
+       ld      r1,PACA_NMI_EMERG_SP(r13)
+       subi    r1,r1,INT_FRAME_SIZE
+       EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900,
+                       system_reset, soft_nmi_interrupt,
+                       ADD_NVGPRS;ADD_RECONCILE)
+       b       ret_from_except
+ #else
+ #define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
+ #define MASKED_DEC_HANDLER(_H)
+ #endif
  
  /*
   * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@@ -1336,7 -1361,7 +1361,7 @@@ masked_##_H##interrupt:                                 
        lis     r10,0x7fff;                             \
        ori     r10,r10,0xffff;                         \
        mtspr   SPRN_DEC,r10;                           \
-       b       2f;                                     \
+       b       MASKED_DEC_HANDLER_LABEL;               \
  1:    cmpwi   r10,PACA_IRQ_DBELL;                     \
        beq     2f;                                     \
        cmpwi   r10,PACA_IRQ_HMI;                       \
        ld      r11,PACA_EXGEN+EX_R11(r13);             \
        GET_SCRATCH0(r13);                              \
        ##_H##rfid;                                     \
-       b       .
+       b       .;                                      \
+       MASKED_DEC_HANDLER(_H)
  
  /*
   * Real mode exceptions actually use this too, but alternate
diff --cc arch/powerpc/kernel/smp.c
index b0ea6d4d4853c90f63db6d0c2da8d8202bebacf4,997c88d54acf292b3e80beef1791ee2194ca9071..cf0e1245b8cc1c78948a4004be2d20c5b5ac0b78
@@@ -435,13 -435,31 +435,31 @@@ static void do_smp_send_nmi_ipi(int cpu
        }
  }
  
+ void smp_flush_nmi_ipi(u64 delay_us)
+ {
+       unsigned long flags;
+       nmi_ipi_lock_start(&flags);
+       while (nmi_ipi_busy_count) {
+               nmi_ipi_unlock_end(&flags);
+               udelay(1);
+               if (delay_us) {
+                       delay_us--;
+                       if (!delay_us)
+                               return;
+               }
+               nmi_ipi_lock_start(&flags);
+       }
+       nmi_ipi_unlock_end(&flags);
+ }
  /*
   * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
   * - fn is the target callback function.
   * - delay_us > 0 is the delay before giving up waiting for targets to
   *   enter the handler, == 0 specifies indefinite delay.
   */
- static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
+ int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
  {
        unsigned long flags;
        int me = raw_smp_processor_id();
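
The comment above spells out the smp_send_nmi_ipi() API that this merge brings in as a non-static symbol. As a rough illustration only, a caller might look like the sketch below; none of the names come from this commit, and the return-value check assumes a nonzero return means all targets entered the handler.

 /*
  * Hypothetical caller sketch, not part of this diff: send an NMI IPI to
  * every other CPU and allow up to 1s (1000000 us) for them to enter the
  * handler; per the comment above, delay_us == 0 would wait indefinitely.
  */
 #include <linux/smp.h>
 #include <linux/printk.h>
 #include <linux/ptrace.h>

 static void example_nmi_handler(struct pt_regs *regs)
 {
        /* Runs on each target CPU in NMI context; keep the work minimal. */
        pr_emerg("CPU %d entered the NMI IPI handler\n", raw_smp_processor_id());
 }

 static void example_send_nmi(void)
 {
        /* NMI_IPI_ALL_OTHERS targets every CPU except the calling one. */
        if (!smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, example_nmi_handler, 1000000))
                pr_warn("some CPUs did not enter the NMI handler in time\n");
 }
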
@@@ -985,13 -1003,21 +1003,13 @@@ static struct sched_domain_topology_lev
        { NULL, },
  };
  
 -static __init long smp_setup_cpu_workfn(void *data __always_unused)
 -{
 -      smp_ops->setup_cpu(boot_cpuid);
 -      return 0;
 -}
 -
  void __init smp_cpus_done(unsigned int max_cpus)
  {
        /*
 -       * We want the setup_cpu() here to be called on the boot CPU, but
 -       * init might run on any CPU, so make sure it's invoked on the boot
 -       * CPU.
 +       * We are running pinned to the boot CPU, see rest_init().
         */
        if (smp_ops && smp_ops->setup_cpu)
 -              work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);
 +              smp_ops->setup_cpu(boot_cpuid);
  
        if (smp_ops && smp_ops->bringup_done)
                smp_ops->bringup_done();
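
The smp_cpus_done() hunk above drops the work_on_cpu_safe() bounce because init is already pinned to the boot CPU when smp_cpus_done() runs (see rest_init()). Where that guarantee does not hold, the removed pattern remains the usual way to run a function on a particular CPU; the sketch below is illustrative only and its names are not from this commit.

 /*
  * Hypothetical sketch: execute a function on a chosen CPU from code that
  * may itself be running anywhere, as the removed smp_setup_cpu_workfn()/
  * work_on_cpu_safe() pair did.
  */
 #include <linux/workqueue.h>

 static long example_on_cpu(void *data)
 {
        /* Executed by a kworker bound to the requested CPU. */
        return 0;
 }

 static void example_run_on_cpu(int cpu)
 {
        /* work_on_cpu_safe() holds the CPU hotplug lock around the call. */
        work_on_cpu_safe(cpu, example_on_cpu, NULL);
 }
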