Merge branch 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 1 Sep 2015 17:05:44 +0000 (10:05 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 1 Sep 2015 17:05:44 +0000 (10:05 -0700)
Pull x86 irq fixlet from Ingo Molnar:
 "A single change that hides the 'HYP:' line in /proc/interrupts when
  it's unused"

* 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/irq: Hide 'HYP:' line in /proc/interrupts when not on Xen/Hyper-V
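
For readers skimming the combined diff below: the fix wraps the existing
"HYP:" output in a test of the hypervisor callback vector, so the line is
printed only when that vector has actually been installed. A minimal
user-space sketch of the same guard pattern follows; the bitmap, counter
array, and CPU count here are illustrative stand-ins for the kernel's
used_vectors bitmap and per-cpu irq_hv_callback_count, not real kernel
APIs.

#include <stdio.h>
#include <stdbool.h>

#define NR_VECTORS			256
#define HYPERVISOR_CALLBACK_VECTOR	0xf3	/* value used by x86 */
#define NR_FAKE_CPUS			4

static bool used_vectors[NR_VECTORS];		/* stand-in for the kernel bitmap */
static unsigned int hv_callback_count[NR_FAKE_CPUS];	/* stand-in per-cpu counters */

static void show_hyp_line(int prec)
{
	int cpu;

	/* The fix: print nothing when the callback vector was never set up. */
	if (!used_vectors[HYPERVISOR_CALLBACK_VECTOR])
		return;

	printf("%*s: ", prec, "HYP");
	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		printf("%10u ", hv_callback_count[cpu]);
	printf("  Hypervisor callback interrupts\n");
}

int main(void)
{
	show_hyp_line(4);	/* vector unused: no "HYP:" line at all */

	used_vectors[HYPERVISOR_CALLBACK_VECTOR] = true;
	hv_callback_count[0] = 42;
	show_hyp_line(4);	/* vector in use: line is printed */
	return 0;
}

On bare metal, neither Xen nor Hyper-V ever registers the callback
vector, so after this change the "HYP:" line disappears from
/proc/interrupts entirely instead of showing a row of zeros.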

arch/x86/kernel/irq.c

diff --combined arch/x86/kernel/irq.c
index 4616672a4049cb8b7b272ce46b537130b812f6d7,5b537479de8370f3e0bed4bbe5af3703056c0931..ae00b355114dd9922bf977684e8e473b0d55561d
@@@ -139,10 -139,13 +139,13 @@@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_puts(p, "  Machine check polls\n");
  #endif
  #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
-       seq_printf(p, "%*s: ", prec, "HYP");
-       for_each_online_cpu(j)
-               seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
-       seq_puts(p, "  Hypervisor callback interrupts\n");
+       if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
+               seq_printf(p, "%*s: ", prec, "HYP");
+               for_each_online_cpu(j)
+                       seq_printf(p, "%10u ",
+                                  irq_stats(j)->irq_hv_callback_count);
+               seq_puts(p, "  Hypervisor callback interrupts\n");
+       }
  #endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
  #if defined(CONFIG_X86_IO_APIC)
@@@ -216,23 -219,8 +219,23 @@@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        unsigned vector = ~regs->orig_ax;
        unsigned irq;
  
 +      /*
 +       * NB: Unlike exception entries, IRQ entries do not reliably
 +       * handle context tracking in the low-level entry code.  This is
 +       * because syscall entries execute briefly with IRQs on before
 +       * updating context tracking state, so we can take an IRQ from
 +       * kernel mode with CONTEXT_USER.  The low-level entry code only
 +       * updates the context if we came from user mode, so we won't
 +       * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
 +       * code is cleaned up enough that we can cleanly defer enabling
 +       * IRQs.
 +       */
 +
        entering_irq();
  
 +      /* entering_irq() tells RCU that we're not quiescent.  Check it. */
 +      RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 +
        irq = __this_cpu_read(vector_irq[vector]);
  
        if (!handle_irq(irq, regs)) {
@@@ -362,22 -350,14 +365,22 @@@ int check_irq_vectors_for_cpu_disable(void)
                        if (!desc)
                                continue;
  
 +                      /*
 +                       * Protect against concurrent action removal,
 +                       * affinity changes etc.
 +                       */
 +                      raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        cpumask_copy(&affinity_new, data->affinity);
                        cpumask_clear_cpu(this_cpu, &affinity_new);
  
                        /* Do not count inactive or per-cpu irqs. */
 -                      if (!irq_has_action(irq) || irqd_is_per_cpu(data))
 +                      if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
 +                              raw_spin_unlock(&desc->lock);
                                continue;
 +                      }
  
 +                      raw_spin_unlock(&desc->lock);
                        /*
                         * A single irq may be mapped to multiple
                         * cpu's vector_irq[] (for example IOAPIC cluster
                 * vector. If the vector is marked in the used vectors
                 * bitmap or an irq is assigned to it, we don't count
                 * it as available.
 +               *
 +               * As this is an inaccurate snapshot anyway, we can do
 +               * this w/o holding vector_lock.
                 */
                for (vector = FIRST_EXTERNAL_VECTOR;
                     vector < first_system_vector; vector++) {
@@@ -512,11 -489,6 +515,11 @@@ void fixup_irqs(void)
         */
        mdelay(1);
  
 +      /*
 +       * We can walk the vector array of this cpu without holding
 +       * vector_lock because the cpu is already marked !online, so
 +       * nothing else will touch it.
 +       */
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irr;
  
                        irq = __this_cpu_read(vector_irq[vector]);
  
                        desc = irq_to_desc(irq);
 +                      raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
 -                      raw_spin_lock(&desc->lock);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);