Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 5 Oct 2012 18:16:12 +0000 (03:16 +0900)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 5 Oct 2012 18:16:12 +0000 (03:16 +0900)
Pull powerpc updates from Benjamin Herrenschmidt:
 "Some highlights in addition to the usual batch of fixes:

   - 64TB address space support for 64-bit processes by Aneesh Kumar

   - Gavin Shan did a major cleanup & re-organization of our EEH support
     code (IBM's fancy PCI error handling & recovery infrastructure), which
     paves the way for supporting different platform backends, along with
     some rework of the PCIe code for the PowerNV platform in order to
     remove home-made resource allocations and instead use the generic
     code (which is possible after some small improvements to it done by
     Gavin).

   - Uprobes support by Ananth N Mavinakayanahalli

   - A pile of embedded updates from the Freescale folks, including new
     SoC and board support, more KVM work (including preparation for
     64-bit BookE KVM support), ePAPR 1.1 updates, etc..."

Fixup trivial conflicts in drivers/scsi/ipr.c

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits)
  powerpc/iommu: Fix multiple issues with IOMMU pools code
  powerpc: Fix VMX fix for memcpy case
  driver/mtd:IFC NAND:Initialise internal SRAM before any write
  powerpc/fsl-pci: use 'Header Type' to identify PCIE mode
  powerpc/eeh: Don't release eeh_mutex in eeh_phb_pe_get
  powerpc: Remove tlb batching hack for nighthawk
  powerpc: Set paca->data_offset = 0 for boot cpu
  powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
  powerpc/fsl-pci: fix warning when CONFIG_SWIOTLB is disabled
  powerpc/mpc85xx: Update interrupt handling for IFC controller
  powerpc/85xx: Enable USB support in p1023rds_defconfig
  powerpc/smp: Do not disable IPI interrupts during suspend
  powerpc/eeh: Fix crash on converting OF node to edev
  powerpc/eeh: Lock module while handling EEH event
  powerpc/kprobe: Don't emulate store when kprobe stwu r1
  powerpc/kprobe: Complete kprobe and migrate exception frame
  powerpc/kprobe: Introduce a new thread flag
  powerpc: Remove unused __get_user64() and __put_user64()
  powerpc/eeh: Global mutex to protect PE tree
  powerpc/eeh: Remove EEH PE for normal PCI hotplug
  ...

arch/powerpc/configs/ppc64_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/time.c
arch/powerpc/mm/fault.c
arch/powerpc/platforms/85xx/tqm85xx.c
drivers/scsi/ipr.c
drivers/tty/hvc/hvc_console.c

diff --combined arch/powerpc/configs/ppc64_defconfig
index de7c4c53f5cf4746659fb646f116c2fa25a964d2,e263e6a5aca17539e16e5b58b5a52201599c7943..6d03530b75065b96d5c54aacd5a9d991f0d46ec9
@@@ -51,6 -51,7 +51,7 @@@ CONFIG_KEXEC=
  CONFIG_IRQ_ALL_CPUS=y
  CONFIG_MEMORY_HOTREMOVE=y
  CONFIG_SCHED_SMT=y
+ CONFIG_PPC_DENORMALISATION=y
  CONFIG_PCCARD=y
  CONFIG_ELECTRA_CF=y
  CONFIG_HOTPLUG_PCI=m
@@@ -92,6 -93,7 +93,6 @@@ CONFIG_NETFILTER_XT_TARGET_DSCP=
  CONFIG_NETFILTER_XT_TARGET_MARK=m
  CONFIG_NETFILTER_XT_TARGET_NFLOG=m
  CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
 -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
  CONFIG_NETFILTER_XT_TARGET_TPROXY=m
  CONFIG_NETFILTER_XT_TARGET_TRACE=m
  CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
@@@ -486,8 -488,7 +487,8 @@@ CONFIG_CRYPTO_TWOFISH=
  CONFIG_CRYPTO_LZO=m
  # CONFIG_CRYPTO_ANSI_CPRNG is not set
  CONFIG_CRYPTO_HW=y
 -CONFIG_CRYPTO_DEV_NX=m
 +CONFIG_CRYPTO_DEV_NX=y
 +CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
  CONFIG_VIRTUALIZATION=y
  CONFIG_KVM_BOOK3S_64=m
  CONFIG_KVM_BOOK3S_64_HV=y
diff --combined arch/powerpc/configs/pseries_defconfig
index 9f4a9368f51b11a36d9c0b2d053819a294d3e6f2,c169dfb3e42d64094b158f8dcf39a05220586f63..1f710a32ffae840e4fca8af13e13b4607410b5c6
@@@ -48,6 -48,7 +48,7 @@@ CONFIG_MEMORY_HOTREMOVE=
  CONFIG_PPC_64K_PAGES=y
  CONFIG_PPC_SUBPAGE_PROT=y
  CONFIG_SCHED_SMT=y
+ CONFIG_PPC_DENORMALISATION=y
  CONFIG_HOTPLUG_PCI=m
  CONFIG_HOTPLUG_PCI_RPA=m
  CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
@@@ -369,8 -370,7 +370,8 @@@ CONFIG_CRYPTO_TWOFISH=
  CONFIG_CRYPTO_LZO=m
  # CONFIG_CRYPTO_ANSI_CPRNG is not set
  CONFIG_CRYPTO_HW=y
 -CONFIG_CRYPTO_DEV_NX=m
 +CONFIG_CRYPTO_DEV_NX=y
 +CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
  CONFIG_VIRTUALIZATION=y
  CONFIG_KVM_BOOK3S_64=m
  CONFIG_KVM_BOOK3S_64_HV=y
diff --combined arch/powerpc/kernel/process.c
index e9cb51f5f80185024b50d3e8036f3163f7fc3f17,50e504c29bb95294baa35700b74ce7ee44396e60..d5ad666efd8b9a5fde3b93541fb774ae8459881f
@@@ -258,6 -258,7 +258,7 @@@ void do_send_trap(struct pt_regs *regs
  {
        siginfo_t info;
  
+       current->thread.trap_nr = signal_code;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;
@@@ -275,6 -276,7 +276,7 @@@ void do_dabr(struct pt_regs *regs, unsi
  {
        siginfo_t info;
  
+       current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;
                return;
  
        /* Clear the DABR */
-       set_dabr(0);
+       set_dabr(0, 0);
  
        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
@@@ -364,18 -366,19 +366,19 @@@ static void set_debug_reg_defaults(stru
  {
        if (thread->dabr) {
                thread->dabr = 0;
-               set_dabr(0);
+               thread->dabrx = 0;
+               set_dabr(0, 0);
        }
  }
  #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
  #endif        /* CONFIG_PPC_ADV_DEBUG_REGS */
  
- int set_dabr(unsigned long dabr)
+ int set_dabr(unsigned long dabr, unsigned long dabrx)
  {
        __get_cpu_var(current_dabr) = dabr;
  
        if (ppc_md.set_dabr)
-               return ppc_md.set_dabr(dabr);
+               return ppc_md.set_dabr(dabr, dabrx);
  
        /* XXX should we have a CPU_FTR_HAS_DABR ? */
  #ifdef CONFIG_PPC_ADV_DEBUG_REGS
  #endif
  #elif defined(CONFIG_PPC_BOOK3S)
        mtspr(SPRN_DABR, dabr);
+       mtspr(SPRN_DABRX, dabrx);
  #endif
        return 0;
  }
  
@@@ -480,7 -482,7 +482,7 @@@ struct task_struct *__switch_to(struct 
   */
  #ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
-               set_dabr(new->thread.dabr);
+               set_dabr(new->thread.dabr, new->thread.dabrx);
  #endif /* CONFIG_HAVE_HW_BREAKPOINT */
  #endif
  
  
        local_irq_save(flags);
  
 -      account_system_vtime(current);
 -      account_process_vtime(current);
 -
        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
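
Note on the process.c hunks above: set_dabr() now carries the DABRX value
alongside the DABR, thread.dabrx is cleared together with thread.dabr, and
Book3S writes both SPRN_DABR and SPRN_DABRX. A minimal sketch of a caller
under the new two-argument signature follows; DABR_DATA_WRITE,
DABR_TRANSLATION and DABRX_USER are assumed to be the usual asm/reg.h
constants and are not defined in this diff.

        /* Sketch only, not part of the merge: arm a user-mode write
         * watchpoint for the current task via the new set_dabr(dabr, dabrx).
         * The DABR_ and DABRX_ flag names are assumed asm/reg.h constants.
         */
        static void example_arm_watchpoint(unsigned long addr)
        {
                current->thread.dabr  = addr | DABR_DATA_WRITE | DABR_TRANSLATION;
                current->thread.dabrx = DABRX_USER;
                set_dabr(current->thread.dabr, current->thread.dabrx);
        }
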
diff --combined arch/powerpc/kernel/prom_init.c
index 47834a3f49381c47f6efd22eb26e17e4e03b242f,ce68278a5d73108dfb67b9e2a2035da2e172a224..cb6c123722a214691d7c99519a08a5aa27e95577
@@@ -705,7 -705,6 +705,7 @@@ static void __init early_cmdline_parse(
  #endif
  #define OV5_TYPE1_AFFINITY    0x80    /* Type 1 NUMA affinity */
  #define OV5_PFO_HW_RNG                0x80    /* PFO Random Number Generator */
 +#define OV5_PFO_HW_842                0x40    /* PFO Compression Accelerator */
  #define OV5_PFO_HW_ENCR               0x20    /* PFO Encryption Accelerator */
  
  /* Option Vector 6: IBM PAPR hints */
@@@ -775,7 -774,8 +775,7 @@@ static unsigned char ibm_architecture_v
        0,
        0,
        0,
 -      OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR,
 -
 +      OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842,
        /* option vector 6: IBM PAPR hints */
        4 - 2,                          /* length */
        0,
@@@ -1623,63 -1623,6 +1623,63 @@@ static void __init prom_instantiate_rta
  }
  
  #ifdef CONFIG_PPC64
 +/*
 + * Allocate room for and instantiate Stored Measurement Log (SML)
 + */
 +static void __init prom_instantiate_sml(void)
 +{
 +      phandle ibmvtpm_node;
 +      ihandle ibmvtpm_inst;
 +      u32 entry = 0, size = 0;
 +      u64 base;
 +
 +      prom_debug("prom_instantiate_sml: start...\n");
 +
 +      ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
 +      prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
 +      if (!PHANDLE_VALID(ibmvtpm_node))
 +              return;
 +
 +      ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
 +      if (!IHANDLE_VALID(ibmvtpm_inst)) {
 +              prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
 +              return;
 +      }
 +
 +      if (call_prom_ret("call-method", 2, 2, &size,
 +                        ADDR("sml-get-handover-size"),
 +                        ibmvtpm_inst) != 0 || size == 0) {
 +              prom_printf("SML get handover size failed\n");
 +              return;
 +      }
 +
 +      base = alloc_down(size, PAGE_SIZE, 0);
 +      if (base == 0)
 +              prom_panic("Could not allocate memory for sml\n");
 +
 +      prom_printf("instantiating sml at 0x%x...", base);
 +
 +      if (call_prom_ret("call-method", 4, 2, &entry,
 +                        ADDR("sml-handover"),
 +                        ibmvtpm_inst, size, base) != 0 || entry == 0) {
 +              prom_printf("SML handover failed\n");
 +              return;
 +      }
 +      prom_printf(" done\n");
 +
 +      reserve_mem(base, size);
 +
 +      prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
 +                   &base, sizeof(base));
 +      prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
 +                   &size, sizeof(size));
 +
 +      prom_debug("sml base     = 0x%x\n", base);
 +      prom_debug("sml size     = 0x%x\n", (long)size);
 +
 +      prom_debug("prom_instantiate_sml: end...\n");
 +}
 +
  /*
   * Allocate room for and initialize TCE tables
   */
@@@ -1748,7 -1691,7 +1748,7 @@@ static void __init prom_initialize_tce_
                 * else will impact performance, so we always allocate 8MB.
                 * Anton
                 */
-               if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
+               if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
                        minsize = 8UL << 20;
                else
                        minsize = 4UL << 20;
@@@ -2973,11 -2916,6 +2973,11 @@@ unsigned long __init prom_init(unsigne
                prom_instantiate_opal();
  #endif
  
 +#ifdef CONFIG_PPC64
 +      /* instantiate sml */
 +      prom_instantiate_sml();
 +#endif
 +
        /*
         * On non-powermacs, put all CPUs in spin-loops.
         *
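
Note on prom_instantiate_sml() above: it reserves the firmware's Stored
Measurement Log and records its location as the linux,sml-base and
linux,sml-size properties of /ibm,vtpm. A hedged sketch of the consumer
side, i.e. how a driver could later pick the log up again, is below;
of_find_node_by_path()/of_get_property() are standard OF helpers, and the
sml_buf destination is purely illustrative.

        /* Sketch only, not part of the merge: read back the properties set
         * by prom_init and copy the reserved log into a driver buffer.
         * Error handling is trimmed; sml_buf is an illustrative name.
         */
        struct device_node *np = of_find_node_by_path("/ibm,vtpm");
        const u64 *basep = of_get_property(np, "linux,sml-base", NULL);
        const u32 *sizep = of_get_property(np, "linux,sml-size", NULL);

        if (np && basep && sizep)
                memcpy(sml_buf, __va(*basep), *sizep);
        of_node_put(np);
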
diff --combined arch/powerpc/kernel/time.c
index eaa9d0e6abca6747e72acb6e1f69d5475a7c672b,bd693a11d86edafe0cc84684a0198678271ec8a0..c9986fd400d89947966d4e94746f8d8c9913b7a2
@@@ -291,12 -291,13 +291,12 @@@ static inline u64 calculate_stolen_time
   * Account time for a transition between system, hard irq
   * or soft irq state.
   */
 -void account_system_vtime(struct task_struct *tsk)
 +static u64 vtime_delta(struct task_struct *tsk,
 +                      u64 *sys_scaled, u64 *stolen)
  {
 -      u64 now, nowscaled, delta, deltascaled;
 -      unsigned long flags;
 -      u64 stolen, udelta, sys_scaled, user_scaled;
 +      u64 now, nowscaled, deltascaled;
 +      u64 udelta, delta, user_scaled;
  
 -      local_irq_save(flags);
        now = mftb();
        nowscaled = read_spurr(now);
        get_paca()->system_time += now - get_paca()->starttime;
        deltascaled = nowscaled - get_paca()->startspurr;
        get_paca()->startspurr = nowscaled;
  
 -      stolen = calculate_stolen_time(now);
 +      *stolen = calculate_stolen_time(now);
  
        delta = get_paca()->system_time;
        get_paca()->system_time = 0;
         * the user ticks get saved up in paca->user_time_scaled to be
         * used by account_process_tick.
         */
 -      sys_scaled = delta;
 +      *sys_scaled = delta;
        user_scaled = udelta;
        if (deltascaled != delta + udelta) {
                if (udelta) {
 -                      sys_scaled = deltascaled * delta / (delta + udelta);
 -                      user_scaled = deltascaled - sys_scaled;
 +                      *sys_scaled = deltascaled * delta / (delta + udelta);
 +                      user_scaled = deltascaled - *sys_scaled;
                } else {
 -                      sys_scaled = deltascaled;
 +                      *sys_scaled = deltascaled;
                }
        }
        get_paca()->user_time_scaled += user_scaled;
  
 -      if (in_interrupt() || idle_task(smp_processor_id()) != tsk) {
 -              account_system_time(tsk, 0, delta, sys_scaled);
 -              if (stolen)
 -                      account_steal_time(stolen);
 -      } else {
 -              account_idle_time(delta + stolen);
 -      }
 -      local_irq_restore(flags);
 +      return delta;
 +}
 +
 +void vtime_account_system(struct task_struct *tsk)
 +{
 +      u64 delta, sys_scaled, stolen;
 +
 +      delta = vtime_delta(tsk, &sys_scaled, &stolen);
 +      account_system_time(tsk, 0, delta, sys_scaled);
 +      if (stolen)
 +              account_steal_time(stolen);
 +}
 +
 +void vtime_account_idle(struct task_struct *tsk)
 +{
 +      u64 delta, sys_scaled, stolen;
 +
 +      delta = vtime_delta(tsk, &sys_scaled, &stolen);
 +      account_idle_time(delta + stolen);
  }
 -EXPORT_SYMBOL_GPL(account_system_vtime);
  
  /*
   * Transfer the user and system times accumulated in the paca
   * by the exception entry and exit code to the generic process
   * user and system time records.
   * Must be called with interrupts disabled.
 - * Assumes that account_system_vtime() has been called recently
 + * Assumes that vtime_account() has been called recently
   * (i.e. since the last entry from usermode) so that
   * get_paca()->user_time_scaled is up to date.
   */
@@@ -375,12 -366,6 +375,12 @@@ void account_process_tick(struct task_s
        account_user_time(tsk, utime, utimescaled);
  }
  
 +void vtime_task_switch(struct task_struct *prev)
 +{
 +      vtime_account(prev);
 +      account_process_tick(prev, 0);
 +}
 +
  #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
  #define calc_cputime_factors()
  #endif
@@@ -508,8 -493,6 +508,6 @@@ void timer_interrupt(struct pt_regs * r
         */
        may_hard_irq_enable();
  
-       trace_timer_interrupt_entry(regs);
        __get_cpu_var(irq_stat).timer_irqs++;
  
  #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
        old_regs = set_irq_regs(regs);
        irq_enter();
  
+       trace_timer_interrupt_entry(regs);
        if (test_irq_work_pending()) {
                clear_irq_work_pending();
                irq_work_run();
        }
  #endif
  
+       trace_timer_interrupt_exit(regs);
        irq_exit();
        set_irq_regs(old_regs);
-       trace_timer_interrupt_exit(regs);
  }
  
  /*
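
Note on the time.c rework above: the old account_system_vtime() is split
into vtime_delta() plus the two hooks vtime_account_system() and
vtime_account_idle(), and the in_interrupt()/idle_task() decision is no
longer made in the arch code. A rough sketch of the generic dispatcher this
presupposes, reconstructed from the branch removed above (an assumption
about the core-kernel side, not code contained in this merge):

        /* Sketch only, not part of the merge: the idle-vs-system choice the
         * powerpc code used to make is expected to live in a generic helper
         * of roughly this shape, invoking the two new hooks defined above.
         */
        void vtime_account(struct task_struct *tsk)
        {
                if (in_interrupt() || idle_task(smp_processor_id()) != tsk)
                        vtime_account_system(tsk);
                else
                        vtime_account_idle(tsk);
        }
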
diff --combined arch/powerpc/mm/fault.c
index e5f028b5794e6a69498184f9b3b7b617b0701bbb,995f924e007f8f01fa4fe6eaba2b83006d66d81d..5495ebe983a23489403be6fc8388e7434a47f769
@@@ -133,6 -133,7 +133,7 @@@ static int do_sigbus(struct pt_regs *re
        up_read(&current->mm->mmap_sem);
  
        if (user_mode(regs)) {
+               current->thread.trap_nr = BUS_ADRERR;
                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = BUS_ADRERR;
@@@ -470,7 -471,7 +471,7 @@@ bad_area_nosemaphore
        if (is_exec && (error_code & DSISR_PROTFAULT))
                printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected"
                                   " page (%lx) - exploit attempt? (uid: %d)\n",
 -                                 address, current_uid());
 +                                 address, from_kuid(&init_user_ns, current_uid()));
  
        return SIGSEGV;
  
diff --combined arch/powerpc/platforms/85xx/tqm85xx.c
index b62fa87521a320f36e0eacceb937e721774cdf15,d8941eea70754ccee29357bc46ad09432e6b4cbf..b4e58cdc09a53cc3cd9e998a2a1efe0443457eb2
@@@ -59,10 -59,6 +59,6 @@@ static void __init tqm85xx_pic_init(voi
   */
  static void __init tqm85xx_setup_arch(void)
  {
- #ifdef CONFIG_PCI
-       struct device_node *np;
- #endif
        if (ppc_md.progress)
                ppc_md.progress("tqm85xx_setup_arch()", 0);
  
        cpm2_reset();
  #endif
  
- #ifdef CONFIG_PCI
-       for_each_node_by_type(np, "pci") {
-               if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
-                   of_device_is_compatible(np, "fsl,mpc8548-pcie")) {
-                       struct resource rsrc;
-                       if (!of_address_to_resource(np, 0, &rsrc)) {
-                               if ((rsrc.start & 0xfffff) == 0x8000)
-                                       fsl_add_bridge(np, 1);
-                               else
-                                       fsl_add_bridge(np, 0);
-                       }
-               }
-       }
- #endif
+       fsl_pci_assign_primary();
  }
  
  static void tqm85xx_show_cpuinfo(struct seq_file *m)
@@@ -123,9 -106,9 +106,9 @@@ static void __devinit tqm85xx_ti1520_fi
  DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520,
                tqm85xx_ti1520_fixup);
  
- machine_device_initcall(tqm85xx, mpc85xx_common_publish_devices);
+ machine_arch_initcall(tqm85xx, mpc85xx_common_publish_devices);
  
 -static const char *board[] __initdata = {
 +static const char * const board[] __initconst = {
        "tqc,tqm8540",
        "tqc,tqm8541",
        "tqc,tqm8548",
diff --combined drivers/scsi/ipr.c
index e3f29f61cbc3910f7dec04e8fdc702b3077e1454,6077c43edacc00d9b7d05faf1cbce7c4ef4296d8..fe6029f4df164406e406fb8ca6b2c698461fd48a
@@@ -192,7 -192,7 +192,7 @@@ static const struct ipr_chip_t ipr_chip
        { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
  };
  
 -static int ipr_max_bus_speeds [] = {
 +static int ipr_max_bus_speeds[] = {
        IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
  };
  
@@@ -562,26 -562,9 +562,26 @@@ static void ipr_trc_hook(struct ipr_cmn
        trace_entry->u.add_data = add_data;
  }
  #else
 -#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
 +#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
  #endif
  
 +/**
 + * ipr_lock_and_done - Acquire lock and complete command
 + * @ipr_cmd:  ipr command struct
 + *
 + * Return value:
 + *    none
 + **/
 +static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
 +{
 +      unsigned long lock_flags;
 +      struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 +
 +      spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 +      ipr_cmd->done(ipr_cmd);
 +      spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 +}
 +
  /**
   * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
   * @ipr_cmd:  ipr command struct
@@@ -628,49 -611,33 +628,49 @@@ static void ipr_reinit_ipr_cmnd(struct 
   * Return value:
   *    none
   **/
 -static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 +static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
 +                            void (*fast_done) (struct ipr_cmnd *))
  {
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
 +      ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
  }
  
  /**
 - * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 + * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
   * @ioa_cfg:  ioa config struct
   *
   * Return value:
   *    pointer to ipr command struct
   **/
  static
 -struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 +struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
  {
        struct ipr_cmnd *ipr_cmd;
  
        ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
        list_del(&ipr_cmd->queue);
 -      ipr_init_ipr_cmnd(ipr_cmd);
  
        return ipr_cmd;
  }
  
 +/**
 + * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 + * @ioa_cfg:  ioa config struct
 + *
 + * Return value:
 + *    pointer to ipr command struct
 + **/
 +static
 +struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
 +{
 +      struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
 +      ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
 +      return ipr_cmd;
 +}
 +
  /**
   * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
   * @ioa_cfg:  ioa config struct
@@@ -1035,7 -1002,7 +1035,7 @@@ static void ipr_send_hcam(struct ipr_io
   **/
  static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
  {
 -      switch(proto) {
 +      switch (proto) {
        case IPR_PROTO_SATA:
        case IPR_PROTO_SAS_STP:
                res->ata_class = ATA_DEV_ATA;
@@@ -3076,7 -3043,7 +3076,7 @@@ static void ipr_get_ioa_dump(struct ipr
  }
  
  #else
 -#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
 +#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
  #endif
  
  /**
   **/
  static void ipr_release_dump(struct kref *kref)
  {
 -      struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
 +      struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
        struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
        unsigned long lock_flags = 0;
        int i;
@@@ -3175,7 -3142,7 +3175,7 @@@ restart
                                break;
                        }
                }
 -      } while(did_work);
 +      } while (did_work);
  
        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
                if (res->add_to_ml) {
@@@ -3301,7 -3268,7 +3301,7 @@@ static ssize_t ipr_show_log_level(struc
   *    number of bytes printed to buffer
   **/
  static ssize_t ipr_store_log_level(struct device *dev,
 -                                 struct device_attribute *attr,
 +                                 struct device_attribute *attr,
                                   const char *buf, size_t count)
  {
        struct Scsi_Host *shost = class_to_shost(dev);
@@@ -3348,7 -3315,7 +3348,7 @@@ static ssize_t ipr_store_diagnostics(st
                return -EACCES;
  
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 -      while(ioa_cfg->in_reset_reload) {
 +      while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@@ -3715,7 -3682,7 +3715,7 @@@ static int ipr_update_ioa_ucode(struct 
        unsigned long lock_flags;
  
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 -      while(ioa_cfg->in_reset_reload) {
 +      while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@@ -3779,7 -3746,7 +3779,7 @@@ static ssize_t ipr_store_update_fw(stru
        len = snprintf(fname, 99, "%s", buf);
        fname[len-1] = '\0';
  
 -      if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
 +      if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
                dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
                return -EIO;
        }
@@@ -4645,7 -4612,7 +4645,7 @@@ static int ipr_slave_alloc(struct scsi_
   * Return value:
   *    SUCCESS / FAILED
   **/
 -static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
 +static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
  {
        struct ipr_ioa_cfg *ioa_cfg;
        int rc;
        return rc;
  }
  
 -static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
 +static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
  {
        int rc;
  
@@@ -4734,7 -4701,7 +4734,7 @@@ static int ipr_device_reset(struct ipr_
        }
  
        LEAVE;
 -      return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
 +      return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
  }
  
  /**
@@@ -4758,7 -4725,7 +4758,7 @@@ static int ipr_sata_reset(struct ata_li
  
        ENTER;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 -      while(ioa_cfg->in_reset_reload) {
 +      while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
   * Return value:
   *    SUCCESS / FAILED
   **/
 -static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
 +static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
  {
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioa_cfg *ioa_cfg;
        res->resetting_device = 0;
  
        LEAVE;
 -      return (rc ? FAILED : SUCCESS);
 +      return rc ? FAILED : SUCCESS;
  }
  
 -static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
 +static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
  {
        int rc;
  
@@@ -4943,7 -4910,7 +4943,7 @@@ static void ipr_abort_timeout(struct ip
   * Return value:
   *    SUCCESS / FAILED
   **/
 -static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
 +static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
  {
        struct ipr_cmnd *ipr_cmd;
        struct ipr_ioa_cfg *ioa_cfg;
                res->needs_sync_complete = 1;
  
        LEAVE;
 -      return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
 +      return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
  }
  
  /**
   * Return value:
   *    SUCCESS / FAILED
   **/
 -static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
 +static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
  {
        unsigned long flags;
        int rc;
@@@ -5149,9 -5116,8 +5149,9 @@@ static irqreturn_t ipr_isr(int irq, voi
        u16 cmd_index;
        int num_hrrq = 0;
        int irq_none = 0;
 -      struct ipr_cmnd *ipr_cmd;
 +      struct ipr_cmnd *ipr_cmd, *temp;
        irqreturn_t rc = IRQ_NONE;
 +      LIST_HEAD(doneq);
  
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
  
  
                        if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
                                ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
 -                              spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 -                              return IRQ_HANDLED;
 +                              rc = IRQ_HANDLED;
 +                              goto unlock_out;
                        }
  
                        ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
  
                        ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
  
 -                      list_del(&ipr_cmd->queue);
 -                      del_timer(&ipr_cmd->timer);
 -                      ipr_cmd->done(ipr_cmd);
 +                      list_move_tail(&ipr_cmd->queue, &doneq);
  
                        rc = IRQ_HANDLED;
  
                } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
                           int_reg & IPR_PCII_HRRQ_UPDATED) {
                        ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
 -                      spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 -                      return IRQ_HANDLED;
 +                      rc = IRQ_HANDLED;
 +                      goto unlock_out;
                } else
                        break;
        }
        if (unlikely(rc == IRQ_NONE))
                rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
  
 +unlock_out:
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 +      list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
 +              list_del(&ipr_cmd->queue);
 +              del_timer(&ipr_cmd->timer);
 +              ipr_cmd->fast_done(ipr_cmd);
 +      }
 +
        return rc;
  }
  
@@@ -5809,28 -5770,21 +5809,28 @@@ static void ipr_scsi_done(struct ipr_cm
        struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
        struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
        u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 +      unsigned long lock_flags;
  
        scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
  
        if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
 -              scsi_dma_unmap(ipr_cmd->scsi_cmd);
 +              scsi_dma_unmap(scsi_cmd);
 +
 +              spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
                list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
                scsi_cmd->scsi_done(scsi_cmd);
 -      } else
 +              spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 +      } else {
 +              spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
                ipr_erp_start(ioa_cfg, ipr_cmd);
 +              spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 +      }
  }
  
  /**
   * ipr_queuecommand - Queue a mid-layer request
 + * @shost:            scsi host struct
   * @scsi_cmd: scsi command struct
 - * @done:             done function
   *
   * This function queues a request generated by the mid-layer.
   *
   *    SCSI_MLQUEUE_DEVICE_BUSY if device is busy
   *    SCSI_MLQUEUE_HOST_BUSY if host is busy
   **/
 -static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
 -                          void (*done) (struct scsi_cmnd *))
 +static int ipr_queuecommand(struct Scsi_Host *shost,
 +                          struct scsi_cmnd *scsi_cmd)
  {
        struct ipr_ioa_cfg *ioa_cfg;
        struct ipr_resource_entry *res;
        struct ipr_ioarcb *ioarcb;
        struct ipr_cmnd *ipr_cmd;
 -      int rc = 0;
 +      unsigned long lock_flags;
 +      int rc;
  
 -      scsi_cmd->scsi_done = done;
 -      ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
 -      res = scsi_cmd->device->hostdata;
 +      ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
 +
 +      spin_lock_irqsave(shost->host_lock, lock_flags);
        scsi_cmd->result = (DID_OK << 16);
 +      res = scsi_cmd->device->hostdata;
  
        /*
         * We are currently blocking all devices due to a host reset
         * We have told the host to stop giving us new requests, but
         * ERP ops don't count. FIXME
         */
 -      if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
 +      if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
 +              spin_unlock_irqrestore(shost->host_lock, lock_flags);
                return SCSI_MLQUEUE_HOST_BUSY;
 +      }
  
        /*
         * FIXME - Create scsi_set_host_offline interface
         *  and the ioa_is_dead check can be removed
         */
        if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
 -              memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 -              scsi_cmd->result = (DID_NO_CONNECT << 16);
 -              scsi_cmd->scsi_done(scsi_cmd);
 -              return 0;
 +              spin_unlock_irqrestore(shost->host_lock, lock_flags);
 +              goto err_nodev;
 +      }
 +
 +      if (ipr_is_gata(res) && res->sata_port) {
 +              rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
 +              spin_unlock_irqrestore(shost->host_lock, lock_flags);
 +              return rc;
        }
  
 -      if (ipr_is_gata(res) && res->sata_port)
 -              return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
 +      ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
 +      spin_unlock_irqrestore(shost->host_lock, lock_flags);
  
 -      ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 +      ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
        ioarcb = &ipr_cmd->ioarcb;
 -      list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
  
        memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
        ipr_cmd->scsi_cmd = scsi_cmd;
 -      ioarcb->res_handle = res->res_handle;
 -      ipr_cmd->done = ipr_scsi_done;
 -      ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 +      ipr_cmd->done = ipr_scsi_eh_done;
  
        if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
                if (scsi_cmd->underflow == 0)
                        ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
  
 -              if (res->needs_sync_complete) {
 -                      ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
 -                      res->needs_sync_complete = 0;
 -              }
 -
                ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
                if (ipr_is_gscsi(res))
                        ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
            (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
                ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
  
 -      if (likely(rc == 0)) {
 -              if (ioa_cfg->sis64)
 -                      rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
 -              else
 -                      rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
 -      }
 +      if (ioa_cfg->sis64)
 +              rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
 +      else
 +              rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
  
 -      if (unlikely(rc != 0)) {
 -              list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 +      spin_lock_irqsave(shost->host_lock, lock_flags);
 +      if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
 +              list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 +              spin_unlock_irqrestore(shost->host_lock, lock_flags);
 +              if (!rc)
 +                      scsi_dma_unmap(scsi_cmd);
                return SCSI_MLQUEUE_HOST_BUSY;
        }
  
 +      if (unlikely(ioa_cfg->ioa_is_dead)) {
 +              list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 +              spin_unlock_irqrestore(shost->host_lock, lock_flags);
 +              scsi_dma_unmap(scsi_cmd);
 +              goto err_nodev;
 +      }
 +
 +      ioarcb->res_handle = res->res_handle;
 +      if (res->needs_sync_complete) {
 +              ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
 +              res->needs_sync_complete = 0;
 +      }
 +      list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 +      ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
        ipr_send_command(ipr_cmd);
 +      spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
 -}
  
 -static DEF_SCSI_QCMD(ipr_queuecommand)
 +err_nodev:
 +      spin_lock_irqsave(shost->host_lock, lock_flags);
 +      memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 +      scsi_cmd->result = (DID_NO_CONNECT << 16);
 +      scsi_cmd->scsi_done(scsi_cmd);
 +      spin_unlock_irqrestore(shost->host_lock, lock_flags);
 +      return 0;
 +}
  
  /**
   * ipr_ioctl - IOCTL handler
@@@ -5976,7 -5907,7 +5976,7 @@@ static int ipr_ioctl(struct scsi_devic
   * Return value:
   *    pointer to buffer with description string
   **/
 -static const char * ipr_ioa_info(struct Scsi_Host *host)
 +static const char *ipr_ioa_info(struct Scsi_Host *host)
  {
        static char buffer[512];
        struct ipr_ioa_cfg *ioa_cfg;
@@@ -6034,7 -5965,7 +6034,7 @@@ static void ipr_ata_phy_reset(struct at
  
        ENTER;
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 -      while(ioa_cfg->in_reset_reload) {
 +      while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@@ -6074,7 -6005,7 +6074,7 @@@ static void ipr_ata_post_internal(struc
        unsigned long flags;
  
        spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 -      while(ioa_cfg->in_reset_reload) {
 +      while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@@ -6373,14 -6304,14 +6373,14 @@@ static struct ata_port_info sata_port_i
  
  #ifdef CONFIG_PPC_PSERIES
  static const u16 ipr_blocked_processors[] = {
-       PV_NORTHSTAR,
-       PV_PULSAR,
-       PV_POWER4,
-       PV_ICESTAR,
-       PV_SSTAR,
-       PV_POWER4p,
-       PV_630,
-       PV_630p
+       PVR_NORTHSTAR,
+       PVR_PULSAR,
+       PVR_POWER4,
+       PVR_ICESTAR,
+       PVR_SSTAR,
+       PVR_POWER4p,
+       PVR_630,
+       PVR_630p
  };
  
  /**
@@@ -6399,8 -6330,8 +6399,8 @@@ static int ipr_invalid_adapter(struct i
        int i;
  
        if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
 -              for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
 +              for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
-                       if (__is_processor(ipr_blocked_processors[i]))
+                       if (pvr_version_is(ipr_blocked_processors[i]))
                                return 1;
                }
        }
@@@ -6677,7 -6608,7 +6677,7 @@@ static void ipr_scsi_bus_speed_limit(st
   *    none
   **/
  static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
 -                                              struct ipr_mode_pages *mode_pages)
 +                                        struct ipr_mode_pages *mode_pages)
  {
        int i, entry_length;
        struct ipr_dev_bus_entry *bus;
@@@ -8091,7 -8022,7 +8091,7 @@@ static void ipr_reset_ioa_job(struct ip
                ipr_reinit_ipr_cmnd(ipr_cmd);
                ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
                rc = ipr_cmd->job_step(ipr_cmd);
 -      } while(rc == IPR_RC_JOB_CONTINUE);
 +      } while (rc == IPR_RC_JOB_CONTINUE);
  }
  
  /**
@@@ -8352,7 -8283,7 +8352,7 @@@ static void ipr_free_cmd_blks(struct ip
        }
  
        if (ioa_cfg->ipr_cmd_pool)
 -              pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
 +              pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
  
        kfree(ioa_cfg->ipr_cmnd_list);
        kfree(ioa_cfg->ipr_cmnd_list_dma);
@@@ -8432,8 -8363,8 +8432,8 @@@ static int __devinit ipr_alloc_cmd_blks
        dma_addr_t dma_addr;
        int i;
  
 -      ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
 -                                               sizeof(struct ipr_cmnd), 512, 0);
 +      ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
 +                                              sizeof(struct ipr_cmnd), 512, 0);
  
        if (!ioa_cfg->ipr_cmd_pool)
                return -ENOMEM;
        }
  
        for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
 -              ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
 +              ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
  
                if (!ipr_cmd) {
                        ipr_free_cmd_blks(ioa_cfg);
@@@ -8844,7 -8775,8 +8844,7 @@@ static int __devinit ipr_probe_ioa(stru
  
        ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
        memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
 -      ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
 -                    sata_port_info.flags, &ipr_sata_ops);
 +      ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
  
        ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
  
@@@ -9032,7 -8964,7 +9032,7 @@@ static void ipr_scan_vsets(struct ipr_i
        int target, lun;
  
        for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
 -              for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
 +              for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
                        scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
  }
  
@@@ -9078,7 -9010,7 +9078,7 @@@ static void __ipr_remove(struct pci_de
        ENTER;
  
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 -      while(ioa_cfg->in_reset_reload) {
 +      while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
  
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
        wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 -      flush_work_sync(&ioa_cfg->work_q);
 +      flush_work(&ioa_cfg->work_q);
        spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
  
        spin_lock(&ipr_driver_lock);
@@@ -9207,7 -9139,7 +9207,7 @@@ static void ipr_shutdown(struct pci_de
        unsigned long lock_flags = 0;
  
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 -      while(ioa_cfg->in_reset_reload) {
 +      while (ioa_cfg->in_reset_reload) {
                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
                spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@@ -9296,7 -9228,7 +9296,7 @@@ static struct pci_device_id ipr_pci_tab
  };
  MODULE_DEVICE_TABLE(pci, ipr_pci_table);
  
 -static struct pci_error_handlers ipr_err_handler = {
 +static const struct pci_error_handlers ipr_err_handler = {
        .error_detected = ipr_pci_error_detected,
        .slot_reset = ipr_pci_slot_reset,
  };
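
Note on the ipr.c queuecommand conversion above: the driver drops the
DEF_SCSI_QCMD()-generated wrapper and implements ipr_queuecommand() against
the Scsi_Host directly, taking host_lock only around the short critical
sections so that DMA mapping and IOARCB setup run unlocked. For contrast, a
paraphrase of what the removed DEF_SCSI_QCMD(ipr_queuecommand) wrapper did
(an approximation of the scsi_host.h macro of that era, not code from this
merge):

        /* Approximate pre-change behaviour: the whole ->queuecommand path
         * ran with host_lock held around the _lck variant.
         */
        static int ipr_queuecommand(struct Scsi_Host *shost,
                                    struct scsi_cmnd *cmd)
        {
                unsigned long irq_flags;
                int rc;

                spin_lock_irqsave(shost->host_lock, irq_flags);
                rc = ipr_queuecommand_lck(cmd, cmd->scsi_done);
                spin_unlock_irqrestore(shost->host_lock, irq_flags);
                return rc;
        }
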
diff --combined drivers/tty/hvc/hvc_console.c
index 4a652999380f691a359fc614f605cb18ede57ae9,f1d4d96a4a0784b95160de07c665635d159ba237..a5dec1ca1b826bad18d9a1bf5c69eae17bfba423
@@@ -245,6 -245,20 +245,20 @@@ static void hvc_port_destruct(struct tt
        kfree(hp);
  }
  
+ static void hvc_check_console(int index)
+ {
+       /* Already enabled, bail out */
+       if (hvc_console.flags & CON_ENABLED)
+               return;
+       /* If this index is what the user requested, then register
+        * now (setup won't fail at this point).  It's ok to just
+        * call register again if previously .setup failed.
+        */
+       if (index == hvc_console.index)
+               register_console(&hvc_console);
+ }
  /*
   * hvc_instantiate() is an early console discovery method which locates
   * consoles * prior to the vio subsystem discovering them.  Hotplugged
@@@ -275,12 -289,8 +289,8 @@@ int hvc_instantiate(uint32_t vtermno, i
        if (last_hvc < index)
                last_hvc = index;
  
-       /* if this index is what the user requested, then register
-        * now (setup won't fail at this point).  It's ok to just
-        * call register again if previously .setup failed.
-        */
-       if (index == hvc_console.index)
-               register_console(&hvc_console);
+       /* check if we need to re-register the kernel console */
+       hvc_check_console(index);
  
        return 0;
  }
@@@ -299,33 -309,20 +309,33 @@@ static void hvc_unthrottle(struct tty_s
        hvc_kick();
  }
  
 +static int hvc_install(struct tty_driver *driver, struct tty_struct *tty)
 +{
 +      struct hvc_struct *hp;
 +      int rc;
 +
 +      /* Auto increments kref reference if found. */
 +      if (!(hp = hvc_get_by_index(tty->index)))
 +              return -ENODEV;
 +
 +      tty->driver_data = hp;
 +
 +      rc = tty_port_install(&hp->port, driver, tty);
 +      if (rc)
 +              tty_port_put(&hp->port);
 +      return rc;
 +}
 +
  /*
   * The TTY interface won't be used until after the vio layer has exposed the vty
   * adapter to the kernel.
   */
  static int hvc_open(struct tty_struct *tty, struct file * filp)
  {
 -      struct hvc_struct *hp;
 +      struct hvc_struct *hp = tty->driver_data;
        unsigned long flags;
        int rc = 0;
  
 -      /* Auto increments kref reference if found. */
 -      if (!(hp = hvc_get_by_index(tty->index)))
 -              return -ENODEV;
 -
        spin_lock_irqsave(&hp->port.lock, flags);
        /* Check and then increment for fast path open. */
        if (hp->port.count++ > 0) {
        } /* else count == 0 */
        spin_unlock_irqrestore(&hp->port.lock, flags);
  
 -      tty->driver_data = hp;
        tty_port_tty_set(&hp->port, tty);
  
        if (hp->ops->notifier_add)
@@@ -401,11 -399,6 +411,11 @@@ static void hvc_close(struct tty_struc
                                hp->vtermno, hp->port.count);
                spin_unlock_irqrestore(&hp->port.lock, flags);
        }
 +}
 +
 +static void hvc_cleanup(struct tty_struct *tty)
 +{
 +      struct hvc_struct *hp = tty->driver_data;
  
        tty_port_put(&hp->port);
  }
@@@ -558,7 -551,7 +568,7 @@@ static int hvc_write_room(struct tty_st
        struct hvc_struct *hp = tty->driver_data;
  
        if (!hp)
 -              return -1;
 +              return 0;
  
        return hp->outbuf_size - hp->n_outbuf;
  }
@@@ -809,10 -802,8 +819,10 @@@ static void hvc_poll_put_char(struct tt
  #endif
  
  static const struct tty_operations hvc_ops = {
 +      .install = hvc_install,
        .open = hvc_open,
        .close = hvc_close,
 +      .cleanup = hvc_cleanup,
        .write = hvc_write,
        .hangup = hvc_hangup,
        .unthrottle = hvc_unthrottle,
@@@ -877,10 -868,15 +887,15 @@@ struct hvc_struct *hvc_alloc(uint32_t v
                i = ++last_hvc;
  
        hp->index = i;
+       cons_ops[i] = ops;
+       vtermnos[i] = vtermno;
  
        list_add_tail(&(hp->next), &hvc_structs);
        spin_unlock(&hvc_structs_lock);
  
+       /* check if we need to re-register the kernel console */
+       hvc_check_console(i);
        return hp;
  }
  EXPORT_SYMBOL_GPL(hvc_alloc);
@@@ -893,8 -889,12 +908,12 @@@ int hvc_remove(struct hvc_struct *hp
        tty = tty_port_tty_get(&hp->port);
  
        spin_lock_irqsave(&hp->lock, flags);
-       if (hp->index < MAX_NR_HVC_CONSOLES)
+       if (hp->index < MAX_NR_HVC_CONSOLES) {
+               console_lock();
                vtermnos[hp->index] = -1;
+               cons_ops[hp->index] = NULL;
+               console_unlock();
+       }
  
        /* Don't whack hp->irq because tty_hangup() will need to free the irq. */