]> git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 10 May 2013 14:48:05 +0000 (07:48 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 10 May 2013 14:48:05 +0000 (07:48 -0700)
Pull MIPS updates from Ralf Baechle:

 - More work on DT support for various platforms

 - Various fixes that were too late to make it straight into 3.9

 - Improved platform support, in particular the Netlogic XLR and
   BCM63xx, and the SEAD3 and Malta eval boards.

 - Support for several Ralink SOC families.

 - Complete support for the microMIPS ASE which basically reencodes the
   existing MIPS32/MIPS64 ISA to use non-constant size instructions.

 - Some fallout from LTO work which remove old cruft and will generally
   make the MIPS kernel easier to maintain and resistant to compiler
   optimization, even in absence of LTO.

 - KVM support.  While MIPS has announced hardware virtualization
   extensions this KVM extension uses trap and emulate mode for
   virtualization of MIPS32.  More KVM work to add support for VZ
   hardware virtualization extensions and MIPS64 will probably already
   be merged for 3.11.

Most of this has been sitting in -next for a long time.  All defconfigs
have been built or run-time tested except three for which fixes are being
sent by other maintainers.

Semantic conflict with kvm updates done as per Ralf

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (118 commits)
  MIPS: Add new GIC clockevent driver.
  MIPS: Formatting clean-ups for clocksources.
  MIPS: Refactor GIC clocksource code.
  MIPS: Move 'gic_frequency' to common location.
  MIPS: Move 'gic_present' to common location.
  MIPS: MIPS16e: Add unaligned access support.
  MIPS: MIPS16e: Support handling of delay slots.
  MIPS: MIPS16e: Add instruction formats.
  MIPS: microMIPS: Optimise 'strnlen' core library function.
  MIPS: microMIPS: Optimise 'strlen' core library function.
  MIPS: microMIPS: Optimise 'strncpy' core library function.
  MIPS: microMIPS: Optimise 'memset' core library function.
  MIPS: microMIPS: Add configuration option for microMIPS kernel.
  MIPS: microMIPS: Disable LL/SC and fix linker bug.
  MIPS: microMIPS: Add vdso support.
  MIPS: microMIPS: Add unaligned access support.
  MIPS: microMIPS: Support handling of delay slots.
  MIPS: microMIPS: Add support for exception handling.
  MIPS: microMIPS: Floating point support.
  MIPS: microMIPS: Fix macro naming in micro-assembler.
  ...

12 files changed:
1  2 
Documentation/devicetree/bindings/vendor-prefixes.txt
arch/mips/Kconfig
arch/mips/bcm63xx/dev-spi.c
arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h
arch/mips/kernel/Makefile
arch/mips/kernel/process.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kvm/kvm_mips.c
arch/mips/ralink/dts/rt3052_eval.dts
arch/mips/sgi-ip27/ip27-memory.c
virt/kvm/kvm_main.c

index 4d1919bf23322ac2209eb3046bfce404c5ced483,65274126dfadc6ab0d6cb7839b97225123db30ac..6931c4348d240ed9f8bf6b21a0d75f9c520edf1d
@@@ -5,7 -5,6 +5,7 @@@ using them to avoid name-space collisio
  
  ad    Avionic Design GmbH
  adi   Analog Devices, Inc.
 +aeroflexgaisler       Aeroflex Gaisler AB
  ak    Asahi Kasei Corp.
  amcc  Applied Micro Circuits Corporation (APM, formally AMCC)
  apm   Applied Micro Circuits Corporation (APM)
@@@ -42,6 -41,7 +42,7 @@@ onnn  ON Semiconductor Corp
  picochip      Picochip Ltd
  powervr       PowerVR (deprecated, use img)
  qcom  Qualcomm, Inc.
+ ralink        Mediatek/Ralink Technology Corp.
  ramtron       Ramtron International
  realtek Realtek Semiconductor Corp.
  renesas       Renesas Electronics Corporation
@@@ -49,7 -49,6 +50,7 @@@ samsung       Samsung Semiconducto
  sbs   Smart Battery System
  schindler     Schindler
  sil   Silicon Image
 +silabs        Silicon Laboratories
  simtek
  sirf  SiRF Technology, Inc.
  snps  Synopsys, Inc.
diff --combined arch/mips/Kconfig
index a90cfc702bb1a31cade6c98d0bd4eb0ead652f8d,033ffd0abe2c1b13664f7fb71f780dcce47c745d..7a58ab933b206a397c56b65f6a608fc4f9a1d8eb
@@@ -61,7 -61,8 +61,7 @@@ config MIPS_ALCHEM
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_APM_EMULATION
 -      select GENERIC_GPIO
 -      select ARCH_WANT_OPTIONAL_GPIOLIB
 +      select ARCH_REQUIRE_GPIOLIB
        select SYS_SUPPORTS_ZBOOT
        select USB_ARCH_HAS_OHCI
        select USB_ARCH_HAS_EHCI
@@@ -224,6 -225,7 +224,6 @@@ config MACH_JZ474
        select SYS_SUPPORTS_ZBOOT_UART16550
        select DMA_NONCOHERENT
        select IRQ_CPU
 -      select GENERIC_GPIO
        select ARCH_REQUIRE_GPIOLIB
        select SYS_HAS_EARLY_PRINTK
        select HAVE_PWM
@@@ -304,7 -306,6 +304,6 @@@ config MIPS_MALT
        select HW_HAS_PCI
        select I8253
        select I8259
-       select MIPS_BOARDS_GEN
        select MIPS_BONITO64
        select MIPS_CPU_SCACHE
        select PCI_GT64XXX_PCI0
@@@ -335,12 -336,12 +334,12 @@@ config MIPS_SEAD
        select BOOT_RAW
        select CEVT_R4K
        select CSRC_R4K
+       select CSRC_GIC
        select CPU_MIPSR2_IRQ_VI
        select CPU_MIPSR2_IRQ_EI
        select DMA_NONCOHERENT
        select IRQ_CPU
        select IRQ_GIC
-       select MIPS_BOARDS_GEN
        select MIPS_CPU_SCACHE
        select MIPS_MSC
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_SMARTMIPS
+       select SYS_SUPPORTS_MICROMIPS
        select USB_ARCH_HAS_EHCI
        select USB_EHCI_BIG_ENDIAN_DESC
        select USB_EHCI_BIG_ENDIAN_MMIO
@@@ -402,8 -404,6 +402,8 @@@ config PMC_MS
        select IRQ_CPU
        select SERIAL_8250
        select SERIAL_8250_CONSOLE
 +      select USB_EHCI_BIG_ENDIAN_MMIO
 +      select USB_EHCI_BIG_ENDIAN_DESC
        help
          This adds support for the PMC-Sierra family of Multi-Service
          Processor System-On-A-Chips.  These parts include a number
@@@ -910,6 -910,9 +910,9 @@@ config CEVT_GT641X
  config CEVT_R4K
        bool
  
+ config CEVT_GIC
+       bool
  config CEVT_SB1250
        bool
  
@@@ -935,6 -938,7 +938,6 @@@ config CSRC_SB125
        bool
  
  config GPIO_TXX9
 -      select GENERIC_GPIO
        select ARCH_REQUIRE_GPIOLIB
        bool
  
@@@ -982,9 -986,6 +985,6 @@@ config MIPS_MS
  config MIPS_NILE4
        bool
  
- config MIPS_DISABLE_OBSOLETE_IDE
-       bool
  config SYNC_R4K
        bool
  
@@@ -1006,6 -1007,9 +1006,6 @@@ config GENERIC_ISA_DMA_SUPPORT_BROKE
  config ISA_DMA_API
        bool
  
 -config GENERIC_GPIO
 -      bool
 -
  config HOLES_IN_ZONE
        bool
  
@@@ -1075,9 -1079,6 +1075,6 @@@ config IRQ_GT641X
  config IRQ_GIC
        bool
  
- config MIPS_BOARDS_GEN
-       bool
  config PCI_GT64XXX_PCI0
        bool
  
@@@ -1106,6 -1107,7 +1103,6 @@@ config SOC_PNX833
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_BIG_ENDIAN
 -      select GENERIC_GPIO
        select CPU_MIPSR2_IRQ_VI
  
  config SOC_PNX8335
@@@ -1147,7 -1149,7 +1144,7 @@@ config BOOT_ELF3
  
  config MIPS_L1_CACHE_SHIFT
        int
-       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL
+       default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X
        default "6" if MIPS_CPU_SCACHE
        default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
        default "5"
@@@ -1196,6 -1198,7 +1193,6 @@@ config CPU_LOONGSON2
        bool "Loongson 2F"
        depends on SYS_HAS_CPU_LOONGSON2F
        select CPU_LOONGSON2
 -      select GENERIC_GPIO
        select ARCH_REQUIRE_GPIOLIB
        help
          The Loongson 2F processor implements the MIPS III instruction set
@@@ -1236,6 -1239,7 +1233,7 @@@ config CPU_MIPS32_R
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_32BIT_KERNEL
        select CPU_SUPPORTS_HIGHMEM
+       select HAVE_KVM
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture.  Most modern embedded systems with a 32-bit
@@@ -1427,7 -1431,6 +1425,7 @@@ config CPU_CAVIUM_OCTEO
        select CPU_SUPPORTS_HUGEPAGES
        select LIBFDT
        select USE_OF
 +      select USB_EHCI_BIG_ENDIAN_MMIO
        help
          The Cavium Octeon processor is a highly integrated chip containing
          many ethernet hardware widgets for networking tasks. The processor
@@@ -1731,11 -1734,26 +1729,25 @@@ config 32BI
  config 64BIT
        bool "64-bit kernel"
        depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
 -      select HAVE_SYSCALL_WRAPPERS
        help
          Select this option if you want to build a 64-bit kernel.
  
  endchoice
  
+ config KVM_GUEST
+       bool "KVM Guest Kernel"
+       help
+         Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+ config KVM_HOST_FREQ
+       int "KVM Host Processor Frequency (MHz)"
+       depends on KVM_GUEST
+       default 500
+       help
+         Select this option if building a guest kernel for KVM to skip
+         RTC emulation when determining guest CPU Frequency.  Instead, the guest
+         processor frequency is automatically derived from the host frequency.
  choice
        prompt "Kernel page size"
        default PAGE_SIZE_4KB
@@@ -1811,6 -1829,15 +1823,15 @@@ config FORCE_MAX_ZONEORDE
          The page size is not necessarily 4KB.  Keep this in mind
          when choosing a value for this option.
  
+ config CEVT_GIC
+       bool "Use GIC global counter for clock events"
+       depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+       help
+         Use the GIC global counter for the clock events. The R4K clock
+         event driver is always present, so if the platform ends up not
+         detecting a GIC, it will fall back to the R4K timer for the
+         generation of clock events.
  config BOARD_SCACHE
        bool
  
@@@ -2016,6 -2043,7 +2037,7 @@@ config SB1_PASS_2_1_WORKAROUND
        depends on CPU_SB1 && CPU_SB1_PASS_2
        default y
  
  config 64BIT_PHYS_ADDR
        bool
  
@@@ -2034,6 -2062,13 +2056,13 @@@ config CPU_HAS_SMARTMIP
          you don't know you probably don't have SmartMIPS and should say N
          here.
  
+ config CPU_MICROMIPS
+       depends on SYS_SUPPORTS_MICROMIPS
+       bool "Build kernel using microMIPS ISA"
+       help
+         When this option is enabled the kernel will be built using the
+         microMIPS ISA
  config CPU_HAS_WB
        bool
  
@@@ -2096,6 -2131,9 +2125,9 @@@ config SYS_SUPPORTS_HIGHME
  config SYS_SUPPORTS_SMARTMIPS
        bool
  
+ config SYS_SUPPORTS_MICROMIPS
+       bool
  config ARCH_FLATMEM_ENABLE
        def_bool y
        depends on !NUMA && !CPU_LOONGSON2
@@@ -2532,14 -2570,7 +2564,14 @@@ source "kernel/power/Kconfig
  
  endmenu
  
 -source "arch/mips/kernel/cpufreq/Kconfig"
 +config MIPS_EXTERNAL_TIMER
 +      bool
 +
 +if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 +menu "CPU Power Management"
 +source "drivers/cpufreq/Kconfig"
 +endmenu
 +endif
  
  source "net/Kconfig"
  
@@@ -2556,3 -2587,5 +2588,5 @@@ source "security/Kconfig
  source "crypto/Kconfig"
  
  source "lib/Kconfig"
+ source "arch/mips/kvm/Kconfig"
index e97fd60e92ef289e215908b7327889435f3f702f,854e9367e235d3f646f7cb4fa3f0bfad98f1e9f2..3065bb61820d5befea57f31580c91c15afcc666b
  /*
   * register offsets
   */
- static const unsigned long bcm6338_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6338)
- };
  static const unsigned long bcm6348_regs_spi[] = {
        __GEN_SPI_REGS_TABLE(6348)
  };
@@@ -34,23 -30,15 +30,15 @@@ static const unsigned long bcm6358_regs
        __GEN_SPI_REGS_TABLE(6358)
  };
  
- static const unsigned long bcm6368_regs_spi[] = {
-       __GEN_SPI_REGS_TABLE(6368)
- };
  const unsigned long *bcm63xx_regs_spi;
  EXPORT_SYMBOL(bcm63xx_regs_spi);
  
  static __init void bcm63xx_spi_regs_init(void)
  {
-       if (BCMCPU_IS_6338())
-               bcm63xx_regs_spi = bcm6338_regs_spi;
-       if (BCMCPU_IS_6348())
+       if (BCMCPU_IS_6338() || BCMCPU_IS_6348())
                bcm63xx_regs_spi = bcm6348_regs_spi;
-       if (BCMCPU_IS_6358())
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
                bcm63xx_regs_spi = bcm6358_regs_spi;
-       if (BCMCPU_IS_6368())
-               bcm63xx_regs_spi = bcm6368_regs_spi;
  }
  #else
  static __init void bcm63xx_spi_regs_init(void) { }
@@@ -85,21 -73,32 +73,21 @@@ static struct platform_device bcm63xx_s
  
  int __init bcm63xx_spi_register(void)
  {
 -      struct clk *periph_clk;
 -
        if (BCMCPU_IS_6328() || BCMCPU_IS_6345())
                return -ENODEV;
  
 -      periph_clk = clk_get(NULL, "periph");
 -      if (IS_ERR(periph_clk)) {
 -              pr_err("unable to get periph clock\n");
 -              return -ENODEV;
 -      }
 -
 -      /* Set bus frequency */
 -      spi_pdata.speed_hz = clk_get_rate(periph_clk);
 -
        spi_resources[0].start = bcm63xx_regset_address(RSET_SPI);
        spi_resources[0].end = spi_resources[0].start;
        spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI);
  
        if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
-               spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1;
-               spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE;
-               spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT;
-               spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH;
+               spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1;
+               spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE;
+               spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT;
+               spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH;
        }
  
-       if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
+       if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) {
                spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1;
                spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE;
                spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT;
index b0184cf025755a49d5fc7ca6614e82c9866b4fd8,6515da99829333c8f628933114c588c70d6014e6..c426cabc620a1df26338693f77e0485e07402cb1
@@@ -13,6 -13,7 +13,6 @@@ struct bcm63xx_spi_pdata 
        unsigned int    msg_ctl_width;
        int             bus_num;
        int             num_chipselect;
 -      u32             speed_hz;
  };
  
  enum bcm63xx_regs_spi {
@@@ -71,18 -72,13 +71,13 @@@ static inline unsigned long bcm63xx_spi
  
        return bcm63xx_regs_spi[reg];
  #else
- #ifdef CONFIG_BCM63XX_CPU_6338
-       __GEN_SPI_RSET(6338)
- #endif
- #ifdef CONFIG_BCM63XX_CPU_6348
+ #if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348)
        __GEN_SPI_RSET(6348)
  #endif
- #ifdef CONFIG_BCM63XX_CPU_6358
+ #if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \
+       defined(CONFIG_BCM63XX_CPU_6368)
        __GEN_SPI_RSET(6358)
  #endif
- #ifdef CONFIG_BCM63XX_CPU_6368
-       __GEN_SPI_RSET(6368)
- #endif
  #endif
        return 0;
  }
index 520a908d45d62af6d3edb1ef83b2f0f990faa562,cb96ace5c8c5ce4d5cd3873d404c145f08dcaf5b..6ad9e04bdf6210a8b722e92aca5e49161cb4deca
@@@ -5,7 -5,7 +5,7 @@@
  extra-y               := head.o vmlinux.lds
  
  obj-y         += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-                  ptrace.o reset.o setup.o signal.o syscall.o \
+                  prom.o ptrace.o reset.o setup.o signal.o syscall.o \
                   time.o topology.o traps.o unaligned.o watch.o vdso.o
  
  ifdef CONFIG_FUNCTION_TRACER
@@@ -19,15 -19,16 +19,16 @@@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1
  obj-$(CONFIG_CEVT_R4K)                += cevt-r4k.o
  obj-$(CONFIG_MIPS_MT_SMTC)    += cevt-smtc.o
  obj-$(CONFIG_CEVT_DS1287)     += cevt-ds1287.o
+ obj-$(CONFIG_CEVT_GIC)                += cevt-gic.o
  obj-$(CONFIG_CEVT_GT641XX)    += cevt-gt641xx.o
  obj-$(CONFIG_CEVT_SB1250)     += cevt-sb1250.o
  obj-$(CONFIG_CEVT_TXX9)               += cevt-txx9.o
  obj-$(CONFIG_CSRC_BCM1480)    += csrc-bcm1480.o
+ obj-$(CONFIG_CSRC_GIC)                += csrc-gic.o
  obj-$(CONFIG_CSRC_IOASIC)     += csrc-ioasic.o
  obj-$(CONFIG_CSRC_POWERTV)    += csrc-powertv.o
  obj-$(CONFIG_CSRC_R4K)                += csrc-r4k.o
  obj-$(CONFIG_CSRC_SB1250)     += csrc-sb1250.o
- obj-$(CONFIG_CSRC_GIC)                += csrc-gic.o
  obj-$(CONFIG_SYNC_R4K)                += sync-r4k.o
  
  obj-$(CONFIG_STACKTRACE)      += stacktrace.o
@@@ -86,12 -87,12 +87,10 @@@ obj-$(CONFIG_EARLY_PRINTK) += early_pri
  obj-$(CONFIG_SPINLOCK_TEST)   += spinlock_test.o
  obj-$(CONFIG_MIPS_MACHINE)    += mips_machine.o
  
- obj-$(CONFIG_OF)              += prom.o
  CFLAGS_cpu-bugs64.o   = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
  
  obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
  
 -obj-$(CONFIG_MIPS_CPUFREQ)    += cpufreq/
 -
  obj-$(CONFIG_PERF_EVENTS)     += perf_event.o
  obj-$(CONFIG_HW_PERF_EVENTS)  += perf_event_mipsxx.o
  
index cfc742d75b7f3a74f1ace85269d55e0e0b85a48d,ef533760d2c891fd897e9cb93d7d607382cc5190..eb902c1f0cad4c50031836ad73225f620be8d7d5
@@@ -7,6 -7,7 +7,7 @@@
   * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
   * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
   * Copyright (C) 2004 Thiemo Seufer
+  * Copyright (C) 2013  Imagination Technologies Ltd.
   */
  #include <linux/errno.h>
  #include <linux/sched.h>
  #include <asm/inst.h>
  #include <asm/stacktrace.h>
  
 -/*
 - * The idle thread. There's no useful work to be done, so just try to conserve
 - * power and have a low exit latency (ie sit in a loop waiting for somebody to
 - * say that they'd like to reschedule)
 - */
 -void __noreturn cpu_idle(void)
 +#ifdef CONFIG_HOTPLUG_CPU
 +void arch_cpu_idle_dead(void)
  {
 -      int cpu;
 -
 -      /* CPU is going idle. */
 -      cpu = smp_processor_id();
 +      /* What the heck is this check doing ? */
 +      if (!cpu_isset(smp_processor_id(), cpu_callin_map))
 +              play_dead();
 +}
 +#endif
  
 -      /* endless idle loop with no priority at all */
 -      while (1) {
 -              tick_nohz_idle_enter();
 -              rcu_idle_enter();
 -              while (!need_resched() && cpu_online(cpu)) {
 +void arch_cpu_idle(void)
 +{
  #ifdef CONFIG_MIPS_MT_SMTC
 -                      extern void smtc_idle_loop_hook(void);
 +      extern void smtc_idle_loop_hook(void);
  
 -                      smtc_idle_loop_hook();
 -#endif
 -
 -                      if (cpu_wait) {
 -                              /* Don't trace irqs off for idle */
 -                              stop_critical_timings();
 -                              (*cpu_wait)();
 -                              start_critical_timings();
 -                      }
 -              }
 -#ifdef CONFIG_HOTPLUG_CPU
 -              if (!cpu_online(cpu) && !cpu_isset(cpu, cpu_callin_map))
 -                      play_dead();
 +      smtc_idle_loop_hook();
  #endif
 -              rcu_idle_exit();
 -              tick_nohz_idle_exit();
 -              schedule_preempt_disabled();
 -      }
 +      if (cpu_wait)
 +              (*cpu_wait)();
 +      else
 +              local_irq_enable();
  }
  
  asmlinkage void ret_from_fork(void);
@@@ -225,34 -244,115 +226,115 @@@ struct mips_frame_info 
  
  static inline int is_ra_save_ins(union mips_instruction *ip)
  {
+ #ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction mmi;
+       /*
+        * swsp ra,offset
+        * swm16 reglist,offset(sp)
+        * swm32 reglist,offset(sp)
+        * sw32 ra,offset(sp)
+        * jradiussp - NOT SUPPORTED
+        *
+        * microMIPS is way more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r5_format.opcode == mm_swsp16_op &&
+                        mmi.mm16_r5_format.rt == 31) ||
+                       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
+                        mmi.mm16_m_format.func == mm_swm16_op));
+       }
+       else {
+               mmi.halfword[0] = ip->halfword[1];
+               mmi.halfword[1] = ip->halfword[0];
+               return ((mmi.mm_m_format.opcode == mm_pool32b_op &&
+                        mmi.mm_m_format.rd > 9 &&
+                        mmi.mm_m_format.base == 29 &&
+                        mmi.mm_m_format.func == mm_swm32_func) ||
+                       (mmi.i_format.opcode == mm_sw32_op &&
+                        mmi.i_format.rs == 29 &&
+                        mmi.i_format.rt == 31));
+       }
+ #else
        /* sw / sd $ra, offset($sp) */
        return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 &&
                ip->i_format.rt == 31;
+ #endif
  }
  
  static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
  {
+ #ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * jr16,jrc,jalr16,jalr16
+        * jal
+        * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+        * jraddiusp - NOT SUPPORTED
+        *
+        * microMIPS is kind of more fun...
+        */
+       union mips_instruction mmi;
+       mmi.word = (ip->halfword[0] << 16);
+       if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
+           (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
+           ip->j_format.opcode == mm_jal32_op)
+               return 1;
+       if (ip->r_format.opcode != mm_pool32a_op ||
+                       ip->r_format.func != mm_pool32axf_op)
+               return 0;
+       return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
+ #else
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+ #endif
  }
  
  static inline int is_sp_move_ins(union mips_instruction *ip)
  {
+ #ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * addiusp -imm
+        * addius5 sp,-imm
+        * addiu32 sp,sp,-imm
+        * jradiussp - NOT SUPPORTED
+        *
+        * microMIPS is not more fun...
+        */
+       if (mm_insn_16bit(ip->halfword[0])) {
+               union mips_instruction mmi;
+               mmi.word = (ip->halfword[0] << 16);
+               return ((mmi.mm16_r3_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
+                       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
+                        mmi.mm16_r5_format.rt == 29));
+       }
+       return (ip->mm_i_format.opcode == mm_addiu32_op &&
+                ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29);
+ #else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;
        if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
                return 1;
+ #endif
        return 0;
  }
  
  static int get_frame_info(struct mips_frame_info *info)
  {
+ #ifdef CONFIG_CPU_MICROMIPS
+       union mips_instruction *ip = (void *) (((char *) info->func) - 1);
+ #else
        union mips_instruction *ip = info->func;
+ #endif
        unsigned max_insns = info->func_size / sizeof(union mips_instruction);
        unsigned i;
  
                        break;
                if (!info->frame_size) {
                        if (is_sp_move_ins(ip))
+                       {
+ #ifdef CONFIG_CPU_MICROMIPS
+                               if (mm_insn_16bit(ip->halfword[0]))
+                               {
+                                       unsigned short tmp;
+                                       if (ip->halfword[0] & mm_addiusp_func)
+                                       {
+                                               tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
+                                               info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
+                                       } else {
+                                               tmp = (ip->halfword[0] >> 1);
+                                               info->frame_size = -(signed short)(tmp & 0xf);
+                                       }
+                                       ip = (void *) &ip->halfword[1];
+                                       ip--;
+                               } else
+ #endif
                                info->frame_size = - ip->i_format.simmediate;
+                       }
                        continue;
                }
                if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
diff --combined arch/mips/kernel/smp.c
index aee04af213c5895d5b273b4f17fe896621707705,596620dd7ee2dd8bcd2b0c28d21ab2030575dba2..c17619fe18e32a9f23a5df7e18b0b943b5d0cf2c
@@@ -83,6 -83,7 +83,7 @@@ static inline void set_cpu_sibling_map(
  }
  
  struct plat_smp_ops *mp_ops;
+ EXPORT_SYMBOL(mp_ops);
  
  __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
  {
@@@ -139,7 -140,7 +140,7 @@@ asmlinkage __cpuinit void start_seconda
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();
  
 -      cpu_idle();
 +      cpu_startup_entry(CPUHP_ONLINE);
  }
  
  /*
diff --combined arch/mips/kernel/traps.c
index 25225515451f8f348d4f2ffa91a4c64c9b004a82,3c906e723fd4243f103b9963cd7a8ef13bf2367f..77cff1f6d050cb92e21475ae52a9ef2f037b5bc5
@@@ -8,8 -8,8 +8,8 @@@
   * Copyright (C) 1998 Ulf Carlsson
   * Copyright (C) 1999 Silicon Graphics, Inc.
   * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
-  * Copyright (C) 2000, 01 MIPS Technologies, Inc.
   * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
+  * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
   */
  #include <linux/bug.h>
  #include <linux/compiler.h>
@@@ -60,9 -60,9 +60,9 @@@ extern void check_wait(void)
  extern asmlinkage void r4k_wait(void);
  extern asmlinkage void rollback_handle_int(void);
  extern asmlinkage void handle_int(void);
- extern asmlinkage void handle_tlbm(void);
- extern asmlinkage void handle_tlbl(void);
- extern asmlinkage void handle_tlbs(void);
+ extern u32 handle_tlbl[];
+ extern u32 handle_tlbs[];
+ extern u32 handle_tlbm[];
  extern asmlinkage void handle_adel(void);
  extern asmlinkage void handle_ades(void);
  extern asmlinkage void handle_ibe(void);
@@@ -83,10 -83,6 +83,6 @@@ extern asmlinkage void handle_dsp(void)
  extern asmlinkage void handle_mcheck(void);
  extern asmlinkage void handle_reserved(void);
  
- extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
-                                   struct mips_fpu_struct *ctx, int has_fpu,
-                                   void *__user *fault_addr);
  void (*board_be_init)(void);
  int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
  void (*board_nmi_handler_setup)(void);
@@@ -206,6 -202,19 +202,6 @@@ void show_stack(struct task_struct *tas
        show_stacktrace(task, &regs);
  }
  
 -/*
 - * The architecture-independent dump_stack generator
 - */
 -void dump_stack(void)
 -{
 -      struct pt_regs regs;
 -
 -      prepare_frametrace(&regs);
 -      show_backtrace(current, &regs);
 -}
 -
 -EXPORT_SYMBOL(dump_stack);
 -
  static void show_code(unsigned int __user *pc)
  {
        long i;
@@@ -231,7 -240,7 +227,7 @@@ static void __show_regs(const struct pt
        unsigned int cause = regs->cp0_cause;
        int i;
  
 -      printk("Cpu %d\n", smp_processor_id());
 +      show_regs_print_info(KERN_DEFAULT);
  
        /*
         * Saved main processor registers
@@@ -482,6 -491,12 +478,12 @@@ asmlinkage void do_be(struct pt_regs *r
  #define SYNC   0x0000000f
  #define RDHWR  0x0000003b
  
+ /*  microMIPS definitions   */
+ #define MM_POOL32A_FUNC 0xfc00ffff
+ #define MM_RDHWR        0x00006b3c
+ #define MM_RS           0x001f0000
+ #define MM_RT           0x03e00000
  /*
   * The ll_bit is cleared by r*_switch.S
   */
@@@ -596,42 -611,62 +598,62 @@@ static int simulate_llsc(struct pt_reg
   * Simulate trapping 'rdhwr' instructions to provide user accessible
   * registers not implemented in hardware.
   */
- static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
+ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
  {
        struct thread_info *ti = task_thread_info(current);
  
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+                       1, regs, 0);
+       switch (rd) {
+       case 0:         /* CPU number */
+               regs->regs[rt] = smp_processor_id();
+               return 0;
+       case 1:         /* SYNCI length */
+               regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+                                    current_cpu_data.icache.linesz);
+               return 0;
+       case 2:         /* Read count register */
+               regs->regs[rt] = read_c0_count();
+               return 0;
+       case 3:         /* Count register resolution */
+               switch (current_cpu_data.cputype) {
+               case CPU_20KC:
+               case CPU_25KF:
+                       regs->regs[rt] = 1;
+                       break;
+               default:
+                       regs->regs[rt] = 2;
+               }
+               return 0;
+       case 29:
+               regs->regs[rt] = ti->tp_value;
+               return 0;
+       default:
+               return -1;
+       }
+ }
+ static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+ {
        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
-               perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, regs, 0);
-               switch (rd) {
-               case 0:         /* CPU number */
-                       regs->regs[rt] = smp_processor_id();
-                       return 0;
-               case 1:         /* SYNCI length */
-                       regs->regs[rt] = min(current_cpu_data.dcache.linesz,
-                                            current_cpu_data.icache.linesz);
-                       return 0;
-               case 2:         /* Read count register */
-                       regs->regs[rt] = read_c0_count();
-                       return 0;
-               case 3:         /* Count register resolution */
-                       switch (current_cpu_data.cputype) {
-                       case CPU_20KC:
-                       case CPU_25KF:
-                               regs->regs[rt] = 1;
-                               break;
-                       default:
-                               regs->regs[rt] = 2;
-                       }
-                       return 0;
-               case 29:
-                       regs->regs[rt] = ti->tp_value;
-                       return 0;
-               default:
-                       return -1;
-               }
+               simulate_rdhwr(regs, rd, rt);
+               return 0;
+       }
+       /* Not ours.  */
+       return -1;
+ }
+ static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+ {
+       if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+               int rd = (opcode & MM_RS) >> 16;
+               int rt = (opcode & MM_RT) >> 21;
+               simulate_rdhwr(regs, rd, rt);
+               return 0;
        }
  
        /* Not ours.  */
@@@ -662,7 -697,7 +684,7 @@@ asmlinkage void do_ov(struct pt_regs *r
        force_sig_info(SIGFPE, &info, current);
  }
  
static int process_fpemu_return(int sig, void __user *fault_addr)
+ int process_fpemu_return(int sig, void __user *fault_addr)
  {
        if (sig == SIGSEGV || sig == SIGBUS) {
                struct siginfo si = {0};
@@@ -813,9 -848,29 +835,29 @@@ static void do_trap_or_bp(struct pt_reg
  asmlinkage void do_bp(struct pt_regs *regs)
  {
        unsigned int opcode, bcode;
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       unsigned long epc;
+       u16 instr[2];
+       if (get_isa16_mode(regs->cp0_epc)) {
+               /* Calculate EPC. */
+               epc = exception_epc(regs);
+               if (cpu_has_mmips) {
+                       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
+                           (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
+                               goto out_sigsegv;
+                   opcode = (instr[0] << 16) | instr[1];
+               } else {
+                   /* MIPS16e mode */
+                   if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
+                               goto out_sigsegv;
+                   bcode = (instr[0] >> 6) & 0x3f;
+                   do_trap_or_bp(regs, bcode, "Break");
+                   return;
+               }
+       } else {
+               if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+                       goto out_sigsegv;
+       }
  
        /*
         * There is the ancient bug in the MIPS assemblers that the break
@@@ -856,13 -911,22 +898,22 @@@ out_sigsegv
  asmlinkage void do_tr(struct pt_regs *regs)
  {
        unsigned int opcode, tcode = 0;
+       u16 instr[2];
+       unsigned long epc = exception_epc(regs);
  
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
+       if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) ||
+               (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))
+                       goto out_sigsegv;
+       opcode = (instr[0] << 16) | instr[1];
  
        /* Immediate versions don't provide a code.  */
-       if (!(opcode & OPCODE))
-               tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       if (!(opcode & OPCODE)) {
+               if (get_isa16_mode(regs->cp0_epc))
+                       /* microMIPS */
+                       tcode = (opcode >> 12) & 0x1f;
+               else
+                       tcode = ((opcode >> 6) & ((1 << 10) - 1));
+       }
  
        do_trap_or_bp(regs, tcode, "Trap");
        return;
@@@ -875,6 -939,7 +926,7 @@@ asmlinkage void do_ri(struct pt_regs *r
  {
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
+       unsigned long old31 = regs->regs[31];
        unsigned int opcode = 0;
        int status = -1;
  
        if (unlikely(compute_return_epc(regs) < 0))
                return;
  
-       if (unlikely(get_user(opcode, epc) < 0))
-               status = SIGSEGV;
+       if (get_isa16_mode(regs->cp0_epc)) {
+               unsigned short mmop[2] = { 0 };
  
-       if (!cpu_has_llsc && status < 0)
-               status = simulate_llsc(regs, opcode);
+               if (unlikely(get_user(mmop[0], epc) < 0))
+                       status = SIGSEGV;
+               if (unlikely(get_user(mmop[1], epc) < 0))
+                       status = SIGSEGV;
+               opcode = (mmop[0] << 16) | mmop[1];
  
-       if (status < 0)
-               status = simulate_rdhwr(regs, opcode);
+               if (status < 0)
+                       status = simulate_rdhwr_mm(regs, opcode);
+       } else {
+               if (unlikely(get_user(opcode, epc) < 0))
+                       status = SIGSEGV;
  
-       if (status < 0)
-               status = simulate_sync(regs, opcode);
+               if (!cpu_has_llsc && status < 0)
+                       status = simulate_llsc(regs, opcode);
+               if (status < 0)
+                       status = simulate_rdhwr_normal(regs, opcode);
+               if (status < 0)
+                       status = simulate_sync(regs, opcode);
+       }
  
        if (status < 0)
                status = SIGILL;
  
        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
+               regs->regs[31] = old31;
                force_sig(status, current);
        }
  }
@@@ -973,7 -1052,7 +1039,7 @@@ static int default_cu2_call(struct noti
  asmlinkage void do_cpu(struct pt_regs *regs)
  {
        unsigned int __user *epc;
-       unsigned long old_epc;
+       unsigned long old_epc, old31;
        unsigned int opcode;
        unsigned int cpid;
        int status;
        case 0:
                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;
+               old31 = regs->regs[31];
                opcode = 0;
                status = -1;
  
                if (unlikely(compute_return_epc(regs) < 0))
                        return;
  
-               if (unlikely(get_user(opcode, epc) < 0))
-                       status = SIGSEGV;
+               if (get_isa16_mode(regs->cp0_epc)) {
+                       unsigned short mmop[2] = { 0 };
  
-               if (!cpu_has_llsc && status < 0)
-                       status = simulate_llsc(regs, opcode);
+                       if (unlikely(get_user(mmop[0], epc) < 0))
+                               status = SIGSEGV;
+                       if (unlikely(get_user(mmop[1], epc) < 0))
+                               status = SIGSEGV;
+                       opcode = (mmop[0] << 16) | mmop[1];
  
-               if (status < 0)
-                       status = simulate_rdhwr(regs, opcode);
+                       if (status < 0)
+                               status = simulate_rdhwr_mm(regs, opcode);
+               } else {
+                       if (unlikely(get_user(opcode, epc) < 0))
+                               status = SIGSEGV;
+                       if (!cpu_has_llsc && status < 0)
+                               status = simulate_llsc(regs, opcode);
+                       if (status < 0)
+                               status = simulate_rdhwr_normal(regs, opcode);
+               }
  
                if (status < 0)
                        status = SIGILL;
  
                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
+                       regs->regs[31] = old31;
                        force_sig(status, current);
                }
  
@@@ -1320,7 -1414,7 +1401,7 @@@ asmlinkage void cache_parity_error(void
  void ejtag_exception_handler(struct pt_regs *regs)
  {
        const int field = 2 * sizeof(unsigned long);
-       unsigned long depc, old_epc;
+       unsigned long depc, old_epc, old_ra;
        unsigned int debug;
  
        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
+               old_ra = regs->regs[31];
                regs->cp0_epc = depc;
-               __compute_return_epc(regs);
+               compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
+               regs->regs[31] = old_ra;
        } else
                depc += 4;
        write_c0_depc(depc);
@@@ -1377,11 -1473,27 +1460,27 @@@ unsigned long vi_handlers[64]
  void __init *set_except_vector(int n, void *addr)
  {
        unsigned long handler = (unsigned long) addr;
-       unsigned long old_handler = exception_handlers[n];
+       unsigned long old_handler;
+ #ifdef CONFIG_CPU_MICROMIPS
+       /*
+        * Only the TLB handlers are cache aligned with an even
+        * address. All other handlers are on an odd address and
+        * require no modification. Otherwise, MIPS32 mode will
+        * be entered when handling any TLB exceptions. That
+        * would be bad...since we must stay in microMIPS mode.
+        */
+       if (!(handler & 0x1))
+               handler |= 1;
+ #endif
+       old_handler = xchg(&exception_handlers[n], handler);
  
-       exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
+ #ifdef CONFIG_CPU_MICROMIPS
+               unsigned long jump_mask = ~((1 << 27) - 1);
+ #else
                unsigned long jump_mask = ~((1 << 28) - 1);
+ #endif
                u32 *buf = (u32 *)(ebase + 0x200);
                unsigned int k0 = 26;
                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
        return (void *)old_handler;
  }
  
- static asmlinkage void do_default_vi(void)
+ static void do_default_vi(void)
  {
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
@@@ -1408,17 -1520,18 +1507,18 @@@ static void *set_vi_srs_handler(int n, 
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
-       u32 *w;
+       u16 *h;
        unsigned char *b;
  
        BUG_ON(!cpu_has_veic && !cpu_has_vint);
+       BUG_ON((n < 0) && (n > 9));
  
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
-       vi_handlers[n] = (unsigned long) addr;
+       vi_handlers[n] = handler;
  
        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
  
        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
-                * that does normal register saving and standard interrupt exit
+                * that does normal register saving and standard interrupt exit
                 */
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
                extern char rollback_except_vec_vi;
                 * Status.IM bit to be masked before going there.
                 */
                extern char except_vec_vi_mori;
+ #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int mori_offset = &except_vec_vi_mori - vec_start + 2;
+ #else
                const int mori_offset = &except_vec_vi_mori - vec_start;
+ #endif
  #endif /* CONFIG_MIPS_MT_SMTC */
-               const int handler_len = &except_vec_vi_end - vec_start;
+ #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+               const int lui_offset = &except_vec_vi_lui - vec_start + 2;
+               const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+ #else
                const int lui_offset = &except_vec_vi_lui - vec_start;
                const int ori_offset = &except_vec_vi_ori - vec_start;
+ #endif
+               const int handler_len = &except_vec_vi_end - vec_start;
  
                if (handler_len > VECTORSPACING) {
                        /*
                        panic("VECTORSPACING too small");
                }
  
-               memcpy(b, vec_start, handler_len);
+               set_handler(((unsigned long)b - ebase), vec_start,
+ #ifdef CONFIG_CPU_MICROMIPS
+                               (handler_len - 1));
+ #else
+                               handler_len);
+ #endif
  #ifdef CONFIG_MIPS_MT_SMTC
                BUG_ON(n > 7);  /* Vector index %d exceeds SMTC maximum. */
  
-               w = (u32 *)(b + mori_offset);
-               *w = (*w & 0xffff0000) | (0x100 << n);
+               h = (u16 *)(b + mori_offset);
+               *h = (0x100 << n);
  #endif /* CONFIG_MIPS_MT_SMTC */
-               w = (u32 *)(b + lui_offset);
-               *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
-               w = (u32 *)(b + ori_offset);
-               *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+               h = (u16 *)(b + lui_offset);
+               *h = (handler >> 16) & 0xffff;
+               h = (u16 *)(b + ori_offset);
+               *h = (handler & 0xffff);
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+handler_len));
        }
        else {
                /*
-                * In other cases jump directly to the interrupt handler
-                *
-                * It is the handlers responsibility to save registers if required
-                * (eg hi/lo) and return from the exception using "eret"
+                * In other cases jump directly to the interrupt handler. It
+                * is the handler's responsibility to save registers if required
+                * (eg hi/lo) and return from the exception using "eret".
                 */
-               w = (u32 *)b;
-               *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
-               *w = 0;
+               u32 insn;
+               h = (u16 *)b;
+               /* j handler */
+ #ifdef CONFIG_CPU_MICROMIPS
+               insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+ #else
+               insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+ #endif
+               h[0] = (insn >> 16) & 0xffff;
+               h[1] = insn & 0xffff;
+               h[2] = 0;
+               h[3] = 0;
                local_flush_icache_range((unsigned long)b,
                                         (unsigned long)(b+8));
        }
@@@ -1534,6 -1669,7 +1656,7 @@@ void __cpuinit per_cpu_trap_init(bool i
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
        unsigned int hwrena = cpu_hwrena_impl_bits;
+       unsigned long asid = 0;
  #ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);
        }
  #endif /* CONFIG_MIPS_MT_SMTC */
  
-       if (!cpu_data[cpu].asid_cache)
-               cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+       asid = ASID_FIRST_VERSION;
+       cpu_data[cpu].asid_cache = asid;
+       TLBMISS_HANDLER_SETUP();
  
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
  /* Install CPU exception handler */
  void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
  {
+ #ifdef CONFIG_CPU_MICROMIPS
+       memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+ #else
        memcpy((void *)(ebase + offset), addr, size);
+ #endif
        local_flush_icache_range(ebase + offset, ebase + offset + size);
  }
  
@@@ -1682,8 -1823,9 +1810,9 @@@ __setup("rdhwr_noopt", set_rdhwr_noopt)
  
  void __init trap_init(void)
  {
-       extern char except_vec3_generic, except_vec3_r4000;
+       extern char except_vec3_generic;
        extern char except_vec4;
+       extern char except_vec3_r4000;
        unsigned long i;
        int rollback;
  
                ebase = (unsigned long)
                        __alloc_bootmem(size, 1 << fls(size), 0);
        } else {
-               ebase = CKSEG0;
+ #ifdef CONFIG_KVM_GUEST
+ #define KVM_GUEST_KSEG0     0x40000000
+         ebase = KVM_GUEST_KSEG0;
+ #else
+         ebase = CKSEG0;
+ #endif
                if (cpu_has_mips_r2)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
  
        if (cpu_has_vce)
                /* Special exception: R4[04]00 uses also the divec space. */
-               memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
+               set_handler(0x180, &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
-               memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
+               set_handler(0x180, &except_vec3_generic, 0x80);
        else
-               memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
+               set_handler(0x080, &except_vec3_generic, 0x80);
  
        local_flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();
diff --combined arch/mips/kvm/kvm_mips.c
index 0000000000000000000000000000000000000000,2e60b1c78194ddc3709c178bf81d38ec89f07b44..e0dad0289797b292f9b436de9afa492eca0112de
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,958 +1,958 @@@
 -                                 struct kvm_memory_slot *memslot,
 -                                 struct kvm_memory_slot old,
 -                                 struct kvm_userspace_memory_region *mem,
 -                                 bool user_alloc)
+ /*
+  * This file is subject to the terms and conditions of the GNU General Public
+  * License.  See the file "COPYING" in the main directory of this archive
+  * for more details.
+  *
+  * KVM/MIPS: MIPS specific KVM APIs
+  *
+  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+  * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
+ #include <linux/bootmem.h>
+ #include <asm/page.h>
+ #include <asm/cacheflush.h>
+ #include <asm/mmu_context.h>
+ #include <linux/kvm_host.h>
+ #include "kvm_mips_int.h"
+ #include "kvm_mips_comm.h"
+ #define CREATE_TRACE_POINTS
+ #include "trace.h"
+ #ifndef VECTORSPACING
+ #define VECTORSPACING 0x100   /* for EI/VI mode */
+ #endif
+ #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+ struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "wait", VCPU_STAT(wait_exits) },
+       { "cache", VCPU_STAT(cache_exits) },
+       { "signal", VCPU_STAT(signal_exits) },
+       { "interrupt", VCPU_STAT(int_exits) },
+       { "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
+       { "tlbmod", VCPU_STAT(tlbmod_exits) },
+       { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
+       { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
+       { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
+       { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
+       { "syscall", VCPU_STAT(syscall_exits) },
+       { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
+       { "break_inst", VCPU_STAT(break_inst_exits) },
+       { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
+       { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+       {NULL}
+ };
+ static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+ {
+       int i;
+       for_each_possible_cpu(i) {
+               vcpu->arch.guest_kernel_asid[i] = 0;
+               vcpu->arch.guest_user_asid[i] = 0;
+       }
+       return 0;
+ }
+ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+ {
+       return gfn;
+ }
+ /* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
+  * are "runnable" if interrupts are pending
+  */
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+ {
+       return !!(vcpu->arch.pending_exceptions);
+ }
+ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+ {
+       return 1;
+ }
+ int kvm_arch_hardware_enable(void *garbage)
+ {
+       return 0;
+ }
+ void kvm_arch_hardware_disable(void *garbage)
+ {
+ }
+ int kvm_arch_hardware_setup(void)
+ {
+       return 0;
+ }
+ void kvm_arch_hardware_unsetup(void)
+ {
+ }
+ void kvm_arch_check_processor_compat(void *rtn)
+ {
+       int *r = (int *)rtn;
+       *r = 0;
+       return;
+ }
+ static void kvm_mips_init_tlbs(struct kvm *kvm)
+ {
+       unsigned long wired;
+       /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
+       wired = read_c0_wired();
+       write_c0_wired(wired + 1);
+       mtc0_tlbw_hazard();
+       kvm->arch.commpage_tlb = wired;
+       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+                 kvm->arch.commpage_tlb);
+ }
+ static void kvm_mips_init_vm_percpu(void *arg)
+ {
+       struct kvm *kvm = (struct kvm *)arg;
+       kvm_mips_init_tlbs(kvm);
+       kvm_mips_callbacks->vm_init(kvm);
+ }
+ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ {
+       if (atomic_inc_return(&kvm_mips_instance) == 1) {
+               kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+       }
+       return 0;
+ }
+ void kvm_mips_free_vcpus(struct kvm *kvm)
+ {
+       unsigned int i;
+       struct kvm_vcpu *vcpu;
+       /* Put the pages we reserved for the guest pmap */
+       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+       }
+       if (kvm->arch.guest_pmap)
+               kfree(kvm->arch.guest_pmap);
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_arch_vcpu_free(vcpu);
+       }
+       mutex_lock(&kvm->lock);
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+       atomic_set(&kvm->online_vcpus, 0);
+       mutex_unlock(&kvm->lock);
+ }
+ void kvm_arch_sync_events(struct kvm *kvm)
+ {
+ }
+ static void kvm_mips_uninit_tlbs(void *arg)
+ {
+       /* Restore wired count */
+       write_c0_wired(0);
+       mtc0_tlbw_hazard();
+       /* Clear out all the TLBs */
+       kvm_local_flush_tlb_all();
+ }
+ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+       kvm_mips_free_vcpus(kvm);
+       /* If this is the last instance, restore wired count */
+       if (atomic_dec_return(&kvm_mips_instance) == 0) {
+               kvm_info("%s: last KVM instance, restoring TLB parameters\n",
+                        __func__);
+               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+       }
+ }
+ long
+ kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+ {
+       return -EINVAL;
+ }
+ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+ {
+ }
+ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
+ {
+       return 0;
+ }
+ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 -                                 struct kvm_userspace_memory_region *mem,
 -                                 struct kvm_memory_slot old, bool user_alloc)
++                                struct kvm_memory_slot *memslot,
++                                struct kvm_userspace_memory_region *mem,
++                                enum kvm_mr_change change)
+ {
+       return 0;
+ }
+ void kvm_arch_commit_memory_region(struct kvm *kvm,
++                                struct kvm_userspace_memory_region *mem,
++                                const struct kvm_memory_slot *old,
++                                enum kvm_mr_change change)
+ {
+       unsigned long npages = 0;
+       int i, err = 0;
+       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+                 __func__, kvm, mem->slot, mem->guest_phys_addr,
+                 mem->memory_size, mem->userspace_addr);
+       /* Setup Guest PMAP table */
+       if (!kvm->arch.guest_pmap) {
+               if (mem->slot == 0)
+                       npages = mem->memory_size >> PAGE_SHIFT;
+               if (npages) {
+                       kvm->arch.guest_pmap_npages = npages;
+                       kvm->arch.guest_pmap =
+                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+                       if (!kvm->arch.guest_pmap) {
+                               kvm_err("Failed to allocate guest PMAP");
+                               err = -ENOMEM;
+                               goto out;
+                       }
+                       kvm_info
+                           ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+                            npages, kvm->arch.guest_pmap);
+                       /* Now setup the page table */
+                       for (i = 0; i < npages; i++) {
+                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+                       }
+               }
+       }
+ out:
+       return;
+ }
+ void kvm_arch_flush_shadow_all(struct kvm *kvm)
+ {
+ }
+ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+ {
+ }
+ void kvm_arch_flush_shadow(struct kvm *kvm)
+ {
+ }
+ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ {
+       extern char mips32_exception[], mips32_exceptionEnd[];
+       extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+       int err, size, offset;
+       void *gebase;
+       int i;
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+       err = kvm_vcpu_init(vcpu, kvm, id);
+       if (err)
+               goto out_free_cpu;
+       kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+       /* Allocate space for host mode exception handlers that handle
+        * guest mode exits
+        */
+       if (cpu_has_veic || cpu_has_vint) {
+               size = 0x200 + VECTORSPACING * 64;
+       } else {
+               size = 0x200;
+       }
+       /* Save Linux EBASE */
+       vcpu->arch.host_ebase = (void *)read_c0_ebase();
+       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+       if (!gebase) {
+               err = -ENOMEM;
+               goto out_free_cpu;
+       }
+       kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+                ALIGN(size, PAGE_SIZE), gebase);
+       /* Save new ebase */
+       vcpu->arch.guest_ebase = gebase;
+       /* Copy L1 Guest Exception handler to correct offset */
+       /* TLB Refill, EXL = 0 */
+       memcpy(gebase, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+       /* General Exception Entry point */
+       memcpy(gebase + 0x180, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+       for (i = 0; i < 8; i++) {
+               kvm_debug("L1 Vectored handler @ %p\n",
+                         gebase + 0x200 + (i * VECTORSPACING));
+               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+                      mips32_exceptionEnd - mips32_exception);
+       }
+       /* General handler, relocate to unmapped space for sanity's sake */
+       offset = 0x2000;
+       kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
+                gebase + offset,
+                mips32_GuestExceptionEnd - mips32_GuestException);
+       memcpy(gebase + offset, mips32_GuestException,
+              mips32_GuestExceptionEnd - mips32_GuestException);
+       /* Invalidate the icache for these ranges */
+       mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+       /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
+       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+       if (!vcpu->arch.kseg0_commpage) {
+               err = -ENOMEM;
+               goto out_free_gebase;
+       }
+       kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+       kvm_mips_commpage_init(vcpu);
+       /* Init */
+       vcpu->arch.last_sched_cpu = -1;
+       /* Start off the timer */
+       kvm_mips_emulate_count(vcpu);
+       return vcpu;
+ out_free_gebase:
+       kfree(gebase);
+ out_free_cpu:
+       kfree(vcpu);
+ out:
+       return ERR_PTR(err);
+ }
+ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ {
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+       kvm_vcpu_uninit(vcpu);
+       kvm_mips_dump_stats(vcpu);
+       if (vcpu->arch.guest_ebase)
+               kfree(vcpu->arch.guest_ebase);
+       if (vcpu->arch.kseg0_commpage)
+               kfree(vcpu->arch.kseg0_commpage);
+ }
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+       kvm_arch_vcpu_free(vcpu);
+ }
+ int
+ kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                   struct kvm_guest_debug *dbg)
+ {
+       return -EINVAL;
+ }
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+       int r = 0;
+       sigset_t sigsaved;
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvm_mips_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       }
+       /* Check if we have any exceptions/interrupts pending */
+       kvm_mips_deliver_interrupts(vcpu,
+                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
+       local_irq_disable();
+       kvm_guest_enter();
+       r = __kvm_mips_vcpu_run(run, vcpu);
+       kvm_guest_exit();
+       local_irq_enable();
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+       return r;
+ }
+ int
+ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
+ {
+       int intr = (int)irq->irq;
+       struct kvm_vcpu *dvcpu = NULL;
+       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+                         (int)intr);
+       if (irq->cpu == -1)
+               dvcpu = vcpu;
+       else
+               dvcpu = vcpu->kvm->vcpus[irq->cpu];
+       if (intr == 2 || intr == 3 || intr == 4) {
+               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+       } else if (intr == -2 || intr == -3 || intr == -4) {
+               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+       } else {
+               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+                       irq->cpu, irq->irq);
+               return -EINVAL;
+       }
+       dvcpu->arch.wait = 0;
+       if (waitqueue_active(&dvcpu->wq)) {
+               wake_up_interruptible(&dvcpu->wq);
+       }
+       return 0;
+ }
+ int
+ kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+ {
+       return -EINVAL;
+ }
+ int
+ kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                               struct kvm_mp_state *mp_state)
+ {
+       return -EINVAL;
+ }
+ long
+ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+ {
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+       int intr;
+       switch (ioctl) {
+       case KVM_NMI:
+               /* Treat the NMI as a CPU reset */
+               r = kvm_mips_reset_vcpu(vcpu);
+               break;
+       case KVM_INTERRUPT:
+               {
+                       struct kvm_mips_interrupt irq;
+                       r = -EFAULT;
+                       if (copy_from_user(&irq, argp, sizeof(irq)))
+                               goto out;
+                       intr = (int)irq.irq;
+                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+                                 irq.irq);
+                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+                       break;
+               }
+       default:
+               r = -EINVAL;
+       }
+ out:
+       return r;
+ }
+ /*
+  * Get (and clear) the dirty memory log for a memory slot.
+  */
+ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+ {
+       struct kvm_memory_slot *memslot;
+       unsigned long ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+       mutex_lock(&kvm->slots_lock);
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = &kvm->memslots->memslots[log->slot];
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+               printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+                      ga_end);
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+       r = 0;
+ out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+ }
+ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+ {
+       long r;
+       switch (ioctl) {
+       default:
+               r = -EINVAL;
+       }
+       return r;
+ }
+ int kvm_arch_init(void *opaque)
+ {
+       int ret;
+       if (kvm_mips_callbacks) {
+               kvm_err("kvm: module already exists\n");
+               return -EEXIST;
+       }
+       ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
+       return ret;
+ }
+ void kvm_arch_exit(void)
+ {
+       kvm_mips_callbacks = NULL;
+ }
+ int
+ kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+ {
+       return -ENOTSUPP;
+ }
+ int
+ kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+ {
+       return -ENOTSUPP;
+ }
+ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+ {
+       return 0;
+ }
+ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+       return -ENOTSUPP;
+ }
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+       return -ENOTSUPP;
+ }
+ int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+ {
+       return VM_FAULT_SIGBUS;
+ }
+ int kvm_dev_ioctl_check_extension(long ext)
+ {
+       int r;
+       switch (ext) {
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+ }
+ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+ {
+       return kvm_mips_pending_timer(vcpu);
+ }
+ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+ {
+       int i;
+       struct mips_coproc *cop0;
+       if (!vcpu)
+               return -1;
+       printk("VCPU Register Dump:\n");
+       printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
+       printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+       for (i = 0; i < 32; i += 4) {
+               printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+                      vcpu->arch.gprs[i],
+                      vcpu->arch.gprs[i + 1],
+                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+       }
+       printk("\thi: 0x%08lx\n", vcpu->arch.hi);
+       printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
+       cop0 = vcpu->arch.cop0;
+       printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+              kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
+       printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+       return 0;
+ }
+ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+ {
+       int i;
+       for (i = 0; i < 32; i++)
+               vcpu->arch.gprs[i] = regs->gprs[i];
+       vcpu->arch.hi = regs->hi;
+       vcpu->arch.lo = regs->lo;
+       vcpu->arch.pc = regs->pc;
+       return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
+ }
+ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+ {
+       int i;
+       for (i = 0; i < 32; i++)
+               regs->gprs[i] = vcpu->arch.gprs[i];
+       regs->hi = vcpu->arch.hi;
+       regs->lo = vcpu->arch.lo;
+       regs->pc = vcpu->arch.pc;
+       return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
+ }
+ void kvm_mips_comparecount_func(unsigned long data)
+ {
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+       vcpu->arch.wait = 0;
+       if (waitqueue_active(&vcpu->wq)) {
+               wake_up_interruptible(&vcpu->wq);
+       }
+ }
+ /*
+  * low level hrtimer wake routine.
+  */
+ enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+ {
+       struct kvm_vcpu *vcpu;
+       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+       kvm_mips_comparecount_func((unsigned long) vcpu);
+       hrtimer_forward_now(&vcpu->arch.comparecount_timer,
+                           ktime_set(0, MS_TO_NS(10)));
+       return HRTIMER_RESTART;
+ }
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+       kvm_mips_callbacks->vcpu_init(vcpu);
+       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
+       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+       kvm_mips_init_shadow_tlb(vcpu);
+       return 0;
+ }
+ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+ {
+       return;
+ }
+ int
+ kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
+ {
+       return 0;
+ }
+ /* Initial guest state */
+ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ {
+       return kvm_mips_callbacks->vcpu_setup(vcpu);
+ }
+ static
+ void kvm_mips_set_c0_status(void)
+ {
+       uint32_t status = read_c0_status();
+       if (cpu_has_fpu)
+               status |= (ST0_CU1);
+       if (cpu_has_dsp)
+               status |= (ST0_MX);
+       write_c0_status(status);
+       ehb();
+ }
+ /*
+  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+  */
+ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ {
+       uint32_t cause = vcpu->arch.host_cp0_cause;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+       /* Set a default exit reason */
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+       /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
+       kvm_mips_set_c0_status();
+       local_irq_enable();
+       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+                       cause, opc, run, vcpu);
+       /* Do a privilege check, if in UM most of these exit conditions end up
+        * causing an exception to be delivered to the Guest Kernel
+        */
+       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+       if (er == EMULATE_PRIV_FAIL) {
+               goto skip_emul;
+       } else if (er == EMULATE_FAIL) {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               goto skip_emul;
+       }
+       switch (exccode) {
+       case T_INT:
+               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+               ++vcpu->stat.int_exits;
+               trace_kvm_exit(vcpu, INT_EXITS);
+               if (need_resched()) {
+                       cond_resched();
+               }
+               ret = RESUME_GUEST;
+               break;
+       case T_COP_UNUSABLE:
+               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+               ++vcpu->stat.cop_unusable_exits;
+               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+               /* XXXKYMA: Might need to return to user space */
+               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
+                       ret = RESUME_HOST;
+               }
+               break;
+       case T_TLB_MOD:
+               ++vcpu->stat.tlbmod_exits;
+               trace_kvm_exit(vcpu, TLBMOD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+               break;
+       case T_TLB_ST_MISS:
+               kvm_debug
+                   ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+                    cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+                    badvaddr);
+               ++vcpu->stat.tlbmiss_st_exits;
+               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+               break;
+       case T_TLB_LD_MISS:
+               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+               ++vcpu->stat.tlbmiss_ld_exits;
+               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+               break;
+       case T_ADDR_ERR_ST:
+               ++vcpu->stat.addrerr_st_exits;
+               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+               break;
+       case T_ADDR_ERR_LD:
+               ++vcpu->stat.addrerr_ld_exits;
+               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+               break;
+       case T_SYSCALL:
+               ++vcpu->stat.syscall_exits;
+               trace_kvm_exit(vcpu, SYSCALL_EXITS);
+               ret = kvm_mips_callbacks->handle_syscall(vcpu);
+               break;
+       case T_RES_INST:
+               ++vcpu->stat.resvd_inst_exits;
+               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+               break;
+       case T_BREAK:
+               ++vcpu->stat.break_inst_exits;
+               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
+       default:
+               kvm_err
+                   ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                    exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+                    kvm_read_c0_guest_status(vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+       }
+ skip_emul:
+       local_irq_disable();
+       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+               kvm_mips_deliver_interrupts(vcpu, cause);
+       if (!(ret & RESUME_HOST)) {
+               /* Only check for signals if not already exiting to userspace  */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
+                       ret = (-EINTR << 2) | RESUME_HOST;
+                       ++vcpu->stat.signal_exits;
+                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
+               }
+       }
+       return ret;
+ }
+ int __init kvm_mips_init(void)
+ {
+       int ret;
+       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+       if (ret)
+               return ret;
+       /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
+        * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
+        * to avoid the possibility of double faulting. The issue is that the TLB code
+        * references routines that are part of the the KVM module,
+        * which are only available once the module is loaded.
+        */
+       kvm_mips_gfn_to_pfn = gfn_to_pfn;
+       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+       kvm_mips_is_error_pfn = is_error_pfn;
+       pr_info("KVM/MIPS Initialized\n");
+       return 0;
+ }
+ void __exit kvm_mips_exit(void)
+ {
+       kvm_exit();
+       kvm_mips_gfn_to_pfn = NULL;
+       kvm_mips_release_pfn_clean = NULL;
+       kvm_mips_is_error_pfn = NULL;
+       pr_info("KVM/MIPS unloaded\n");
+ }
+ module_init(kvm_mips_init);
+ module_exit(kvm_mips_exit);
+ EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
index 148a590bc4194784c369acef68c76b73d9c2240d,df17f5fb999c002a70a7df2e59e368d70d8b29d7..c18c9a84f4c4ee5794088f133332d3a391b834b7
@@@ -1,10 -1,8 +1,8 @@@
  /dts-v1/;
  
--/include/ "rt3050.dtsi"
++#include "rt3050.dtsi"
  
  / {
-       #address-cells = <1>;
-       #size-cells = <1>;
        compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
        model = "Ralink RT3052 evaluation board";
  
                reg = <0x0 0x2000000>;
        };
  
-       palmbus@10000000 {
-               sysc@0 {
-                       ralink,pinmmux = "uartlite", "spi";
-                       ralink,uartmux = "gpio";
-                       ralink,wdtmux = <0>;
-               };
+       chosen {
+               bootargs = "console=ttyS0,57600";
        };
  
        cfi@1f000000 {
index 5f2bddb1860e11c1b32035593b3376b758c37570,64d8dab06b01697425e4a6073bf1e1d6ae0bca44..1230f56429d7334ea418f41921e709a9b737586a
@@@ -255,14 -255,14 +255,14 @@@ static void __init dump_topology(void
        }
  }
  
- static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot)
+ static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
  {
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
  
-       return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
+       return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
  }
  
- static pfn_t __init slot_psize_compute(cnodeid_t node, int slot)
+ static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
  {
        nasid_t nasid;
        lboard_t *brd;
@@@ -353,7 -353,7 +353,7 @@@ static void __init mlreset(void
  
  static void __init szmem(void)
  {
-       pfn_t slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
+       unsigned long slot_psize, slot0sz = 0, nodebytes;       /* Hack to detect problem configs */
        int slot;
        cnodeid_t node;
  
  
  static void __init node_mem_init(cnodeid_t node)
  {
-       pfn_t slot_firstpfn = slot_getbasepfn(node, 0);
-       pfn_t slot_freepfn = node_getfirstfree(node);
+       unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
+       unsigned long slot_freepfn = node_getfirstfree(node);
        unsigned long bootmap_size;
-       pfn_t start_pfn, end_pfn;
+       unsigned long start_pfn, end_pfn;
  
        get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
  
@@@ -457,7 -457,7 +457,7 @@@ void __init prom_free_prom_memory(void
        /* We got nothing to free here ...  */
  }
  
 -extern unsigned long setup_zero_pages(void);
 +extern void setup_zero_pages(void);
  
  void __init paging_init(void)
  {
        pagetable_init();
  
        for_each_online_node(node) {
-               pfn_t start_pfn, end_pfn;
+               unsigned long start_pfn, end_pfn;
  
                get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
  
@@@ -492,7 -492,7 +492,7 @@@ void __init mem_init(void
                totalram_pages += free_all_bootmem_node(NODE_DATA(node));
        }
  
 -      totalram_pages -= setup_zero_pages();   /* This comes from node 0 */
 +      setup_zero_pages();     /* This comes from node 0 */
  
        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
diff --combined virt/kvm/kvm_main.c
index 45f09362ee7be02df67171efa3508fa225129499,1fc942048521085a960074affc2663d93a70e93e..ae88b719bd2e477bbdb729f0b55555ebc869fcef
@@@ -217,9 -217,9 +217,9 @@@ void kvm_make_mclock_inprogress_request
        make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
  }
  
 -void kvm_make_update_eoibitmap_request(struct kvm *kvm)
 +void kvm_make_scan_ioapic_request(struct kvm *kvm)
  {
 -      make_all_cpus_request(kvm, KVM_REQ_EOIBITMAP);
 +      make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
  }
  
  int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
  
        kvm_vcpu_set_in_spin_loop(vcpu, false);
        kvm_vcpu_set_dy_eligible(vcpu, false);
 +      vcpu->preempted = false;
  
        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
@@@ -504,7 -503,6 +504,7 @@@ static struct kvm *kvm_create_vm(unsign
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
 +      INIT_LIST_HEAD(&kvm->devices);
  
        r = kvm_init_mmu_notifier(kvm);
        if (r)
@@@ -582,19 -580,6 +582,19 @@@ void kvm_free_physmem(struct kvm *kvm
        kfree(kvm->memslots);
  }
  
 +static void kvm_destroy_devices(struct kvm *kvm)
 +{
 +      struct list_head *node, *tmp;
 +
 +      list_for_each_safe(node, tmp, &kvm->devices) {
 +              struct kvm_device *dev =
 +                      list_entry(node, struct kvm_device, vm_node);
 +
 +              list_del(node);
 +              dev->ops->destroy(dev);
 +      }
 +}
 +
  static void kvm_destroy_vm(struct kvm *kvm)
  {
        int i;
        kvm_arch_flush_shadow_all(kvm);
  #endif
        kvm_arch_destroy_vm(kvm);
 +      kvm_destroy_devices(kvm);
        kvm_free_physmem(kvm);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
@@@ -734,6 -718,24 +734,6 @@@ static struct kvm_memslots *install_new
        return old_memslots; 
  }
  
 -/*
 - * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 - * - create a new memory slot
 - * - delete an existing memory slot
 - * - modify an existing memory slot
 - *   -- move it in the guest physical memory space
 - *   -- just change its flags
 - *
 - * Since flags can be changed by some of these operations, the following
 - * differentiation is the best we can do for __kvm_set_memory_region():
 - */
 -enum kvm_mr_change {
 -      KVM_MR_CREATE,
 -      KVM_MR_DELETE,
 -      KVM_MR_MOVE,
 -      KVM_MR_FLAGS_ONLY,
 -};
 -
  /*
   * Allocate some memory and give it an address in the guest physical address
   * space.
   * Must be called holding mmap_sem for write.
   */
  int __kvm_set_memory_region(struct kvm *kvm,
 -                          struct kvm_userspace_memory_region *mem,
 -                          bool user_alloc)
 +                          struct kvm_userspace_memory_region *mem)
  {
        int r;
        gfn_t base_gfn;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        /* We can read the guest memory with __xxx_user() later on. */
 -      if (user_alloc &&
 +      if ((mem->slot < KVM_USER_MEM_SLOTS) &&
            ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
             !access_ok(VERIFY_WRITE,
                        (void __user *)(unsigned long)mem->userspace_addr,
                slots = old_memslots;
        }
  
 -      r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
 +      r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
        if (r)
                goto out_slots;
  
  
        old_memslots = install_new_memslots(kvm, slots, &new);
  
 -      kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 +      kvm_arch_commit_memory_region(kvm, mem, &old, change);
  
        kvm_free_physmem_slot(&old, &new);
        kfree(old_memslots);
@@@ -929,23 -932,26 +929,23 @@@ out
  EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
  
  int kvm_set_memory_region(struct kvm *kvm,
 -                        struct kvm_userspace_memory_region *mem,
 -                        bool user_alloc)
 +                        struct kvm_userspace_memory_region *mem)
  {
        int r;
  
        mutex_lock(&kvm->slots_lock);
 -      r = __kvm_set_memory_region(kvm, mem, user_alloc);
 +      r = __kvm_set_memory_region(kvm, mem);
        mutex_unlock(&kvm->slots_lock);
        return r;
  }
  EXPORT_SYMBOL_GPL(kvm_set_memory_region);
  
  int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
 -                                 struct
 -                                 kvm_userspace_memory_region *mem,
 -                                 bool user_alloc)
 +                                 struct kvm_userspace_memory_region *mem)
  {
        if (mem->slot >= KVM_USER_MEM_SLOTS)
                return -EINVAL;
 -      return kvm_set_memory_region(kvm, mem, user_alloc);
 +      return kvm_set_memory_region(kvm, mem);
  }
  
  int kvm_get_dirty_log(struct kvm *kvm,
@@@ -1093,7 -1099,7 +1093,7 @@@ static int kvm_read_hva_atomic(void *da
        return __copy_from_user_inatomic(data, hva, len);
  }
  
 -int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
 +static int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
        unsigned long start, int write, struct page **page)
  {
        int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
@@@ -1713,7 -1719,6 +1713,7 @@@ void kvm_vcpu_kick(struct kvm_vcpu *vcp
                        smp_send_reschedule(cpu);
        put_cpu();
  }
 +EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
  #endif /* !CONFIG_S390 */
  
  void kvm_resched(struct kvm_vcpu *vcpu)
@@@ -1811,8 -1816,6 +1811,8 @@@ void kvm_vcpu_on_spin(struct kvm_vcpu *
                                continue;
                        } else if (pass && i > last_boosted_vcpu)
                                break;
 +                      if (!ACCESS_ONCE(vcpu->preempted))
 +                              continue;
                        if (vcpu == me)
                                continue;
                        if (waitqueue_active(&vcpu->wq))
@@@ -1978,7 -1981,7 +1978,7 @@@ static long kvm_vcpu_ioctl(struct file 
        if (vcpu->kvm->mm != current->mm)
                return -EIO;
  
- #if defined(CONFIG_S390) || defined(CONFIG_PPC)
+ #if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
        /*
         * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
         * so vcpu_load() would break it.
  }
  #endif
  
 +static int kvm_device_ioctl_attr(struct kvm_device *dev,
 +                               int (*accessor)(struct kvm_device *dev,
 +                                               struct kvm_device_attr *attr),
 +                               unsigned long arg)
 +{
 +      struct kvm_device_attr attr;
 +
 +      if (!accessor)
 +              return -EPERM;
 +
 +      if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
 +              return -EFAULT;
 +
 +      return accessor(dev, &attr);
 +}
 +
 +static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
 +                           unsigned long arg)
 +{
 +      struct kvm_device *dev = filp->private_data;
 +
 +      switch (ioctl) {
 +      case KVM_SET_DEVICE_ATTR:
 +              return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
 +      case KVM_GET_DEVICE_ATTR:
 +              return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
 +      case KVM_HAS_DEVICE_ATTR:
 +              return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
 +      default:
 +              if (dev->ops->ioctl)
 +                      return dev->ops->ioctl(dev, ioctl, arg);
 +
 +              return -ENOTTY;
 +      }
 +}
 +
 +static int kvm_device_release(struct inode *inode, struct file *filp)
 +{
 +      struct kvm_device *dev = filp->private_data;
 +      struct kvm *kvm = dev->kvm;
 +
 +      kvm_put_kvm(kvm);
 +      return 0;
 +}
 +
 +static const struct file_operations kvm_device_fops = {
 +      .unlocked_ioctl = kvm_device_ioctl,
 +#ifdef CONFIG_COMPAT
 +      .compat_ioctl = kvm_device_ioctl,
 +#endif
 +      .release = kvm_device_release,
 +};
 +
 +struct kvm_device *kvm_device_from_filp(struct file *filp)
 +{
 +      if (filp->f_op != &kvm_device_fops)
 +              return NULL;
 +
 +      return filp->private_data;
 +}
 +
 +static int kvm_ioctl_create_device(struct kvm *kvm,
 +                                 struct kvm_create_device *cd)
 +{
 +      struct kvm_device_ops *ops = NULL;
 +      struct kvm_device *dev;
 +      bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
 +      int ret;
 +
 +      switch (cd->type) {
 +#ifdef CONFIG_KVM_MPIC
 +      case KVM_DEV_TYPE_FSL_MPIC_20:
 +      case KVM_DEV_TYPE_FSL_MPIC_42:
 +              ops = &kvm_mpic_ops;
 +              break;
 +#endif
 +#ifdef CONFIG_KVM_XICS
 +      case KVM_DEV_TYPE_XICS:
 +              ops = &kvm_xics_ops;
 +              break;
 +#endif
 +      default:
 +              return -ENODEV;
 +      }
 +
 +      if (test)
 +              return 0;
 +
 +      dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 +      if (!dev)
 +              return -ENOMEM;
 +
 +      dev->ops = ops;
 +      dev->kvm = kvm;
 +
 +      ret = ops->create(dev, cd->type);
 +      if (ret < 0) {
 +              kfree(dev);
 +              return ret;
 +      }
 +
 +      ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR);
 +      if (ret < 0) {
 +              ops->destroy(dev);
 +              return ret;
 +      }
 +
 +      list_add(&dev->vm_node, &kvm->devices);
 +      kvm_get_kvm(kvm);
 +      cd->fd = ret;
 +      return 0;
 +}
 +
  static long kvm_vm_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
  {
                                                sizeof kvm_userspace_mem))
                        goto out;
  
 -              r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
 +              r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
                break;
        }
        case KVM_GET_DIRTY_LOG: {
                if (copy_from_user(&irq_event, argp, sizeof irq_event))
                        goto out;
  
 -              r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
 +              r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
 +                                      ioctl == KVM_IRQ_LINE_STATUS);
                if (r)
                        goto out;
  
                break;
        }
  #endif
 +#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 +      case KVM_SET_GSI_ROUTING: {
 +              struct kvm_irq_routing routing;
 +              struct kvm_irq_routing __user *urouting;
 +              struct kvm_irq_routing_entry *entries;
 +
 +              r = -EFAULT;
 +              if (copy_from_user(&routing, argp, sizeof(routing)))
 +                      goto out;
 +              r = -EINVAL;
 +              if (routing.nr >= KVM_MAX_IRQ_ROUTES)
 +                      goto out;
 +              if (routing.flags)
 +                      goto out;
 +              r = -ENOMEM;
 +              entries = vmalloc(routing.nr * sizeof(*entries));
 +              if (!entries)
 +                      goto out;
 +              r = -EFAULT;
 +              urouting = argp;
 +              if (copy_from_user(entries, urouting->entries,
 +                                 routing.nr * sizeof(*entries)))
 +                      goto out_free_irq_routing;
 +              r = kvm_set_irq_routing(kvm, entries, routing.nr,
 +                                      routing.flags);
 +      out_free_irq_routing:
 +              vfree(entries);
 +              break;
 +      }
 +#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
 +      case KVM_CREATE_DEVICE: {
 +              struct kvm_create_device cd;
 +
 +              r = -EFAULT;
 +              if (copy_from_user(&cd, argp, sizeof(cd)))
 +                      goto out;
 +
 +              r = kvm_ioctl_create_device(kvm, &cd);
 +              if (r)
 +                      goto out;
 +
 +              r = -EFAULT;
 +              if (copy_to_user(argp, &cd, sizeof(cd)))
 +                      goto out;
 +
 +              r = 0;
 +              break;
 +      }
        default:
                r = kvm_arch_vm_ioctl(filp, ioctl, arg);
                if (r == -ENOTTY)
@@@ -2605,12 -2446,9 +2605,12 @@@ static long kvm_dev_ioctl_check_extensi
        case KVM_CAP_INTERNAL_ERROR_DATA:
  #ifdef CONFIG_HAVE_KVM_MSI
        case KVM_CAP_SIGNAL_MSI:
 +#endif
 +#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
 +      case KVM_CAP_IRQFD_RESAMPLE:
  #endif
                return 1;
 -#ifdef KVM_CAP_IRQ_ROUTING
 +#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
        case KVM_CAP_IRQ_ROUTING:
                return KVM_MAX_IRQ_ROUTES;
  #endif
@@@ -2780,6 -2618,14 +2780,6 @@@ static int kvm_cpu_hotplug(struct notif
        return NOTIFY_OK;
  }
  
 -
 -asmlinkage void kvm_spurious_fault(void)
 -{
 -      /* Fault while not rebooting.  We want the trace. */
 -      BUG();
 -}
 -EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 -
  static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
  {
@@@ -2812,7 -2658,7 +2812,7 @@@ static void kvm_io_bus_destroy(struct k
        kfree(bus);
  }
  
 -int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
 +static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
  {
        const struct kvm_io_range *r1 = p1;
        const struct kvm_io_range *r2 = p2;
        return 0;
  }
  
 -int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
 +static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
                          gpa_t addr, int len)
  {
        bus->range[bus->dev_count++] = (struct kvm_io_range) {
        return 0;
  }
  
 -int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
 +static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
                             gpa_t addr, int len)
  {
        struct kvm_io_range *range, key;
@@@ -3083,8 -2929,6 +3083,8 @@@ struct kvm_vcpu *preempt_notifier_to_vc
  static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
  {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 +      if (vcpu->preempted)
 +              vcpu->preempted = false;
  
        kvm_arch_vcpu_load(vcpu, cpu);
  }
@@@ -3094,8 -2938,6 +3094,8 @@@ static void kvm_sched_out(struct preemp
  {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
  
 +      if (current->state == TASK_RUNNING)
 +              vcpu->preempted = true;
        kvm_arch_vcpu_put(vcpu);
  }
  
@@@ -3105,9 -2947,6 +3105,9 @@@ int kvm_init(void *opaque, unsigned vcp
        int r;
        int cpu;
  
 +      r = kvm_irqfd_init();
 +      if (r)
 +              goto out_irqfd;
        r = kvm_arch_init(opaque);
        if (r)
                goto out_fail;
@@@ -3188,8 -3027,6 +3188,8 @@@ out_free_0a
  out_free_0:
        kvm_arch_exit();
  out_fail:
 +      kvm_irqfd_exit();
 +out_irqfd:
        return r;
  }
  EXPORT_SYMBOL_GPL(kvm_init);
@@@ -3206,7 -3043,6 +3206,7 @@@ void kvm_exit(void
        on_each_cpu(hardware_disable_nolock, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
 +      kvm_irqfd_exit();
        free_cpumask_var(cpus_hardware_enabled);
  }
  EXPORT_SYMBOL_GPL(kvm_exit);