Merge remote-tracking branch 'tip/auto-latest'
author    Thierry Reding <treding@nvidia.com>
          Thu, 24 Oct 2013 12:39:48 +0000 (14:39 +0200)
committer Thierry Reding <treding@nvidia.com>
          Thu, 24 Oct 2013 12:51:22 +0000 (14:51 +0200)
Conflicts:
arch/h8300/include/asm/Kbuild
include/linux/wait.h
tools/perf/config/Makefile
tools/perf/config/feature-tests.mak

19 files changed:
MAINTAINERS
arch/arm/Kconfig
arch/arm/include/asm/Kbuild
arch/arm/kernel/setup.c
arch/arm64/kernel/setup.c
arch/powerpc/Kconfig
arch/x86/Kconfig
arch/x86/kernel/smpboot.c
drivers/acpi/processor_idle.c
drivers/idle/intel_idle.c
include/linux/mm.h
include/linux/wait.h
init/Kconfig
init/main.c
kernel/Makefile
tools/perf/config/Makefile
tools/perf/config/feature-checks/Makefile
tools/perf/config/feature-checks/test-all.c
tools/perf/config/feature-checks/test-libunwind-debug-frame.c

diff --combined MAINTAINERS
index 10a90130579c8f603ff9998e82dca556ee847744,016a5c44b0f33cc7b6e655c69ad1d4f1f463e488..316572ad49f8a8dd02ab5b2c601aaf5938542f11
@@@ -253,20 -253,6 +253,20 @@@ F:       drivers/pci/*acpi
  F:    drivers/pci/*/*acpi*
  F:    drivers/pci/*/*/*acpi*
  
 +ACPI COMPONENT ARCHITECTURE (ACPICA)
 +M:    Robert Moore <robert.moore@intel.com>
 +M:    Lv Zheng <lv.zheng@intel.com>
 +M:    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 +L:    linux-acpi@vger.kernel.org
 +L:    devel@acpica.org
 +W:    https://acpica.org/
 +W:    https://github.com/acpica/acpica/
 +Q:    https://patchwork.kernel.org/project/linux-acpi/list/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +S:    Supported
 +F:    drivers/acpi/acpica/
 +F:    include/acpi/
 +
  ACPI FAN DRIVER
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
@@@ -1416,7 -1402,7 +1416,7 @@@ M:      Wolfram Sang <wsa@the-dreams.de
  L:    linux-i2c@vger.kernel.org
  S:    Maintained
  F:    drivers/misc/eeprom/at24.c
 -F:    include/linux/i2c/at24.h
 +F:    include/linux/platform_data/at24.h
  
  ATA OVER ETHERNET (AOE) DRIVER
  M:    "Ed L. Cashin" <ecashin@coraid.com>
@@@ -1672,9 -1658,9 +1672,9 @@@ F:      drivers/video/backlight
  F:    include/linux/backlight.h
  
  BATMAN ADVANCED
 -M:    Marek Lindner <lindner_marek@yahoo.de>
 -M:    Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
 -M:    Antonio Quartulli <ordex@autistici.org>
 +M:    Marek Lindner <mareklindner@neomailbox.ch>
 +M:    Simon Wunderlich <sw@simonwunderlich.de>
 +M:    Antonio Quartulli <antonio@meshcoding.com>
  L:    b.a.t.m.a.n@lists.open-mesh.org
  W:    http://www.open-mesh.org/
  S:    Maintained
@@@ -1805,7 -1791,6 +1805,7 @@@ F:      include/net/bluetooth
  
  BONDING DRIVER
  M:    Jay Vosburgh <fubar@us.ibm.com>
 +M:    Veaceslav Falico <vfalico@redhat.com>
  M:    Andy Gospodarek <andy@greyhouse.net>
  L:    netdev@vger.kernel.org
  W:    http://sourceforge.net/projects/bonding/
@@@ -2733,8 -2718,6 +2733,8 @@@ T:      git git://git.linaro.org/people/sumi
  DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
  M:    Vinod Koul <vinod.koul@intel.com>
  M:    Dan Williams <dan.j.williams@intel.com>
 +L:    dmaengine@vger.kernel.org
 +Q:    https://patchwork.kernel.org/project/linux-dmaengine/list/
  S:    Supported
  F:    drivers/dma/
  F:    include/linux/dma*
@@@ -2839,9 -2822,7 +2839,9 @@@ L:      dri-devel@lists.freedesktop.or
  L:    linux-tegra@vger.kernel.org
  T:    git git://anongit.freedesktop.org/tegra/linux.git
  S:    Maintained
 +F:    drivers/gpu/drm/tegra/
  F:    drivers/gpu/host1x/
 +F:    include/linux/host1x.h
  F:    include/uapi/drm/tegra_drm.h
  F:    Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
  
@@@ -4376,10 -4357,7 +4376,10 @@@ F:    arch/x86/kernel/microcode_intel.
  
  INTEL I/OAT DMA DRIVER
  M:    Dan Williams <dan.j.williams@intel.com>
 -S:    Maintained
 +M:    Dave Jiang <dave.jiang@intel.com>
 +L:    dmaengine@vger.kernel.org
 +Q:    https://patchwork.kernel.org/project/linux-dmaengine/list/
 +S:    Supported
  F:    drivers/dma/ioat*
  
  INTEL IOMMU (VT-d)
@@@ -6400,12 -6378,6 +6400,12 @@@ S:    Supporte
  F:    Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
  F:    drivers/pci/host/pci-tegra.c
  
 +PCI DRIVER FOR SAMSUNG EXYNOS
 +M:    Jingoo Han <jg1.han@samsung.com>
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    drivers/pci/host/pci-exynos.c
 +
  PCMCIA SUBSYSTEM
  P:    Linux PCMCIA Team
  L:    linux-pcmcia@lists.infradead.org
@@@ -6876,14 -6848,6 +6876,14 @@@ L:    linux-hexagon@vger.kernel.or
  S:    Supported
  F:    arch/hexagon/
  
 +QUALCOMM WCN36XX WIRELESS DRIVER
 +M:    Eugene Krasnikov <k.eugene.e@gmail.com>
 +L:    wcn36xx@lists.infradead.org
 +W:    http://wireless.kernel.org/en/users/Drivers/wcn36xx
 +T:    git git://github.com/KrasnikovEugene/wcn36xx.git
 +S:    Supported
 +F:    drivers/net/wireless/ath/wcn36xx/
 +
  QUICKCAM PARALLEL PORT WEBCAMS
  M:    Hans Verkuil <hverkuil@xs4all.nl>
  L:    linux-media@vger.kernel.org
@@@ -6971,7 -6935,7 +6971,7 @@@ M:      "Paul E. McKenney" <paulmck@linux.vn
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
  F:    Documentation/RCU/torture.txt
- F:    kernel/rcutorture.c
+ F:    kernel/rcu/torture.c
  
  RDC R-321X SoC
  M:    Florian Fainelli <florian@openwrt.org>
@@@ -6998,8 -6962,9 +6998,9 @@@ T:      git git://git.kernel.org/pub/scm/lin
  F:    Documentation/RCU/
  X:    Documentation/RCU/torture.txt
  F:    include/linux/rcu*
- F:    kernel/rcu*
- X:    kernel/rcutorture.c
+ X:    include/linux/srcu.h
+ F:    kernel/rcu/
+ X:    kernel/rcu/torture.c
  
  REAL TIME CLOCK (RTC) SUBSYSTEM
  M:    Alessandro Zummo <a.zummo@towertech.it>
@@@ -7324,6 -7289,8 +7325,8 @@@ S:      Maintaine
  F:    kernel/sched/
  F:    include/linux/sched.h
  F:    include/uapi/linux/sched.h
+ F:    kernel/wait.c
+ F:    include/linux/wait.h
  
  SCORE ARCHITECTURE
  M:    Chen Liqin <liqin.linux@gmail.com>
@@@ -7458,10 -7425,9 +7461,10 @@@ SELINUX SECURITY MODUL
  M:    Stephen Smalley <sds@tycho.nsa.gov>
  M:    James Morris <james.l.morris@oracle.com>
  M:    Eric Paris <eparis@parisplace.org>
 +M:    Paul Moore <paul@paul-moore.com>
  L:    selinux@tycho.nsa.gov (subscribers-only, general discussion)
  W:    http://selinuxproject.org
 -T:    git git://git.infradead.org/users/eparis/selinux.git
 +T:    git git://git.infradead.org/users/pcmoore/selinux
  S:    Supported
  F:    include/linux/selinux*
  F:    security/selinux/
@@@ -7687,8 -7653,8 +7690,8 @@@ M:      "Paul E. McKenney" <paulmck@linux.vn
  W:    http://www.rdrop.com/users/paulmck/RCU/
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
- F:    include/linux/srcu*
- F:    kernel/srcu*
+ F:    include/linux/srcu.h
+ F:    kernel/rcu/srcu.c
  
  SMACK SECURITY MODULE
  M:    Casey Schaufler <casey@schaufler-ca.com>
@@@ -7859,13 -7825,6 +7862,13 @@@ F:    Documentation/sound/alsa/soc
  F:    sound/soc/
  F:    include/sound/soc*
  
 +SOUND - DMAENGINE HELPERS
 +M:    Lars-Peter Clausen <lars@metafoo.de>
 +S:    Supported
 +F:    include/sound/dmaengine_pcm.h
 +F:    sound/core/pcm_dmaengine.c
 +F:    sound/soc/soc-generic-dmaengine-pcm.c
 +
  SPARC + UltraSPARC (sparc/sparc64)
  M:    "David S. Miller" <davem@davemloft.net>
  L:    sparclinux@vger.kernel.org
@@@ -8550,7 -8509,6 +8553,7 @@@ F:      drivers/media/usb/tm6000
  TPM DEVICE DRIVER
  M:    Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
  M:    Ashley Lai <ashley@ashleylai.com>
 +M:    Peter Huewe <peterhuewe@gmx.de>
  M:    Rajiv Andrade <mail@srajiv.net>
  W:    http://tpmdd.sourceforge.net
  M:    Marcel Selhorst <tpmdd@selhorst.net>
@@@ -8647,6 -8605,14 +8650,6 @@@ S:     Maintaine
  F:    arch/m68k/*/*_no.*
  F:    arch/m68k/include/asm/*_no.*
  
 -UCLINUX FOR RENESAS H8/300 (H8300)
 -M:    Yoshinori Sato <ysato@users.sourceforge.jp>
 -W:    http://uclinux-h8.sourceforge.jp/
 -S:    Supported
 -F:    arch/h8300/
 -F:    drivers/ide/ide-h8300.c
 -F:    drivers/net/ethernet/8390/ne-h8300.c
 -
  UDF FILESYSTEM
  M:    Jan Kara <jack@suse.cz>
  S:    Maintained
diff --combined arch/arm/Kconfig
index b6a708ef6067124e3b8b7e3d0cc8aebccb72787d,323baf07fdcedfe2866819ca95dc225b05743ac3..5e27ab643757e84871ef6616b5ae8acb100d72d7
@@@ -5,8 -5,6 +5,8 @@@ config AR
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_CUSTOM_GPIO_H
 +      select ARCH_USE_CMPXCHG_LOCKREF
 +      select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT if MMU
        select CLONE_BACKWARDS
        select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
        select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
        select HAVE_PERF_EVENTS
 +      select HAVE_PERF_REGS
 +      select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16
+       select HAVE_VIRT_CPU_ACCOUNTING_GEN
        select IRQ_FORCED_THREADING
        select KTIME_SCALAR
        select MODULES_USE_ELF_REL
@@@ -696,6 -693,7 +697,6 @@@ config ARCH_SA110
        select GENERIC_CLOCKEVENTS
        select HAVE_IDE
        select ISA
 -      select NEED_MACH_GPIO_H
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
        help
@@@ -1552,32 -1550,6 +1553,32 @@@ config MCP
          for (multi-)cluster based systems, such as big.LITTLE based
          systems.
  
 +config BIG_LITTLE
 +      bool "big.LITTLE support (Experimental)"
 +      depends on CPU_V7 && SMP
 +      select MCPM
 +      help
 +        This option enables support selections for the big.LITTLE
 +        system architecture.
 +
 +config BL_SWITCHER
 +      bool "big.LITTLE switcher support"
 +      depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
 +      select CPU_PM
 +      select ARM_CPU_SUSPEND
 +      help
 +        The big.LITTLE "switcher" provides the core functionality to
 +        transparently handle transition between a cluster of A15's
 +        and a cluster of A7's in a big.LITTLE system.
 +
 +config BL_SWITCHER_DUMMY_IF
 +      tristate "Simple big.LITTLE switcher user interface"
 +      depends on BL_SWITCHER && DEBUG_KERNEL
 +      help
 +        This is a simple and dummy char dev interface to control
 +        the big.LITTLE switcher core code.  It is meant for
 +        debugging purposes only.
 +
  choice
        prompt "Memory split"
        default VMSPLIT_3G
diff --combined arch/arm/include/asm/Kbuild
index a6395c0277152f74645b8fceb572abc9b1c5a2db,1a7024b413511d6742f6fb928407603c648c77e3..c38b58c8020215f54af8140e97de8467ad3c3c16
@@@ -24,7 -24,6 +24,7 @@@ generic-y += sembuf.
  generic-y += serial.h
  generic-y += shmbuf.h
  generic-y += siginfo.h
 +generic-y += simd.h
  generic-y += sizes.h
  generic-y += socket.h
  generic-y += sockios.h
@@@ -33,3 -32,4 +33,4 @@@ generic-y += termios.
  generic-y += timex.h
  generic-y += trace_clock.h
  generic-y += unaligned.h
+ generic-y += preempt.h
diff --combined arch/arm/kernel/setup.c
index 6b4ce802ac4ea3e2d88bbe04adce6713651fb066,5d65438685d8516f6455393940e4bdaed107699a..ffb5809efc626b6c6eb61a0d30fc03104ec8ec39
@@@ -73,8 -73,6 +73,8 @@@ __setup("fpe=", fpe_setup)
  #endif
  
  extern void paging_init(const struct machine_desc *desc);
 +extern void early_paging_init(const struct machine_desc *,
 +                            struct proc_info_list *);
  extern void sanity_check_meminfo(void);
  extern enum reboot_mode reboot_mode;
  extern void setup_dma_zone(const struct machine_desc *desc);
@@@ -601,8 -599,6 +601,8 @@@ static void __init setup_processor(void
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
  #endif
  
 +      erratum_a15_798181_init();
 +
        feat_v6_fixup();
  
        cacheid_init();
@@@ -882,8 -878,6 +882,8 @@@ void __init setup_arch(char **cmdline_p
        parse_early_param();
  
        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 +
 +      early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);
  
@@@ -981,6 -975,7 +981,7 @@@ static const char *hwcap_str[] = 
        "idivt",
        "vfpd32",
        "lpae",
+       "evtstrm",
        NULL
  };
  
diff --combined arch/arm64/kernel/setup.c
index a4ed2d3e4de9e158ad66d44c370ed519355d3902,d355b7b9710bcd067b136a36e7523a3fb3fd85cf..c7ad57b4391893f386c1a6b6fd871498125c90a2
@@@ -60,6 -60,16 +60,16 @@@ EXPORT_SYMBOL(processor_id)
  unsigned long elf_hwcap __read_mostly;
  EXPORT_SYMBOL_GPL(elf_hwcap);
  
+ #ifdef CONFIG_COMPAT
+ #define COMPAT_ELF_HWCAP_DEFAULT      \
+                               (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+                                COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+                                COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+                                COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+                                COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+ unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+ #endif
  static const char *cpu_name;
  static const char *machine_name;
  phys_addr_t __fdt_pointer __initdata;
@@@ -124,18 -134,70 +134,18 @@@ static void __init setup_processor(void
  
  static void __init setup_machine_fdt(phys_addr_t dt_phys)
  {
 -      struct boot_param_header *devtree;
 -      unsigned long dt_root;
 -
 -      /* Check we have a non-NULL DT pointer */
 -      if (!dt_phys) {
 -              early_print("\n"
 -                      "Error: NULL or invalid device tree blob\n"
 -                      "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
 -                      "\nPlease check your bootloader.\n");
 -
 -              while (true)
 -                      cpu_relax();
 -
 -      }
 -
 -      devtree = phys_to_virt(dt_phys);
 -
 -      /* Check device tree validity */
 -      if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
 +      if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
                early_print("\n"
                        "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
 -                      "Expected 0x%x, found 0x%x\n"
 +                      "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
                        "\nPlease check your bootloader.\n",
 -                      dt_phys, devtree, OF_DT_HEADER,
 -                      be32_to_cpu(devtree->magic));
 +                      dt_phys, phys_to_virt(dt_phys));
  
                while (true)
                        cpu_relax();
        }
  
 -      initial_boot_params = devtree;
 -      dt_root = of_get_flat_dt_root();
 -
 -      machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
 -      if (!machine_name)
 -              machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
 -      if (!machine_name)
 -              machine_name = "<unknown>";
 -      pr_info("Machine: %s\n", machine_name);
 -
 -      /* Retrieve various information from the /chosen node */
 -      of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
 -      /* Initialize {size,address}-cells info */
 -      of_scan_flat_dt(early_init_dt_scan_root, NULL);
 -      /* Setup memory, calling early_init_dt_add_memory_arch */
 -      of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 -}
 -
 -void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 -{
 -      base &= PAGE_MASK;
 -      size &= PAGE_MASK;
 -      if (base + size < PHYS_OFFSET) {
 -              pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
 -                         base, base + size);
 -              return;
 -      }
 -      if (base < PHYS_OFFSET) {
 -              pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
 -                         base, PHYS_OFFSET);
 -              size -= PHYS_OFFSET - base;
 -              base = PHYS_OFFSET;
 -      }
 -      memblock_add(base, size);
 +      machine_name = of_flat_dt_get_machine_name();
  }
  
  /*
@@@ -252,6 -314,7 +262,7 @@@ subsys_initcall(topology_init)
  static const char *hwcap_str[] = {
        "fp",
        "asimd",
+       "evtstrm",
        NULL
  };
  
diff --combined arch/powerpc/Kconfig
index e2e03a6d060f4543e76c8bf4c34b6a5886822fe3,b365d5cbb722ce9ef712a6066695a409f9e0c627..58736860cffee48fbc4908a48fcb2e6a1c444c39
@@@ -85,7 -85,6 +85,7 @@@ config GENERIC_HWEIGH
  config PPC
        bool
        default y
 +      select ARCH_MIGHT_HAVE_PC_PARPORT
        select BINFMT_ELF
        select OF
        select OF_EARLY_FLATTREE
@@@ -98,7 -97,7 +98,7 @@@
        select VIRT_TO_BUS if !PPC64
        select HAVE_IDE
        select HAVE_IOREMAP_PROT
 -      select HAVE_EFFICIENT_UNALIGNED_ACCESS
 +      select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_LITTLE_ENDIAN
        select HAVE_KPROBES
        select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
        select OLD_SIGSUSPEND
        select OLD_SIGACTION if PPC32
        select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK
  
 +config GENERIC_CSUM
 +      def_bool CPU_LITTLE_ENDIAN
 +
  config EARLY_PRINTK
        bool
        default y
@@@ -1013,9 -1010,6 +1014,9 @@@ config PHYSICAL_STAR
        default "0x00000000"
  endif
  
 +config        ARCH_RANDOM
 +      def_bool n
 +
  source "net/Kconfig"
  
  source "drivers/Kconfig"
diff --combined arch/x86/Kconfig
index e0836de76f3c90dcb57ec0e7bbfb293cb7d0ca99,6af783f547251ec7eb3973b87a3539dd31a7b467..591b19ec8af8216954888958c315195e3fe14ce4
@@@ -22,7 -22,6 +22,7 @@@ config X86_6
  config X86
        def_bool y
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 +      select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_AOUT if X86_32
        select HAVE_UNSTABLE_SCHED_CLOCK
        select ARCH_SUPPORTS_NUMA_BALANCING
        select COMPAT_OLD_SIGACTION if IA32_EMULATION
        select RTC_LIB
        select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
  
  config INSTRUCTION_DECODER
        def_bool y
@@@ -255,6 -255,10 +256,6 @@@ config ARCH_HWEIGHT_CFLAG
        default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
        default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
  
 -config ARCH_CPU_PROBE_RELEASE
 -      def_bool y
 -      depends on HOTPLUG_CPU
 -
  config ARCH_SUPPORTS_UPROBES
        def_bool y
  
@@@ -753,20 -757,25 +754,25 @@@ config DM
          BIOS code.
  
  config GART_IOMMU
-       bool "GART IOMMU support" if EXPERT
-       default y
+       bool "Old AMD GART IOMMU support"
        select SWIOTLB
        depends on X86_64 && PCI && AMD_NB
        ---help---
-         Support for full DMA access of devices with 32bit memory access only
-         on systems with more than 3GB. This is usually needed for USB,
-         sound, many IDE/SATA chipsets and some other devices.
-         Provides a driver for the AMD Athlon64/Opteron/Turion/Sempron GART
-         based hardware IOMMU and a software bounce buffer based IOMMU used
-         on Intel systems and as fallback.
-         The code is only active when needed (enough memory and limited
-         device) unless CONFIG_IOMMU_DEBUG or iommu=force is specified
-         too.
+         Provides a driver for older AMD Athlon64/Opteron/Turion/Sempron
+         GART based hardware IOMMUs.
+         The GART supports full DMA access for devices with 32-bit access
+         limitations, on systems with more than 3 GB. This is usually needed
+         for USB, sound, many IDE/SATA chipsets and some other devices.
+         Newer systems typically have a modern AMD IOMMU, supported via
+         the CONFIG_AMD_IOMMU=y config option.
+         In normal configurations this driver is only active when needed:
+         there's more than 3 GB of memory and the system contains a
+         32-bit limited device.
+         If unsure, say Y.
  
  config CALGARY_IOMMU
        bool "IBM Calgary IOMMU support"
@@@ -1591,7 -1600,7 +1597,7 @@@ config EFI_STU
            This kernel feature allows a bzImage to be loaded directly
          by EFI firmware without the use of a bootloader.
  
-         See Documentation/x86/efi-stub.txt for more information.
+         See Documentation/efi-stub.txt for more information.
  
  config SECCOMP
        def_bool y
@@@ -1720,16 -1729,56 +1726,56 @@@ config RELOCATABL
  
          Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
          it has been loaded at and the compile time physical address
-         (CONFIG_PHYSICAL_START) is ignored.
+         (CONFIG_PHYSICAL_START) is used as the minimum location.
  
- # Relocation on x86-32 needs some additional build support
+ config RANDOMIZE_BASE
+       bool "Randomize the address of the kernel image"
+       depends on RELOCATABLE
+       depends on !HIBERNATION
+       default n
+       ---help---
+          Randomizes the physical and virtual address at which the
+          kernel image is decompressed, as a security feature that
+          deters exploit attempts relying on knowledge of the location
+          of kernel internals.
+          Entropy is generated using the RDRAND instruction if it
+          is supported.  If not, then RDTSC is used, if supported. If
+          neither RDRAND nor RDTSC are supported, then no randomness
+          is introduced.
+          The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET,
+          and aligned according to PHYSICAL_ALIGN.
+ config RANDOMIZE_BASE_MAX_OFFSET
+       hex "Maximum ASLR offset allowed"
+       depends on RANDOMIZE_BASE
+       range 0x0 0x20000000 if X86_32
+       default "0x20000000" if X86_32
+       range 0x0 0x40000000 if X86_64
+       default "0x40000000" if X86_64
+       ---help---
+        Determines the maximal offset in bytes that will be applied to the
+        kernel when Address Space Layout Randomization (ASLR) is active.
+        Must be less than or equal to the actual physical memory on the
+        system. This must be a multiple of CONFIG_PHYSICAL_ALIGN.
+        On 32-bit this is limited to 512MiB.
+        On 64-bit this is limited by how the kernel fixmap page table is
+        positioned, so this cannot be larger that 1GiB currently. Normally
+        there is a 512MiB to 1.5GiB split between kernel and modules. When
+        this is raised above the 512MiB default, the modules area will
+        shrink to compensate, up to the current maximum 1GiB to 1GiB split.
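Taken together, the two help texts above describe a small decision procedure: take entropy from RDRAND when the CPU supports it, fall back to RDTSC, otherwise add no randomness, then bound the offset by RANDOMIZE_BASE_MAX_OFFSET and keep it aligned to PHYSICAL_ALIGN. A rough x86_64 userspace sketch of that selection order (illustrative names such as kaslr_entropy(); this is not the kernel's boot-time implementation, which does not appear in this diff):

```c
/* Illustrative only -- names and structure are made up for this sketch. */
#include <stdint.h>
#include <cpuid.h>          /* __get_cpuid(), bit_RDRND (GCC/clang) */

static int rdrand64(uint64_t *v)
{
	unsigned char ok;

	/* RDRAND sets CF on success; the retry loop is omitted for brevity. */
	__asm__ volatile("rdrand %0; setc %1" : "=r"(*v), "=qm"(ok));
	return ok;
}

static uint64_t rdtsc(void)
{
	uint32_t lo, hi;

	__asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

/* Pick an entropy source in the order described by the help text above. */
static uint64_t kaslr_entropy(uint64_t align, uint64_t max_offset)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t raw = 0;

	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & bit_RDRND) &&
	    rdrand64(&raw))
		;                       /* hardware RNG */
	else
		raw = rdtsc();          /* weaker fallback */

	/* Clamp to the configured cap and round down to the (power-of-two) alignment. */
	return (raw % max_offset) & ~(align - 1);
}
```

The final clamping step is only there to make the "multiple of CONFIG_PHYSICAL_ALIGN, at most RANDOMIZE_BASE_MAX_OFFSET" constraint from the second help text concrete.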
+ # Relocation on x86 needs some additional build support
  config X86_NEED_RELOCS
        def_bool y
-       depends on X86_32 && RELOCATABLE
+       depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE)
  
  config PHYSICAL_ALIGN
        hex "Alignment value to which kernel should be aligned"
-       default "0x1000000"
+       default "0x200000"
        range 0x2000 0x1000000 if X86_32
        range 0x200000 0x1000000 if X86_64
        ---help---
diff --combined arch/x86/kernel/smpboot.c
index e73b3f53310c7663b2c37a3cffd57c0cad974a3c,2a165580fa1620e5192472d67ac8a54a7f36da89..85dc05a3aa02b3251b64ae0406b95716c714cb85
  #include <asm/setup.h>
  #include <asm/uv/uv.h>
  #include <linux/mc146818rtc.h>
  #include <asm/smpboot_hooks.h>
  #include <asm/i8259.h>
  #include <asm/realmode.h>
+ #include <asm/misc.h>
  
  /* State of each CPU */
  DEFINE_PER_CPU(int, cpu_state) = { 0 };
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -/*
 - * We need this for trampoline_base protection from concurrent accesses when
 - * off- and onlining cores wildly.
 - */
 -static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 -
 -void cpu_hotplug_driver_lock(void)
 -{
 -      mutex_lock(&x86_cpu_hotplug_driver_mutex);
 -}
 -
 -void cpu_hotplug_driver_unlock(void)
 -{
 -      mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 -}
 -
 -ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
 -ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
 -#endif
 -
  /* Number of siblings per CPU package */
  int smp_num_siblings = 1;
  EXPORT_SYMBOL(smp_num_siblings);
@@@ -627,22 -647,46 +626,46 @@@ wakeup_secondary_cpu_via_init(int phys_
        return (send_status | accept_status);
  }
  
+ void smp_announce(void)
+ {
+       int num_nodes = num_online_nodes();
+       printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
+              num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
+ }
  /* reduce the number of lines printed when booting a large cpu count system */
  static void announce_cpu(int cpu, int apicid)
  {
        static int current_node = -1;
        int node = early_cpu_to_node(cpu);
-       int max_cpu_present = find_last_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
+       static int width, node_width;
+       if (!width)
+               width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */
+       if (!node_width)
+               node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */
+       if (cpu == 1)
+               printk(KERN_INFO "x86: Booting SMP configuration:\n");
  
        if (system_state == SYSTEM_BOOTING) {
                if (node != current_node) {
                        if (current_node > (-1))
-                               pr_cont(" OK\n");
+                               pr_cont("\n");
                        current_node = node;
-                       pr_info("Booting Node %3d, Processors ", node);
+                       printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
+                              node_width - num_digits(node), " ", node);
                }
-               pr_cont(" #%4d%s", cpu, cpu == max_cpu_present ? " OK\n" : "");
-               return;
+               /* Add padding for the BSP */
+               if (cpu == 1)
+                       pr_cont("%*s", width + 1, " ");
+               pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);
        } else
                pr_info("Booting Node %d Processor %d APIC 0x%x\n",
                        node, cpu, apicid);
diff --combined drivers/acpi/processor_idle.c
index 35c8f2bbcc40b45510ea7d1b67ee294efc33471b,c7414a545a4f402506468c0e1d73aa160e55e836..644516d9bde6cf18a824d29c3ae1730de30fd1a6
@@@ -119,17 -119,10 +119,10 @@@ static struct dmi_system_id processor_p
   */
  static void acpi_safe_halt(void)
  {
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we
-        * test NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched()) {
+       if (!tif_need_resched()) {
                safe_halt();
                local_irq_disable();
        }
-       current_thread_info()->status |= TS_POLLING;
  }
  
  #ifdef ARCH_APICTIMER_STOPS_ON_C3
@@@ -272,6 -265,9 +265,6 @@@ static void tsc_check_state(int state) 
  static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
  {
  
 -      if (!pr)
 -              return -EINVAL;
 -
        if (!pr->pblk)
                return -ENODEV;
  
@@@ -734,6 -730,11 +727,11 @@@ static int acpi_idle_enter_c1(struct cp
        if (unlikely(!pr))
                return -EINVAL;
  
+       if (cx->entry_method == ACPI_CSTATE_FFH) {
+               if (current_set_polling_and_test())
+                       return -EINVAL;
+       }
        lapic_timer_state_broadcast(pr, cx, 1);
        acpi_idle_do_entry(cx);
  
@@@ -787,18 -788,9 +785,9 @@@ static int acpi_idle_enter_simple(struc
        if (unlikely(!pr))
                return -EINVAL;
  
-       if (cx->entry_method != ACPI_CSTATE_FFH) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we test
-                * NEED_RESCHED:
-                */
-               smp_mb();
-               if (unlikely(need_resched())) {
-                       current_thread_info()->status |= TS_POLLING;
+       if (cx->entry_method == ACPI_CSTATE_FFH) {
+               if (current_set_polling_and_test())
                        return -EINVAL;
-               }
        }
  
        /*
  
        sched_clock_idle_wakeup_event(0);
  
-       if (cx->entry_method != ACPI_CSTATE_FFH)
-               current_thread_info()->status |= TS_POLLING;
        lapic_timer_state_broadcast(pr, cx, 0);
        return index;
  }
@@@ -855,18 -844,9 +841,9 @@@ static int acpi_idle_enter_bm(struct cp
                }
        }
  
-       if (cx->entry_method != ACPI_CSTATE_FFH) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we test
-                * NEED_RESCHED:
-                */
-               smp_mb();
-               if (unlikely(need_resched())) {
-                       current_thread_info()->status |= TS_POLLING;
+       if (cx->entry_method == ACPI_CSTATE_FFH) {
+               if (current_set_polling_and_test())
                        return -EINVAL;
-               }
        }
  
        acpi_unlazy_tlb(smp_processor_id());
  
        sched_clock_idle_wakeup_event(0);
  
-       if (cx->entry_method != ACPI_CSTATE_FFH)
-               current_thread_info()->status |= TS_POLLING;
        lapic_timer_state_broadcast(pr, cx, 0);
        return index;
  }
@@@ -1073,8 -1050,12 +1047,8 @@@ int acpi_processor_hotplug(struct acpi_
        if (disabled_by_idle_boot_param())
                return 0;
  
 -      if (!pr)
 -              return -EINVAL;
 -
 -      if (nocst) {
 +      if (nocst)
                return -ENODEV;
 -      }
  
        if (!pr->flags.power_setup_done)
                return -ENODEV;
@@@ -1101,6 -1082,9 +1075,6 @@@ int acpi_processor_cst_has_changed(stru
        if (disabled_by_idle_boot_param())
                return 0;
  
 -      if (!pr)
 -              return -EINVAL;
 -
        if (nocst)
                return -ENODEV;
  
@@@ -1173,6 -1157,9 +1147,6 @@@ int acpi_processor_power_init(struct ac
                first_run++;
        }
  
 -      if (!pr)
 -              return -EINVAL;
 -
        if (acpi_gbl_FADT.cst_control && !nocst) {
                status =
                    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
diff --combined drivers/idle/intel_idle.c
index 3a449f65eb2d3beb889217893a0e9d677214c1d9,f116d664b4737edcabb621a9fdb7076cbcee1334..45935d9949d2f840f22a720df675a521aca78f61
@@@ -1,7 -1,7 +1,7 @@@
  /*
   * intel_idle.c - native hardware idle loop for modern Intel processors
   *
 - * Copyright (c) 2010, Intel Corporation.
 + * Copyright (c) 2013, Intel Corporation.
   * Len Brown <len.brown@intel.com>
   *
   * This program is free software; you can redistribute it and/or modify it
@@@ -123,7 -123,7 +123,7 @@@ static struct cpuidle_state *cpuidle_st
   * which is also the index into the MWAIT hint array.
   * Thus C0 is a dummy.
   */
 -static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
 +static struct cpuidle_state nehalem_cstates[] __initdata = {
        {
                .name = "C1-NHM",
                .desc = "MWAIT 0x00",
                .enter = NULL }
  };
  
 -static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
 +static struct cpuidle_state snb_cstates[] __initdata = {
        {
                .name = "C1-SNB",
                .desc = "MWAIT 0x00",
                .enter = NULL }
  };
  
 -static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
 +static struct cpuidle_state ivb_cstates[] __initdata = {
        {
                .name = "C1-IVB",
                .desc = "MWAIT 0x00",
                .enter = NULL }
  };
  
 -static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
 +static struct cpuidle_state hsw_cstates[] __initdata = {
        {
                .name = "C1-HSW",
                .desc = "MWAIT 0x00",
                .enter = NULL }
  };
  
 -static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
 +static struct cpuidle_state atom_cstates[] __initdata = {
        {
                .name = "C1E-ATM",
                .desc = "MWAIT 0x00",
        {
                .enter = NULL }
  };
 +static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
 +      {
 +              .name = "C1-AVN",
 +              .desc = "MWAIT 0x00",
 +              .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
 +              .exit_latency = 1,
 +              .target_residency = 1,
 +              .enter = &intel_idle },
 +      {
 +              .name = "C1E-AVN",
 +              .desc = "MWAIT 0x01",
 +              .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
 +              .exit_latency = 5,
 +              .target_residency = 10,
 +              .enter = &intel_idle },
 +      {
 +              .name = "C6NS-AVN",     /* No Cache Shrink */
 +              .desc = "MWAIT 0x51",
 +              .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 +              .exit_latency = 15,
 +              .target_residency = 45,
 +              .enter = &intel_idle },
 +      {
 +              .name = "C6FS-AVN",     /* Full Cache shrink */
 +              .desc = "MWAIT 0x52",
 +              .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 +              .exit_latency = 150,            /* fake penalty added due to cold cache */
 +              .target_residency = 100000,     /* fake penalty added due to cold cache */
 +              .enter = &intel_idle },
 +};
  
  /**
   * intel_idle
@@@ -389,7 -359,7 +389,7 @@@ static int intel_idle(struct cpuidle_de
        if (!(lapic_timer_reliable_states & (1 << (cstate))))
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
  
-       if (!need_resched()) {
+       if (!current_set_polling_and_test()) {
  
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
@@@ -492,11 -462,6 +492,11 @@@ static const struct idle_cpu idle_cpu_h
        .disable_promotion_to_c1e = true,
  };
  
 +static const struct idle_cpu idle_cpu_avn = {
 +      .state_table = avn_cstates,
 +      .disable_promotion_to_c1e = true,
 +};
 +
  #define ICPU(model, cpu) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
  
@@@ -518,7 -483,6 +518,7 @@@ static const struct x86_cpu_id intel_id
        ICPU(0x3f, idle_cpu_hsw),
        ICPU(0x45, idle_cpu_hsw),
        ICPU(0x46, idle_cpu_hsw),
 +      ICPU(0x4D, idle_cpu_avn),
        {}
  };
  MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
  /*
   * intel_idle_probe()
   */
 -static int intel_idle_probe(void)
 +static int __init intel_idle_probe(void)
  {
        unsigned int eax, ebx, ecx;
        const struct x86_cpu_id *id;
@@@ -594,7 -558,7 +594,7 @@@ static void intel_idle_cpuidle_devices_
   * intel_idle_cpuidle_driver_init()
   * allocate, initialize cpuidle_states
   */
 -static int intel_idle_cpuidle_driver_init(void)
 +static int __init intel_idle_cpuidle_driver_init(void)
  {
        int cstate;
        struct cpuidle_driver *drv = &intel_idle_driver;
@@@ -664,7 -628,7 +664,7 @@@ static int intel_idle_cpu_init(int cpu
                int num_substates, mwait_hint, mwait_cstate, mwait_substate;
  
                if (cpuidle_state_table[cstate].enter == NULL)
 -                      continue;
 +                      break;
  
                if (cstate + 1 > max_cstate) {
                        printk(PREFIX "max_cstate %d reached\n", max_cstate);
diff --combined include/linux/mm.h
index 1a0668e5a4eef0377b708b9aded0e11f8da67a80,81443d557a2e179a7d3768c63abc2519d9d1ed19..8aa4006b9636e11dbcb4e32e988f43c30c865396
@@@ -297,26 -297,12 +297,26 @@@ static inline int put_page_testzero(str
  /*
   * Try to grab a ref unless the page has a refcount of zero, return false if
   * that is the case.
 + * This can be called when MMU is off so it must not access
 + * any of the virtual mappings.
   */
  static inline int get_page_unless_zero(struct page *page)
  {
        return atomic_inc_not_zero(&page->_count);
  }
  
 +/*
 + * Try to drop a ref unless the page has a refcount of one, return false if
 + * that is the case.
 + * This is to make sure that the refcount won't become zero after this drop.
 + * This can be called when MMU is off so it must not access
 + * any of the virtual mappings.
 + */
 +static inline int put_page_unless_one(struct page *page)
 +{
 +      return atomic_add_unless(&page->_count, -1, 1);
 +}
 +
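The two comments above pin down the semantics of get_page_unless_zero() and the newly added put_page_unless_one(): take a reference only if the count is not already zero, and drop one only if that cannot take the count to zero. A minimal C11 sketch of the same compare-and-swap pattern (hypothetical helper names; the kernel itself uses atomic_inc_not_zero()/atomic_add_unless(), not these loops):

```c
#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the count is not already zero. */
static bool get_unless_zero(atomic_int *count)
{
	int old = atomic_load(count);

	while (old != 0)	/* CAS failure reloads 'old' and retries */
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;
	return false;
}

/* Drop a reference only if doing so cannot make the count reach zero. */
static bool put_unless_one(atomic_int *count)
{
	int old = atomic_load(count);

	while (old != 1)
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return true;
	return false;
}
```

Both loops touch nothing but the counter itself, which mirrors the "must not access any of the virtual mappings" constraint spelled out in the comments.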
  extern int page_is_ram(unsigned long pfn);
  
  /* Support for virtually mapped pages */
@@@ -595,11 -581,11 +595,11 @@@ static inline pte_t maybe_mkwrite(pte_
   * sets it, so none of the operations on it need to be atomic.
   */
  
- /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NID] | ... | FLAGS | */
+ /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
  #define SECTIONS_PGOFF                ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
  #define NODES_PGOFF           (SECTIONS_PGOFF - NODES_WIDTH)
  #define ZONES_PGOFF           (NODES_PGOFF - ZONES_WIDTH)
- #define LAST_NID_PGOFF                (ZONES_PGOFF - LAST_NID_WIDTH)
+ #define LAST_CPUPID_PGOFF     (ZONES_PGOFF - LAST_CPUPID_WIDTH)
  
  /*
   * Define the bit shifts to access each section.  For non-existent
  #define SECTIONS_PGSHIFT      (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
  #define NODES_PGSHIFT         (NODES_PGOFF * (NODES_WIDTH != 0))
  #define ZONES_PGSHIFT         (ZONES_PGOFF * (ZONES_WIDTH != 0))
- #define LAST_NID_PGSHIFT      (LAST_NID_PGOFF * (LAST_NID_WIDTH != 0))
+ #define LAST_CPUPID_PGSHIFT   (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
  
  /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
  #ifdef NODE_NOT_IN_PAGE_FLAGS
  #define ZONES_MASK            ((1UL << ZONES_WIDTH) - 1)
  #define NODES_MASK            ((1UL << NODES_WIDTH) - 1)
  #define SECTIONS_MASK         ((1UL << SECTIONS_WIDTH) - 1)
- #define LAST_NID_MASK         ((1UL << LAST_NID_WIDTH) - 1)
+ #define LAST_CPUPID_MASK      ((1UL << LAST_CPUPID_WIDTH) - 1)
  #define ZONEID_MASK           ((1UL << ZONEID_SHIFT) - 1)
  
  static inline enum zone_type page_zonenum(const struct page *page)
@@@ -675,51 -661,117 +675,117 @@@ static inline int page_to_nid(const str
  #endif
  
  #ifdef CONFIG_NUMA_BALANCING
- #ifdef LAST_NID_NOT_IN_PAGE_FLAGS
- static inline int page_nid_xchg_last(struct page *page, int nid)
+ static inline int cpu_pid_to_cpupid(int cpu, int pid)
  {
-       return xchg(&page->_last_nid, nid);
+       return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
  }
  
- static inline int page_nid_last(struct page *page)
+ static inline int cpupid_to_pid(int cpupid)
  {
-       return page->_last_nid;
+       return cpupid & LAST__PID_MASK;
  }
- static inline void page_nid_reset_last(struct page *page)
+ static inline int cpupid_to_cpu(int cpupid)
  {
-       page->_last_nid = -1;
+       return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
  }
- #else
- static inline int page_nid_last(struct page *page)
+ static inline int cpupid_to_nid(int cpupid)
  {
-       return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
+       return cpu_to_node(cpupid_to_cpu(cpupid));
  }
  
- extern int page_nid_xchg_last(struct page *page, int nid);
+ static inline bool cpupid_pid_unset(int cpupid)
+ {
+       return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
+ }
  
- static inline void page_nid_reset_last(struct page *page)
+ static inline bool cpupid_cpu_unset(int cpupid)
  {
-       int nid = (1 << LAST_NID_SHIFT) - 1;
+       return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
+ }
  
-       page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
-       page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
+ {
+       return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
+ }
+ #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
+ #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+ {
+       return xchg(&page->_last_cpupid, cpupid);
+ }
+ static inline int page_cpupid_last(struct page *page)
+ {
+       return page->_last_cpupid;
+ }
+ static inline void page_cpupid_reset_last(struct page *page)
+ {
+       page->_last_cpupid = -1;
  }
- #endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
  #else
- static inline int page_nid_xchg_last(struct page *page, int nid)
+ static inline int page_cpupid_last(struct page *page)
  {
-       return page_to_nid(page);
+       return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
  }
  
- static inline int page_nid_last(struct page *page)
+ extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+ static inline void page_cpupid_reset_last(struct page *page)
  {
-       return page_to_nid(page);
+       int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
+       page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+       page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
+ }
+ #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+ #else /* !CONFIG_NUMA_BALANCING */
+ static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+ {
+       return page_to_nid(page); /* XXX */
  }
  
- static inline void page_nid_reset_last(struct page *page)
+ static inline int page_cpupid_last(struct page *page)
  {
+       return page_to_nid(page); /* XXX */
  }
- #endif
+ static inline int cpupid_to_nid(int cpupid)
+ {
+       return -1;
+ }
+ static inline int cpupid_to_pid(int cpupid)
+ {
+       return -1;
+ }
+ static inline int cpupid_to_cpu(int cpupid)
+ {
+       return -1;
+ }
+ static inline int cpu_pid_to_cpupid(int nid, int pid)
+ {
+       return -1;
+ }
+ static inline bool cpupid_pid_unset(int cpupid)
+ {
+       return 1;
+ }
+ static inline void page_cpupid_reset_last(struct page *page)
+ {
+ }
+ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
+ {
+       return false;
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
  
  static inline struct zone *page_zone(const struct page *page)
  {
diff --combined include/linux/wait.h
index d9eff54c69cfc1ba5696e205e8122e072019eb1c,ec099b03e11b654e4558dd814e4a373deff13238..7f8caa519128d6bc2432161f05c53eebaa548065
@@@ -1,7 -1,8 +1,8 @@@
  #ifndef _LINUX_WAIT_H
  #define _LINUX_WAIT_H
+ /*
+  * Linux wait queue related types and methods
+  */
  #include <linux/list.h>
  #include <linux/stddef.h>
  #include <linux/spinlock.h>
@@@ -13,27 -14,27 +14,27 @@@ typedef int (*wait_queue_func_t)(wait_q
  int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
  
  struct __wait_queue {
-       unsigned int flags;
+       unsigned int            flags;
  #define WQ_FLAG_EXCLUSIVE     0x01
-       void *private;
-       wait_queue_func_t func;
-       struct list_head task_list;
+       void                    *private;
+       wait_queue_func_t       func;
+       struct list_head        task_list;
  };
  
  struct wait_bit_key {
-       void *flags;
-       int bit_nr;
- #define WAIT_ATOMIC_T_BIT_NR -1
+       void                    *flags;
+       int                     bit_nr;
+ #define WAIT_ATOMIC_T_BIT_NR  -1
  };
  
  struct wait_bit_queue {
-       struct wait_bit_key key;
-       wait_queue_t wait;
+       struct wait_bit_key     key;
+       wait_queue_t            wait;
  };
  
  struct __wait_queue_head {
-       spinlock_t lock;
-       struct list_head task_list;
+       spinlock_t              lock;
+       struct list_head        task_list;
  };
  typedef struct __wait_queue_head wait_queue_head_t;
  
@@@ -84,17 -85,17 +85,17 @@@ extern void __init_waitqueue_head(wait_
  
  static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
  {
-       q->flags = 0;
-       q->private = p;
-       q->func = default_wake_function;
+       q->flags        = 0;
+       q->private      = p;
+       q->func         = default_wake_function;
  }
  
- static inline void init_waitqueue_func_entry(wait_queue_t *q,
-                                       wait_queue_func_t func)
+ static inline void
+ init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
  {
-       q->flags = 0;
-       q->private = NULL;
-       q->func = func;
+       q->flags        = 0;
+       q->private      = NULL;
+       q->func         = func;
  }
  
  static inline int waitqueue_active(wait_queue_head_t *q)
@@@ -114,8 -115,8 +115,8 @@@ static inline void __add_wait_queue(wai
  /*
   * Used for wake-one threads:
   */
- static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
-                                             wait_queue_t *wait)
+ static inline void
+ __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  {
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
@@@ -127,23 -128,22 +128,22 @@@ static inline void __add_wait_queue_tai
        list_add_tail(&new->task_list, &head->task_list);
  }
  
- static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
-                                             wait_queue_t *wait)
+ static inline void
+ __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
  {
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
  }
  
- static inline void __remove_wait_queue(wait_queue_head_t *head,
-                                                       wait_queue_t *old)
+ static inline void
+ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
  {
        list_del(&old->task_list);
  }
  
  void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
  void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
- void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
-                       void *key);
+ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
  void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
  void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
  void __wake_up_bit(wait_queue_head_t *, void *, int);
@@@ -170,27 -170,64 +170,64 @@@ wait_queue_head_t *bit_waitqueue(void *
  /*
   * Wakeup macros to be used to report events to the targets.
   */
- #define wake_up_poll(x, m)                            \
+ #define wake_up_poll(x, m)                                            \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
- #define wake_up_locked_poll(x, m)                             \
+ #define wake_up_locked_poll(x, m)                                     \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
- #define wake_up_interruptible_poll(x, m)                      \
+ #define wake_up_interruptible_poll(x, m)                              \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
  #define wake_up_interruptible_sync_poll(x, m)                         \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
  
- #define __wait_event(wq, condition)                                   \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
+ #define ___wait_cond_timeout(condition)                                       \
+ ({                                                                    \
+       bool __cond = (condition);                                      \
+       if (__cond && !__ret)                                           \
+               __ret = 1;                                              \
+       __cond || !__ret;                                               \
+ })
+ #define ___wait_is_interruptible(state)                                       \
+       (!__builtin_constant_p(state) ||                                \
+               state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)  \
+ #define ___wait_event(wq, condition, state, exclusive, ret, cmd)      \
+ ({                                                                    \
+       __label__ __out;                                                \
+       wait_queue_t __wait;                                            \
+       long __ret = ret;                                               \
+                                                                       \
+       INIT_LIST_HEAD(&__wait.task_list);                              \
+       if (exclusive)                                                  \
+               __wait.flags = WQ_FLAG_EXCLUSIVE;                       \
+       else                                                            \
+               __wait.flags = 0;                                       \
                                                                        \
        for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
+               long __int = prepare_to_wait_event(&wq, &__wait, state);\
+                                                                       \
                if (condition)                                          \
                        break;                                          \
-               schedule();                                             \
+                                                                       \
+               if (___wait_is_interruptible(state) && __int) {         \
+                       __ret = __int;                                  \
+                       if (exclusive) {                                \
+                               abort_exclusive_wait(&wq, &__wait,      \
+                                                    state, NULL);      \
+                               goto __out;                             \
+                       }                                               \
+                       break;                                          \
+               }                                                       \
+                                                                       \
+               cmd;                                                    \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
- } while (0)
+ __out:        __ret;                                                          \
+ })
+ #define __wait_event(wq, condition)                                   \
+       (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
+                           schedule())
  
  /**
   * wait_event - sleep until a condition gets true
   * wake_up() has to be called after changing any variable that could
   * change the result of the wait condition.
   */
- #define wait_event(wq, condition)                                     \
+ #define wait_event(wq, condition)                                     \
  do {                                                                  \
-       if (condition)                                                  \
+       if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
  } while (0)
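The kernel-doc above states the wait_event() contract: the condition is re-evaluated on every wakeup, and wake_up() must be called only after the variables the condition reads have been updated. A minimal kernel-context sketch of that sleeper/waker pairing (made-up names, not part of this merge):

```c
#include <linux/wait.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_done;

/* Sleeper: blocks (TASK_UNINTERRUPTIBLE) until demo_done becomes true. */
static void demo_wait(void)
{
	wait_event(demo_wq, demo_done);
}

/* Waker: change the condition first, then wake the queue. */
static void demo_finish(void)
{
	demo_done = true;
	wake_up(&demo_wq);
}
```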
  
- #define __wait_event_timeout(wq, condition, ret)                      \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
-               if (condition)                                          \
-                       break;                                          \
-               ret = schedule_timeout(ret);                            \
-               if (!ret)                                               \
-                       break;                                          \
-       }                                                               \
-       if (!ret && (condition))                                        \
-               ret = 1;                                                \
-       finish_wait(&wq, &__wait);                                      \
- } while (0)
+ #define __wait_event_timeout(wq, condition, timeout)                  \
+       ___wait_event(wq, ___wait_cond_timeout(condition),              \
+                     TASK_UNINTERRUPTIBLE, 0, timeout,                 \
+                     __ret = schedule_timeout(__ret))
  
  /**
   * wait_event_timeout - sleep until a condition gets true or a timeout elapses
  #define wait_event_timeout(wq, condition, timeout)                    \
  ({                                                                    \
        long __ret = timeout;                                           \
-       if (!(condition))                                               \
-               __wait_event_timeout(wq, condition, __ret);             \
+       if (!___wait_cond_timeout(condition))                           \
+               __ret = __wait_event_timeout(wq, condition, timeout);   \
        __ret;                                                          \
  })
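As reworked above, wait_event_timeout() keeps the usual convention: it evaluates to 0 when the timeout elapsed with the condition still false, and to the remaining jiffies (at least 1) once the condition turned true. A short sketch of checking that result (hypothetical kernel-context code, not from this diff):

```c
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;

static int demo_wait_ready(void)
{
	long left = wait_event_timeout(demo_wq, demo_ready, HZ);

	if (!left)
		return -ETIMEDOUT;	/* a full second passed, still not ready */
	return 0;			/* condition became true in time */
}
```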
  
- #define __wait_event_interruptible(wq, condition, ret)                        \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
-               if (condition)                                          \
-                       break;                                          \
-               if (!signal_pending(current)) {                         \
-                       schedule();                                     \
-                       continue;                                       \
-               }                                                       \
-               ret = -ERESTARTSYS;                                     \
-               break;                                                  \
-       }                                                               \
-       finish_wait(&wq, &__wait);                                      \
- } while (0)
 +#define __wait_event_cmd(wq, condition, cmd1, cmd2)                   \
 +do {                                                                  \
 +      DEFINE_WAIT(__wait);                                            \
 +                                                                      \
 +      for (;;) {                                                      \
 +              prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
 +              if (condition)                                          \
 +                      break;                                          \
 +              cmd1;                                                   \
 +              schedule();                                             \
 +              cmd2;                                                   \
 +      }                                                               \
 +      finish_wait(&wq, &__wait);                                      \
 +} while (0)
 +
 +/**
 + * wait_event_cmd - sleep until a condition gets true
 + * @wq: the waitqueue to wait on
 + * @condition: a C expression for the event to wait for
 + * cmd1: the command will be executed before sleep
 + * cmd2: the command will be executed after sleep
 + *
 + * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 + * @condition evaluates to true. The @condition is checked each time
 + * the waitqueue @wq is woken up.
 + *
 + * wake_up() has to be called after changing any variable that could
 + * change the result of the wait condition.
 + */
 +#define wait_event_cmd(wq, condition, cmd1, cmd2)                     \
 +do {                                                                  \
 +      if (condition)                                                  \
 +              break;                                                  \
 +      __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
 +} while (0)
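The new wait_event_cmd() above runs cmd1 before every sleep and cmd2 after it; the typical use is dropping a lock across schedule() and retaking it, so the condition is always tested under that lock. A hedged kernel-context sketch of the pattern (made-up names, assuming an ordinary spinlock, not taken from this merge):

```c
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static DEFINE_SPINLOCK(demo_lock);
static bool demo_ready;

static void demo_wait_locked(void)
{
	spin_lock(&demo_lock);
	/*
	 * cmd1 drops demo_lock before schedule(), cmd2 retakes it afterwards,
	 * so demo_ready is always evaluated with the lock held.
	 */
	wait_event_cmd(demo_wq, demo_ready,
		       spin_unlock(&demo_lock), spin_lock(&demo_lock));
	spin_unlock(&demo_lock);
}
```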
 +
+ #define __wait_event_interruptible(wq, condition)                     \
+       ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
+                     schedule())
  
  /**
   * wait_event_interruptible - sleep until a condition gets true
  ({                                                                    \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
-               __wait_event_interruptible(wq, condition, __ret);       \
+               __ret = __wait_event_interruptible(wq, condition);      \
        __ret;                                                          \
  })
  
- #define __wait_event_interruptible_timeout(wq, condition, ret)                \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
-               if (condition)                                          \
-                       break;                                          \
-               if (!signal_pending(current)) {                         \
-                       ret = schedule_timeout(ret);                    \
-                       if (!ret)                                       \
-                               break;                                  \
-                       continue;                                       \
-               }                                                       \
-               ret = -ERESTARTSYS;                                     \
-               break;                                                  \
-       }                                                               \
-       if (!ret && (condition))                                        \
-               ret = 1;                                                \
-       finish_wait(&wq, &__wait);                                      \
- } while (0)
+ #define __wait_event_interruptible_timeout(wq, condition, timeout)    \
+       ___wait_event(wq, ___wait_cond_timeout(condition),              \
+                     TASK_INTERRUPTIBLE, 0, timeout,                   \
+                     __ret = schedule_timeout(__ret))
  
  /**
   * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
  #define wait_event_interruptible_timeout(wq, condition, timeout)      \
  ({                                                                    \
        long __ret = timeout;                                           \
-       if (!(condition))                                               \
-               __wait_event_interruptible_timeout(wq, condition, __ret); \
+       if (!___wait_cond_timeout(condition))                           \
+               __ret = __wait_event_interruptible_timeout(wq,          \
+                                               condition, timeout);    \
        __ret;                                                          \
  })
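
Editorial note: a hedged sketch of how a caller consumes the long return value (the wait queue and flag are hypothetical, not from this patch). Zero means the timeout elapsed, a negative value is -ERESTARTSYS from an interrupting signal, and a positive value is the number of jiffies left when the condition became true:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(rx_wq);  /* hypothetical */
static bool rx_ready;

static int wait_for_rx(void)
{
        long ret = wait_event_interruptible_timeout(rx_wq, rx_ready, HZ);

        if (ret == 0)
                return -ETIMEDOUT;      /* one second passed, still not ready */
        if (ret < 0)
                return ret;             /* -ERESTARTSYS: interrupted by a signal */
        return 0;                       /* ready, with 'ret' jiffies to spare */
}
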
  
  #define __wait_event_hrtimeout(wq, condition, timeout, state)         \
  ({                                                                    \
        int __ret = 0;                                                  \
-       DEFINE_WAIT(__wait);                                            \
        struct hrtimer_sleeper __t;                                     \
                                                                        \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
                                       current->timer_slack_ns,         \
                                       HRTIMER_MODE_REL);               \
                                                                        \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, state);                   \
-               if (condition)                                          \
-                       break;                                          \
-               if (state == TASK_INTERRUPTIBLE &&                      \
-                   signal_pending(current)) {                          \
-                       __ret = -ERESTARTSYS;                           \
-                       break;                                          \
-               }                                                       \
+       __ret = ___wait_event(wq, condition, state, 0, 0,               \
                if (!__t.task) {                                        \
                        __ret = -ETIME;                                 \
                        break;                                          \
                }                                                       \
-               schedule();                                             \
-       }                                                               \
+               schedule());                                            \
                                                                        \
        hrtimer_cancel(&__t.timer);                                     \
        destroy_hrtimer_on_stack(&__t.timer);                           \
-       finish_wait(&wq, &__wait);                                      \
        __ret;                                                          \
  })
  
        __ret;                                                          \
  })
  
- #define __wait_event_interruptible_exclusive(wq, condition, ret)      \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait_exclusive(&wq, &__wait,                 \
-                                       TASK_INTERRUPTIBLE);            \
-               if (condition) {                                        \
-                       finish_wait(&wq, &__wait);                      \
-                       break;                                          \
-               }                                                       \
-               if (!signal_pending(current)) {                         \
-                       schedule();                                     \
-                       continue;                                       \
-               }                                                       \
-               ret = -ERESTARTSYS;                                     \
-               abort_exclusive_wait(&wq, &__wait,                      \
-                               TASK_INTERRUPTIBLE, NULL);              \
-               break;                                                  \
-       }                                                               \
- } while (0)
+ #define __wait_event_interruptible_exclusive(wq, condition)           \
+       ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
+                     schedule())
  
  #define wait_event_interruptible_exclusive(wq, condition)             \
  ({                                                                    \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
-               __wait_event_interruptible_exclusive(wq, condition, __ret);\
+               __ret = __wait_event_interruptible_exclusive(wq, condition);\
        __ret;                                                          \
  })
  
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
  
  
- #define __wait_event_killable(wq, condition, ret)                     \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_KILLABLE);           \
-               if (condition)                                          \
-                       break;                                          \
-               if (!fatal_signal_pending(current)) {                   \
-                       schedule();                                     \
-                       continue;                                       \
-               }                                                       \
-               ret = -ERESTARTSYS;                                     \
-               break;                                                  \
-       }                                                               \
-       finish_wait(&wq, &__wait);                                      \
- } while (0)
+ #define __wait_event_killable(wq, condition)                          \
+       ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
  
  /**
   * wait_event_killable - sleep until a condition gets true
  ({                                                                    \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
-               __wait_event_killable(wq, condition, __ret);            \
+               __ret = __wait_event_killable(wq, condition);           \
        __ret;                                                          \
  })
  
  
  #define __wait_event_lock_irq(wq, condition, lock, cmd)                       \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);    \
-               if (condition)                                          \
-                       break;                                          \
-               spin_unlock_irq(&lock);                                 \
-               cmd;                                                    \
-               schedule();                                             \
-               spin_lock_irq(&lock);                                   \
-       }                                                               \
-       finish_wait(&wq, &__wait);                                      \
- } while (0)
+       (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
+                           spin_unlock_irq(&lock);                     \
+                           cmd;                                        \
+                           schedule();                                 \
+                           spin_lock_irq(&lock))
  
  /**
   * wait_event_lock_irq_cmd - sleep until a condition gets true. The
@@@ -759,26 -664,12 +700,12 @@@ do {                                                                    
  } while (0)
  
  
- #define __wait_event_interruptible_lock_irq(wq, condition,            \
-                                           lock, ret, cmd)             \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
-               if (condition)                                          \
-                       break;                                          \
-               if (signal_pending(current)) {                          \
-                       ret = -ERESTARTSYS;                             \
-                       break;                                          \
-               }                                                       \
-               spin_unlock_irq(&lock);                                 \
-               cmd;                                                    \
-               schedule();                                             \
-               spin_lock_irq(&lock);                                   \
-       }                                                               \
-       finish_wait(&wq, &__wait);                                      \
- } while (0)
+ #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
+       ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
+                     spin_unlock_irq(&lock);                           \
+                     cmd;                                              \
+                     schedule();                                       \
+                     spin_lock_irq(&lock))
  
  /**
   * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
  #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)       \
  ({                                                                    \
        int __ret = 0;                                                  \
-                                                                       \
        if (!(condition))                                               \
-               __wait_event_interruptible_lock_irq(wq, condition,      \
-                                                   lock, __ret, cmd);  \
+               __ret = __wait_event_interruptible_lock_irq(wq,         \
+                                               condition, lock, cmd);  \
        __ret;                                                          \
  })
  
  #define wait_event_interruptible_lock_irq(wq, condition, lock)                \
  ({                                                                    \
        int __ret = 0;                                                  \
-                                                                       \
        if (!(condition))                                               \
-               __wait_event_interruptible_lock_irq(wq, condition,      \
-                                                   lock, __ret, );     \
+               __ret = __wait_event_interruptible_lock_irq(wq,         \
+                                               condition, lock,);      \
        __ret;                                                          \
  })
  
  #define __wait_event_interruptible_lock_irq_timeout(wq, condition,    \
-                                                   lock, ret)          \
- do {                                                                  \
-       DEFINE_WAIT(__wait);                                            \
-                                                                       \
-       for (;;) {                                                      \
-               prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
-               if (condition)                                          \
-                       break;                                          \
-               if (signal_pending(current)) {                          \
-                       ret = -ERESTARTSYS;                             \
-                       break;                                          \
-               }                                                       \
-               spin_unlock_irq(&lock);                                 \
-               ret = schedule_timeout(ret);                            \
-               spin_lock_irq(&lock);                                   \
-               if (!ret)                                               \
-                       break;                                          \
-       }                                                               \
-       finish_wait(&wq, &__wait);                                      \
- } while (0)
+                                                   lock, timeout)      \
+       ___wait_event(wq, ___wait_cond_timeout(condition),              \
+                     TASK_INTERRUPTIBLE, 0, timeout,                   \
+                     spin_unlock_irq(&lock);                           \
+                     __ret = schedule_timeout(__ret);                  \
+                     spin_lock_irq(&lock));
  
  /**
-  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
-  *            The condition is checked under the lock. This is expected
-  *            to be called with the lock taken.
+  * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
+  *            true or a timeout elapses. The condition is checked under
+  *            the lock. This is expected to be called with the lock taken.
   * @wq: the waitqueue to wait on
   * @condition: a C expression for the event to wait for
   * @lock: a locked spinlock_t, which will be released before schedule()
  #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,        \
                                                  timeout)              \
  ({                                                                    \
-       int __ret = timeout;                                            \
-                                                                       \
-       if (!(condition))                                               \
-               __wait_event_interruptible_lock_irq_timeout(            \
-                                       wq, condition, lock, __ret);    \
+       long __ret = timeout;                                           \
+       if (!___wait_cond_timeout(condition))                           \
+               __ret = __wait_event_interruptible_lock_irq_timeout(    \
+                                       wq, condition, lock, timeout);  \
        __ret;                                                          \
  })
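
Editorial note: a hedged usage sketch matching the calling convention documented above (the spinlock is taken with spin_lock_irq() before the call and is held again on return); the device objects are hypothetical:

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

static DEFINE_SPINLOCK(dev_lock);               /* hypothetical */
static DECLARE_WAIT_QUEUE_HEAD(dev_wq);         /* hypothetical */
static bool dev_ready;

static long wait_for_device(void)
{
        long ret;

        spin_lock_irq(&dev_lock);       /* the macro expects the lock taken */
        ret = wait_event_interruptible_lock_irq_timeout(dev_wq, dev_ready,
                                                        dev_lock, 10 * HZ);
        spin_unlock_irq(&dev_lock);     /* the lock is held again on return */

        return ret;     /* 0: timed out, <0: signal, >0: jiffies remaining */
}
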
  
   * We plan to remove these interfaces.
   */
  extern void sleep_on(wait_queue_head_t *q);
- extern long sleep_on_timeout(wait_queue_head_t *q,
-                                     signed long timeout);
+ extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
  extern void interruptible_sleep_on(wait_queue_head_t *q);
- extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
-                                          signed long timeout);
+ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
  
  /*
   * Waitqueues which are removed from the waitqueue_head at wakeup time
   */
  void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
  void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
  void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
- void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
-                       unsigned int mode, void *key);
+ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
  int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
  int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
  
   * One uses wait_on_bit() where one is waiting for the bit to clear,
   * but has no intention of setting it.
   */
- static inline int wait_on_bit(void *word, int bit,
-                               int (*action)(void *), unsigned mode)
+ static inline int
+ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
  {
        if (!test_bit(bit, word))
                return 0;
   * One uses wait_on_bit_lock() where one is waiting for the bit to
   * clear with the intention of setting it, and when done, clearing it.
   */
- static inline int wait_on_bit_lock(void *word, int bit,
-                               int (*action)(void *), unsigned mode)
+ static inline int
+ wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
  {
        if (!test_and_set_bit(bit, word))
                return 0;
@@@ -1019,5 -891,5 +927,5 @@@ int wait_on_atomic_t(atomic_t *val, in
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
  }
-       
- #endif
+ #endif /* _LINUX_WAIT_H */
diff --combined init/Kconfig
index 4d09f69eb76648a5f2495623acad64747ae9c023,841e79cb8bb363fd37aa55222ca7ccb09b303021..6ba4329410a32bbafd54167a6965cdf19e7cba01
@@@ -284,7 -284,7 +284,7 @@@ config AUDI
  
  config AUDITSYSCALL
        bool "Enable system-call auditing support"
 -      depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
 +      depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
        default y if SECURITY_SELINUX
        help
          Enable low-overhead system-call auditing infrastructure that
@@@ -354,7 -354,8 +354,8 @@@ config VIRT_CPU_ACCOUNTING_NATIV
  
  config VIRT_CPU_ACCOUNTING_GEN
        bool "Full dynticks CPU time accounting"
-       depends on HAVE_CONTEXT_TRACKING && 64BIT
+       depends on HAVE_CONTEXT_TRACKING
+       depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
        select VIRT_CPU_ACCOUNTING
        select CONTEXT_TRACKING
        help
@@@ -844,7 -845,7 +845,7 @@@ config NUMA_BALANCING_DEFAULT_ENABLE
        default y
        depends on NUMA_BALANCING
        help
 -        If set, autonumic NUMA balancing will be enabled if running on a NUMA
 +        If set, automatic NUMA balancing will be enabled if running on a NUMA
          machine.
  
  config NUMA_BALANCING
        help
          This option adds support for automatic NUMA aware memory/task placement.
          The mechanism is quite primitive and is based on migrating memory when
 -        it is references to the node the task is running on.
 +        it has references to the node the task is running on.
  
          This system will be inactive on UMA systems.
  
@@@ -1668,18 -1669,6 +1669,18 @@@ config BASE_SMAL
        default 0 if BASE_FULL
        default 1 if !BASE_FULL
  
 +config SYSTEM_TRUSTED_KEYRING
 +      bool "Provide system-wide ring of trusted keys"
 +      depends on KEYS
 +      help
 +        Provide a system keyring to which trusted keys can be added.  Keys in
 +        the keyring are considered to be trusted.  Keys may be added at will
 +        by the kernel from compiled-in data and from hardware key stores, but
 +        userspace may only add extra keys if those keys can be verified by
 +        keys already in the keyring.
 +
 +        Keys in this keyring are used by module signature checking.
 +
  menuconfig MODULES
        bool "Enable loadable module support"
        option modules
@@@ -1753,7 -1742,6 +1754,7 @@@ config MODULE_SRCVERSION_AL
  config MODULE_SIG
        bool "Module signature verification"
        depends on MODULES
 +      select SYSTEM_TRUSTED_KEYRING
        select KEYS
        select CRYPTO
        select ASYMMETRIC_KEY_TYPE
diff --combined init/main.c
index edee99f735746f4bb3b6a8fb17794919469f4888,379090fadac9a1652aa9f095db2bf6ef49ddc763..dad19cf42dbaa0f088516355e3a359d99821d249
@@@ -136,13 -136,6 +136,13 @@@ static char *static_command_line
  static char *execute_command;
  static char *ramdisk_execute_command;
  
 +/*
 + * Used to generate warnings if static_key manipulation functions are used
 + * before jump_label_init is called.
 + */
 +bool static_key_initialized __read_mostly = false;
 +EXPORT_SYMBOL_GPL(static_key_initialized);
 +
  /*
   * If set, this is an indication to the drivers that reset the underlying
   * device before going ahead with the initialization otherwise driver might
@@@ -700,7 -693,7 +700,7 @@@ int __init_or_module do_one_initcall(in
  
        if (preempt_count() != count) {
                sprintf(msgbuf, "preemption imbalance ");
-               preempt_count() = count;
+               preempt_count_set(count);
        }
        if (irqs_disabled()) {
                strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
diff --combined kernel/Makefile
index 63136989c132e6095320547098e157878fd1942a,f99d908b5550766817d22ec0406f5776599044af..9a52eb5bf68980b8860014655de49018f9d00c49
@@@ -6,9 -6,9 +6,9 @@@ obj-y     = fork.o exec_domain.o panic.
            cpu.o exit.o itimer.o time.o softirq.o resource.o \
            sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
            signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
-           rcupdate.o extable.o params.o posix-timers.o \
+           extable.o params.o posix-timers.o \
            kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
-           hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
+           hrtimer.o rwsem.o nsproxy.o semaphore.o \
            notifier.o ksysfs.o cred.o reboot.o \
            async.o range.o groups.o lglock.o smpboot.o
  
@@@ -27,6 -27,7 +27,7 @@@ obj-y += power
  obj-y += printk/
  obj-y += cpu/
  obj-y += irq/
+ obj-y += rcu/
  
  obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
  obj-$(CONFIG_FREEZER) += freezer.o
@@@ -54,9 -55,8 +55,9 @@@ obj-$(CONFIG_SMP) += spinlock.
  obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
  obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
  obj-$(CONFIG_UID16) += uid16.o
 +obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
  obj-$(CONFIG_MODULES) += module.o
 -obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o
 +obj-$(CONFIG_MODULE_SIG) += module_signing.o
  obj-$(CONFIG_KALLSYMS) += kallsyms.o
  obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
  obj-$(CONFIG_KEXEC) += kexec.o
@@@ -82,12 -82,6 +83,6 @@@ obj-$(CONFIG_KGDB) += debug
  obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
  obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
  obj-$(CONFIG_SECCOMP) += seccomp.o
- obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
- obj-$(CONFIG_TREE_RCU) += rcutree.o
- obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
- obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
- obj-$(CONFIG_TINY_RCU) += rcutiny.o
- obj-$(CONFIG_TINY_PREEMPT_RCU) += rcutiny.o
  obj-$(CONFIG_RELAY) += relay.o
  obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
  obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@@ -142,52 -136,19 +137,52 @@@ targets += timeconst.
  $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
        $(call if_changed,bc)
  
 -ifeq ($(CONFIG_MODULE_SIG),y)
 +###############################################################################
 +#
 +# Roll all the X.509 certificates that we can find together and pull them into
 +# the kernel so that they get loaded into the system trusted keyring during
 +# boot.
  #
 -# Pull the signing certificate and any extra certificates into the kernel
 +# We look in the source root and the build root for all files whose name ends
 +# in ".x509".  Unfortunately, this will generate duplicate filenames, so we
 +# have make canonicalise the pathnames and then sort them to discard the
 +# duplicates.
  #
 +###############################################################################
 +ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
 +X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
 +X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509
 +X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
 +                              $(or $(realpath $(CERT)),$(CERT))))
 +
 +ifeq ($(X509_CERTIFICATES),)
 +$(warning *** No X.509 certificates found ***)
 +endif
 +
 +ifneq ($(wildcard $(obj)/.x509.list),)
 +ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES))
 +$(info X.509 certificate list changed)
 +$(shell rm $(obj)/.x509.list)
 +endif
 +endif
 +
 +kernel/system_certificates.o: $(obj)/x509_certificate_list
  
 -quiet_cmd_touch = TOUCH   $@
 -      cmd_touch = touch   $@
 +quiet_cmd_x509certs  = CERTS   $@
 +      cmd_x509certs  = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; echo "  - Including cert $(X509)")
  
 -extra_certificates:
 -      $(call cmd,touch)
 +targets += $(obj)/x509_certificate_list
 +$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
 +      $(call if_changed,x509certs)
  
 -kernel/modsign_certificate.o: signing_key.x509 extra_certificates
 +targets += $(obj)/.x509.list
 +$(obj)/.x509.list:
 +      @echo $(X509_CERTIFICATES) >$@
  
 +clean-files := x509_certificate_list .x509.list
 +endif
 +
 +ifeq ($(CONFIG_MODULE_SIG),y)
  ###############################################################################
  #
  # If module signing is requested, say by allyesconfig, but a key has not been
index 75b93d7f786010000368e299d214717131c67fa1,c516d6ba67163654fbcbe150292ebdf8a1fa2f7e..e4d06b2080d77d32925b5806176ff6c735d7bb1f
@@@ -23,19 -23,15 +23,19 @@@ ifeq ($(ARCH),x86_64
    endif
    ifeq (${IS_X86_64}, 1)
      RAW_ARCH := x86_64
-     CFLAGS += -DARCH_X86_64
+     CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
      ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
    endif
    NO_PERF_REGS := 0
    LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
  endif
 +ifeq ($(ARCH),arm)
 +  NO_PERF_REGS := 0
 +  LIBUNWIND_LIBS = -lunwind -lunwind-arm
 +endif
  
  ifeq ($(NO_PERF_REGS),0)
-   CFLAGS += -DHAVE_PERF_REGS
+   CFLAGS += -DHAVE_PERF_REGS_SUPPORT
  endif
  
  ifeq ($(src-perf),)
@@@ -55,7 -51,6 +55,6 @@@ LIB_INCLUDE := $(srctree)/tools/lib
  # include ARCH specific config
  -include $(src-perf)/arch/$(ARCH)/Makefile
  
- include $(src-perf)/config/feature-tests.mak
  include $(src-perf)/config/utilities.mak
  
  ifeq ($(call get-executable,$(FLEX)),)
@@@ -71,10 -66,7 +70,7 @@@ ifneq ($(WERROR),0
    CFLAGS += -Werror
  endif
  
- ifeq ("$(origin DEBUG)", "command line")
-   PERF_DEBUG = $(DEBUG)
- endif
- ifndef PERF_DEBUG
+ ifeq ($(DEBUG),0)
    CFLAGS += -O6
  endif
  
@@@ -93,20 -85,125 +89,126 @@@ CFLAGS += -std=gnu9
  
  EXTLIBS = -lelf -lpthread -lrt -lm -ldl
  
- ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -fstack-protector-all,-fstack-protector-all),y)
-   CFLAGS += -fstack-protector-all
+ ifneq ($(OUTPUT),)
+   OUTPUT_FEATURES = $(OUTPUT)config/feature-checks/
+   $(shell mkdir -p $(OUTPUT_FEATURES))
  endif
  
- ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wstack-protector,-Wstack-protector),y)
-   CFLAGS += -Wstack-protector
+ feature_check = $(eval $(feature_check_code))
+ define feature_check_code
+   feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) LDFLAGS=$(LDFLAGS) -C config/feature-checks test-$1 >/dev/null 2>/dev/null && echo 1 || echo 0)
+ endef
+ feature_set = $(eval $(feature_set_code))
+ define feature_set_code
+   feature-$(1) := 1
+ endef
+ #
+ # Build the feature check binaries in parallel, ignore errors, ignore return value and suppress output:
+ #
+ #
+ # Note that this is not a complete list of all feature tests, just
+ # those that are typically built on a fully configured system.
+ #
+ # [ Feature tests not mentioned here have to be built explicitly in
+ #   the rule that uses them - an example for that is the 'bionic'
+ #   feature check. ]
+ #
+ CORE_FEATURE_TESTS =                  \
+       backtrace                       \
+       dwarf                           \
+       fortify-source                  \
+       glibc                           \
+       gtk2                            \
+       gtk2-infobar                    \
+       libaudit                        \
+       libbfd                          \
+       libelf                          \
+       libelf-getphdrnum               \
+       libelf-mmap                     \
+       libnuma                         \
+       libperl                         \
+       libpython                       \
+       libpython-version               \
+       libslang                        \
+       libunwind                       \
++      libunwind-debug-frame           \
+       on-exit                         \
+       stackprotector                  \
+       stackprotector-all
+ #
+ # So here we detect whether test-all was rebuilt, to be able
+ # to skip the print-out of the long features list if the file
+ # existed before and after it was built:
+ #
+ ifeq ($(wildcard $(OUTPUT)config/feature-checks/test-all),)
+   test-all-failed := 1
+ else
+   test-all-failed := 0
+ endif
+ #
+ # Special fast-path for the 'all features are available' case:
+ #
+ $(call feature_check,all,$(MSG))
+ #
+ # Just in case the build freshly failed, make sure we print the
+ # feature matrix:
+ #
+ ifeq ($(feature-all), 0)
+   test-all-failed := 1
+ endif
+ ifeq ($(test-all-failed),1)
+   $(info )
+   $(info Auto-detecting system features:)
+ endif
+ ifeq ($(feature-all), 1)
+   #
+   # test-all.c passed - just set all the core feature flags to 1:
+   #
+   $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_set,$(feat)))
+ else
+   $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) LDFLAGS=$(LDFLAGS) -i -j -C config/feature-checks $(CORE_FEATURE_TESTS) >/dev/null 2>&1)
+   $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_check,$(feat)))
+ endif
+ #
+ # Print the result of the feature test:
+ #
+ feature_print = $(eval $(feature_print_code)) $(info $(MSG))
+ define feature_print_code
+   ifeq ($(feature-$(1)), 1)
+     MSG = $(shell printf '...%30s: [ \033[32mon\033[m  ]' $(1))
+   else
+     MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
+   endif
+ endef
+ #
+ # Only print out our features if we rebuilt the testcases or if a test failed:
+ #
+ ifeq ($(test-all-failed), 1)
+   $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_print,$(feat)))
+   $(info )
  endif
  
- ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-Wvolatile-register-var),y)
-   CFLAGS += -Wvolatile-register-var
+ ifeq ($(feature-stackprotector-all), 1)
+   CFLAGS += -fstack-protector-all
+ endif
+ ifeq ($(feature-stackprotector), 1)
+   CFLAGS += -Wstack-protector
  endif
  
- ifndef PERF_DEBUG
-   ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y)
+ ifeq ($(DEBUG),0)
+   ifeq ($(feature-fortify-source), 1)
      CFLAGS += -D_FORTIFY_SOURCE=2
    endif
  endif
@@@ -132,123 -229,111 +234,115 @@@ CFLAGS += -I$(LIB_INCLUDE
  CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
  
  ifndef NO_BIONIC
- ifeq ($(call try-cc,$(SOURCE_BIONIC),$(CFLAGS),bionic),y)
-   BIONIC := 1
-   EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
-   EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
+   $(feature_check,bionic)
+   ifeq ($(feature-bionic), 1)
+     BIONIC := 1
+     EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
+     EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
+   endif
  endif
- endif # NO_BIONIC
  
  ifdef NO_LIBELF
    NO_DWARF := 1
    NO_DEMANGLE := 1
    NO_LIBUNWIND := 1
  else
- FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
- ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF),libelf),y)
-   FLAGS_GLIBC=$(CFLAGS) $(LDFLAGS)
-   ifeq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC),glibc),y)
-     LIBC_SUPPORT := 1
-   endif
-   ifeq ($(BIONIC),1)
-     LIBC_SUPPORT := 1
-   endif
-   ifeq ($(LIBC_SUPPORT),1)
-     msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev);
+   ifeq ($(feature-libelf), 0)
+     ifeq ($(feature-glibc), 1)
+       LIBC_SUPPORT := 1
+     endif
+     ifeq ($(BIONIC),1)
+       LIBC_SUPPORT := 1
+     endif
+     ifeq ($(LIBC_SUPPORT),1)
+       msg := $(warning No libelf found, disables 'probe' tool, please install elfutils-libelf-devel/libelf-dev);
  
-     NO_LIBELF := 1
-     NO_DWARF := 1
-     NO_DEMANGLE := 1
+       NO_LIBELF := 1
+       NO_DWARF := 1
+       NO_DEMANGLE := 1
+     else
+       msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+     endif
    else
-     msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
-   endif
- else
-   # for linking with debug library, run like:
-   # make DEBUG=1 LIBDW_DIR=/opt/libdw/
-   ifdef LIBDW_DIR
-     LIBDW_CFLAGS  := -I$(LIBDW_DIR)/include
-     LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
-   endif
+     # for linking with debug library, run like:
+     # make DEBUG=1 LIBDW_DIR=/opt/libdw/
+     ifdef LIBDW_DIR
+       LIBDW_CFLAGS  := -I$(LIBDW_DIR)/include
+       LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
+     endif
  
-   FLAGS_DWARF=$(CFLAGS) $(LIBDW_CFLAGS) -ldw -lz -lelf $(LIBDW_LDFLAGS) $(LDFLAGS) $(EXTLIBS)
-   ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF),libdw),y)
-     msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
-     NO_DWARF := 1
-   endif # Dwarf support
- endif # SOURCE_LIBELF
+     ifneq ($(feature-dwarf), 1)
+       msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
+       NO_DWARF := 1
+     endif # Dwarf support
+   endif # libelf support
  endif # NO_LIBELF
  
  ifndef NO_LIBELF
- CFLAGS += -DLIBELF_SUPPORT
- FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
- ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
-   CFLAGS += -DLIBELF_MMAP
- endif
- ifeq ($(call try-cc,$(SOURCE_ELF_GETPHDRNUM),$(FLAGS_LIBELF),-DHAVE_ELF_GETPHDRNUM),y)
-   CFLAGS += -DHAVE_ELF_GETPHDRNUM
- endif
+   CFLAGS += -DHAVE_LIBELF_SUPPORT
  
- # include ARCH specific config
- -include $(src-perf)/arch/$(ARCH)/Makefile
+   ifeq ($(feature-libelf-mmap), 1)
+     CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
+   endif
  
- ifndef NO_DWARF
- ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
-   msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
-   NO_DWARF := 1
- else
-   CFLAGS += -DDWARF_SUPPORT $(LIBDW_CFLAGS)
-   LDFLAGS += $(LIBDW_LDFLAGS)
-   EXTLIBS += -lelf -ldw
- endif # PERF_HAVE_DWARF_REGS
- endif # NO_DWARF
+   ifeq ($(feature-libelf-getphdrnum), 1)
+     CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
+   endif
  
- endif # NO_LIBELF
+   # include ARCH specific config
+   -include $(src-perf)/arch/$(ARCH)/Makefile
  
- ifndef NO_LIBELF
- CFLAGS += -DLIBELF_SUPPORT
- FLAGS_LIBELF=$(CFLAGS) $(LDFLAGS) $(EXTLIBS)
- ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
-   CFLAGS += -DLIBELF_MMAP
- endif # try-cc
+   ifndef NO_DWARF
+     ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
+       msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
+       NO_DWARF := 1
+     else
+       CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
+       LDFLAGS += $(LIBDW_LDFLAGS)
+       EXTLIBS += -lelf -ldw
+     endif # PERF_HAVE_DWARF_REGS
+   endif # NO_DWARF
  endif # NO_LIBELF
  
 -# There's only x86 (both 32 and 64) support for CFI unwind so far
 -ifneq ($(ARCH),x86)
 +ifeq ($(LIBUNWIND_LIBS),)
    NO_LIBUNWIND := 1
  endif
  
  ifndef NO_LIBUNWIND
- # for linking with debug library, run like:
- # make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
- ifdef LIBUNWIND_DIR
-   LIBUNWIND_CFLAGS  := -I$(LIBUNWIND_DIR)/include
-   LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib
- endif
+   #
+   # For linking with debug library, run like:
+   #
+   #   make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
+   #
+   ifdef LIBUNWIND_DIR
+     LIBUNWIND_CFLAGS  := -I$(LIBUNWIND_DIR)/include
+     LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib
+   endif
  
- FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(CFLAGS) $(LIBUNWIND_LDFLAGS) $(LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
- ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND),libunwind),y)
-   msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
-   NO_LIBUNWIND := 1
- endif # Libunwind support
- ifneq ($(call try-cc,$(SOURCE_LIBUNWIND_DEBUG_FRAME),$(FLAGS_UNWIND),libunwind debug_frame),y)
-   msg := $(warning No debug_frame support found in libunwind);
- CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
- endif # debug_frame support in libunwind
- endif # NO_LIBUNWIND
+   ifneq ($(feature-libunwind), 1)
 -    msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
++    msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
+     NO_LIBUNWIND := 1
++  else
++    ifneq ($(feature-libunwind-debug-frame), 1)
++      msg := $(warning No debug_frame support found in libunwind);
++      CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
++    endif
+   endif
+ endif
  
  ifndef NO_LIBUNWIND
-   CFLAGS += -DLIBUNWIND_SUPPORT
+   CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
    EXTLIBS += $(LIBUNWIND_LIBS)
    CFLAGS += $(LIBUNWIND_CFLAGS)
    LDFLAGS += $(LIBUNWIND_LDFLAGS)
- endif # NO_LIBUNWIND
+ endif
  
  ifndef NO_LIBAUDIT
-   FLAGS_LIBAUDIT = $(CFLAGS) $(LDFLAGS) -laudit
-   ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT),libaudit),y)
+   ifneq ($(feature-libaudit), 1)
      msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
      NO_LIBAUDIT := 1
    else
-     CFLAGS += -DLIBAUDIT_SUPPORT
+     CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
      EXTLIBS += -laudit
    endif
  endif
@@@ -258,30 -343,30 +352,30 @@@ ifdef NO_NEW
  endif
  
  ifndef NO_SLANG
-   FLAGS_SLANG=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) -I/usr/include/slang -lslang
-   ifneq ($(call try-cc,$(SOURCE_SLANG),$(FLAGS_SLANG),libslang),y)
+   ifneq ($(feature-libslang), 1)
      msg := $(warning slang not found, disables TUI support. Please install slang-devel or libslang-dev);
      NO_SLANG := 1
    else
      # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
      CFLAGS += -I/usr/include/slang
-     CFLAGS += -DSLANG_SUPPORT
+     CFLAGS += -DHAVE_SLANG_SUPPORT
      EXTLIBS += -lslang
    endif
  endif
  
  ifndef NO_GTK2
    FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
-   ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2),gtk2),y)
+   ifneq ($(feature-gtk2), 1)
      msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
      NO_GTK2 := 1
    else
-     ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2),-DHAVE_GTK_INFO_BAR),y)
-       CFLAGS += -DHAVE_GTK_INFO_BAR
+     ifeq ($(feature-gtk2-infobar), 1)
+       GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
      endif
-     CFLAGS += -DGTK2_SUPPORT
-     CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
-     EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
+     CFLAGS += -DHAVE_GTK2_SUPPORT
+     GTK_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null)
+     GTK_LIBS := $(shell pkg-config --libs gtk+-2.0 2>/dev/null)
+     EXTLIBS += -ldl
    endif
  endif
  
@@@ -297,7 -382,7 +391,7 @@@ els
    PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
    FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
  
-   ifneq ($(call try-cc,$(SOURCE_PERL_EMBED),$(FLAGS_PERL_EMBED),perl),y)
+   ifneq ($(feature-libperl), 1)
      CFLAGS += -DNO_LIBPERL
      NO_LIBPERL := 1
    else
@@@ -342,11 -427,11 +436,11 @@@ els
        PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
        FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
  
-       ifneq ($(call try-cc,$(SOURCE_PYTHON_EMBED),$(FLAGS_PYTHON_EMBED),python),y)
+       ifneq ($(feature-libpython), 1)
          $(call disable-python,Python.h (for Python 2.x))
        else
  
-         ifneq ($(call try-cc,$(SOURCE_PYTHON_VERSION),$(FLAGS_PYTHON_EMBED),python version),y)
+         ifneq ($(feature-libpython-version), 1)
            $(warning Python 3 is not yet supported; please set)
            $(warning PYTHON and/or PYTHON_CONFIG appropriately.)
            $(warning If you also have Python 2 installed, then)
    endif
  endif
  
+ ifeq ($(feature-libbfd), 1)
+   EXTLIBS += -lbfd
+ endif
  ifdef NO_DEMANGLE
    CFLAGS += -DNO_DEMANGLE
  else
-   ifdef HAVE_CPLUS_DEMANGLE
+   ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
      EXTLIBS += -liberty
-     CFLAGS += -DHAVE_CPLUS_DEMANGLE
+     CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
    else
-     FLAGS_BFD=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd
-     has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD),libbfd)
-     ifeq ($(has_bfd),y)
-       EXTLIBS += -lbfd
-     else
-       FLAGS_BFD_IBERTY=$(FLAGS_BFD) -liberty
-       has_bfd_iberty := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY),liberty)
-       ifeq ($(has_bfd_iberty),y)
+     ifneq ($(feature-libbfd), 1)
+       $(feature_check,liberty)
+       ifeq ($(feature-liberty), 1)
          EXTLIBS += -lbfd -liberty
        else
-         FLAGS_BFD_IBERTY_Z=$(FLAGS_BFD_IBERTY) -lz
-         has_bfd_iberty_z := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD_IBERTY_Z),libz)
-         ifeq ($(has_bfd_iberty_z),y)
+         $(feature_check,liberty-z)
+         ifeq ($(feature-liberty-z), 1)
            EXTLIBS += -lbfd -liberty -lz
          else
-           FLAGS_CPLUS_DEMANGLE=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) -liberty
-           has_cplus_demangle := $(call try-cc,$(SOURCE_CPLUS_DEMANGLE),$(FLAGS_CPLUS_DEMANGLE),demangle)
-           ifeq ($(has_cplus_demangle),y)
+           $(feature_check,cplus-demangle)
+           ifeq ($(feature-cplus-demangle), 1)
              EXTLIBS += -liberty
-             CFLAGS += -DHAVE_CPLUS_DEMANGLE
+             CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
            else
              msg := $(warning No bfd.h/libbfd found, install binutils-dev[el]/zlib-static to gain symbol demangling)
              CFLAGS += -DNO_DEMANGLE
    endif
  endif
  
- ifndef NO_STRLCPY
-   ifeq ($(call try-cc,$(SOURCE_STRLCPY),,-DHAVE_STRLCPY),y)
-     CFLAGS += -DHAVE_STRLCPY
-   endif
+ ifneq ($(filter -lbfd,$(EXTLIBS)),)
+   CFLAGS += -DHAVE_LIBBFD_SUPPORT
  endif
  
  ifndef NO_ON_EXIT
-   ifeq ($(call try-cc,$(SOURCE_ON_EXIT),,-DHAVE_ON_EXIT),y)
-     CFLAGS += -DHAVE_ON_EXIT
+   ifeq ($(feature-on-exit), 1)
+     CFLAGS += -DHAVE_ON_EXIT_SUPPORT
    endif
  endif
  
  ifndef NO_BACKTRACE
-   ifeq ($(call try-cc,$(SOURCE_BACKTRACE),,-DBACKTRACE_SUPPORT),y)
-     CFLAGS += -DBACKTRACE_SUPPORT
+   ifeq ($(feature-backtrace), 1)
+     CFLAGS += -DHAVE_BACKTRACE_SUPPORT
    endif
  endif
  
  ifndef NO_LIBNUMA
-   FLAGS_LIBNUMA = $(CFLAGS) $(LDFLAGS) -lnuma
-   ifneq ($(call try-cc,$(SOURCE_LIBNUMA),$(FLAGS_LIBNUMA),libnuma),y)
+   ifeq ($(feature-libnuma), 0)
      msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numa-libs-devel or libnuma-dev);
      NO_LIBNUMA := 1
    else
-     CFLAGS += -DLIBNUMA_SUPPORT
+     CFLAGS += -DHAVE_LIBNUMA_SUPPORT
      EXTLIBS += -lnuma
    endif
  endif
@@@ -466,7 -545,12 +554,12 @@@ els
  sysconfdir = $(prefix)/etc
  ETC_PERFCONFIG = etc/perfconfig
  endif
+ ifeq ($(IS_X86_64),1)
+ lib = lib64
+ else
  lib = lib
+ endif
+ libdir = $(prefix)/$(lib)
  
  # Shell quote (do not use $(call) to accommodate ancient setups);
  ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
@@@ -479,6 -563,7 +572,7 @@@ template_dir_SQ = $(subst ','\'',$(temp
  htmldir_SQ = $(subst ','\'',$(htmldir))
  prefix_SQ = $(subst ','\'',$(prefix))
  sysconfdir_SQ = $(subst ','\'',$(sysconfdir))
+ libdir_SQ = $(subst ','\'',$(libdir))
  
  ifneq ($(filter /%,$(firstword $(perfexecdir))),)
  perfexec_instdir = $(perfexecdir)
index 0000000000000000000000000000000000000000,452b67cc4d7b071e87ecee194d47f5a899d421b1..abaf8f4ea93a56f93c9ca7feb3a7c51b2d10ddbc
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,144 +1,148 @@@
+ FILES=                                        \
+       test-all                        \
+       test-backtrace                  \
+       test-bionic                     \
+       test-dwarf                      \
+       test-fortify-source             \
+       test-glibc                      \
+       test-gtk2                       \
+       test-gtk2-infobar               \
+       test-hello                      \
+       test-libaudit                   \
+       test-libbfd                     \
+       test-liberty                    \
+       test-liberty-z                  \
+       test-cplus-demangle             \
+       test-libelf                     \
+       test-libelf-getphdrnum          \
+       test-libelf-mmap                \
+       test-libnuma                    \
+       test-libperl                    \
+       test-libpython                  \
+       test-libpython-version          \
+       test-libslang                   \
+       test-libunwind                  \
++      test-libunwind-debug-frame      \
+       test-on-exit                    \
+       test-stackprotector-all         \
+       test-stackprotector
+ CC := $(CC) -MD
+ all: $(FILES)
+ BUILD = $(CC) $(LDFLAGS) -o $(OUTPUT)$@ $@.c
+ ###############################
+ test-all:
+       $(BUILD) -Werror -fstack-protector -fstack-protector-all -O2 -Werror -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lunwind -lunwind-x86_64 -lelf -laudit -I/usr/include/slang -lslang $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl
+ test-hello:
+       $(BUILD)
+ test-stackprotector-all:
+       $(BUILD) -Werror -fstack-protector-all
+ test-stackprotector:
+       $(BUILD) -Werror -fstack-protector -Wstack-protector
+ test-fortify-source:
+       $(BUILD) -O2 -Werror -D_FORTIFY_SOURCE=2
+ test-bionic:
+       $(BUILD)
+ test-libelf:
+       $(BUILD) -lelf
+ test-glibc:
+       $(BUILD)
+ test-dwarf:
+       $(BUILD) -ldw
+ test-libelf-mmap:
+       $(BUILD) -lelf
+ test-libelf-getphdrnum:
+       $(BUILD) -lelf
+ test-libnuma:
+       $(BUILD) -lnuma
+ test-libunwind:
+       $(BUILD) -lunwind -lunwind-x86_64 -lelf
++test-libunwind-debug-frame:
++      $(BUILD) -lunwind -lunwind-x86_64 -lelf
++
+ test-libaudit:
+       $(BUILD) -laudit
+ test-libslang:
+       $(BUILD) -I/usr/include/slang -lslang
+ test-gtk2:
+       $(BUILD) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
+ test-gtk2-infobar:
+       $(BUILD) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null)
+ grep-libs  = $(filter -l%,$(1))
+ strip-libs = $(filter-out -l%,$(1))
+ PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
+ PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
+ PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
+ PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+ FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
+ test-libperl:
+       $(BUILD) $(FLAGS_PERL_EMBED)
+ override PYTHON := python
+ override PYTHON_CONFIG := python-config
+ escape-for-shell-sq =  $(subst ','\'',$(1))
+ shell-sq = '$(escape-for-shell-sq)'
+ PYTHON_CONFIG_SQ = $(call shell-sq,$(PYTHON_CONFIG))
+ PYTHON_EMBED_LDOPTS = $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+ PYTHON_EMBED_LDFLAGS = $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
+ PYTHON_EMBED_LIBADD = $(call grep-libs,$(PYTHON_EMBED_LDOPTS))
+ PYTHON_EMBED_CCOPTS = $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
+ FLAGS_PYTHON_EMBED = $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+ test-libpython:
+       $(BUILD) $(FLAGS_PYTHON_EMBED)
+ test-libpython-version:
+       $(BUILD) $(FLAGS_PYTHON_EMBED)
+ test-libbfd:
+       $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ test-liberty:
+       $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
+ test-liberty-z:
+       $(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz
+ test-cplus-demangle:
+       $(BUILD) -liberty
+ test-on-exit:
+       $(BUILD)
+ test-backtrace:
+       $(BUILD)
+ -include *.d
+ ###############################
+ clean:
+       rm -f $(FILES) *.d
index 0000000000000000000000000000000000000000,50d431892a0ce28bd229e24e5053452651f8ff1b..ed8aa7b4aed8ba6fa1ff6392b97d52d98440ebef
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,106 +1,111 @@@
+ /*
+  * test-all.c: Try to build all the main testcases at once.
+  *
+  * A well-configured system will have all the prereqs installed, so we can speed
+  * up auto-detection on such systems.
+  */
+ /*
+  * Quirk: Python and Perl headers cannot be in arbitrary places, so keep
+  * these 3 testcases at the top:
+  */
+ #define main main_test_libpython
+ # include "test-libpython.c"
+ #undef main
+ #define main main_test_libpython_version
+ # include "test-libpython-version.c"
+ #undef main
+ #define main main_test_libperl
+ # include "test-libperl.c"
+ #undef main
+ #define main main_test_hello
+ # include "test-hello.c"
+ #undef main
+ #define main main_test_libelf
+ # include "test-libelf.c"
+ #undef main
+ #define main main_test_libelf_mmap
+ # include "test-libelf-mmap.c"
+ #undef main
+ #define main main_test_glibc
+ # include "test-glibc.c"
+ #undef main
+ #define main main_test_dwarf
+ # include "test-dwarf.c"
+ #undef main
+ #define main main_test_libelf_getphdrnum
+ # include "test-libelf-getphdrnum.c"
+ #undef main
+ #define main main_test_libunwind
+ # include "test-libunwind.c"
+ #undef main
++#define main main_test_libunwind_debug_frame
++# include "test-libunwind-debug-frame.c"
++#undef main
++
+ #define main main_test_libaudit
+ # include "test-libaudit.c"
+ #undef main
+ #define main main_test_libslang
+ # include "test-libslang.c"
+ #undef main
+ #define main main_test_gtk2
+ # include "test-gtk2.c"
+ #undef main
+ #define main main_test_gtk2_infobar
+ # include "test-gtk2-infobar.c"
+ #undef main
+ #define main main_test_libbfd
+ # include "test-libbfd.c"
+ #undef main
+ #define main main_test_on_exit
+ # include "test-on-exit.c"
+ #undef main
+ #define main main_test_backtrace
+ # include "test-backtrace.c"
+ #undef main
+ #define main main_test_libnuma
+ # include "test-libnuma.c"
+ #undef main
+ int main(int argc, char *argv[])
+ {
+       main_test_libpython();
+       main_test_libpython_version();
+       main_test_libperl();
+       main_test_hello();
+       main_test_libelf();
+       main_test_libelf_mmap();
+       main_test_glibc();
+       main_test_dwarf();
+       main_test_libelf_getphdrnum();
+       main_test_libunwind();
++      main_test_libunwind_debug_frame();
+       main_test_libaudit();
+       main_test_libslang();
+       main_test_gtk2(argc, argv);
+       main_test_gtk2_infobar(argc, argv);
+       main_test_libbfd();
+       main_test_on_exit();
+       main_test_backtrace();
+       main_test_libnuma();
+       return 0;
+ }
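
Editorial note: the composition pattern above is easy to extend. Each probe is a stand-alone test-<feature>.c with its own main(), which test-all.c renames via the preprocessor and calls once. A hedged sketch of what a hypothetical new probe, say test-zlib.c, would look like (the file name and the -lz link flag are illustrative, not part of this merge):

/* config/feature-checks/test-zlib.c (hypothetical) -- compile/link test only */
#include <zlib.h>

int main(void)
{
        z_stream zs;

        inflateInit(&zs);       /* never executed; we only need it to link */
        return 0;
}

test-all.c would then gain the usual three-line "#define main main_test_zlib / # include "test-zlib.c" / #undef main" stanza plus a main_test_zlib() call, the feature-checks Makefile a "test-zlib: $(BUILD) -lz" rule, and config/Makefile would list zlib in CORE_FEATURE_TESTS so the fast path covers it.
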
index 0000000000000000000000000000000000000000,0000000000000000000000000000000000000000..0ef8087a104a7c9623b85ac097c3b406e114d405
new file mode 100644 (file)
--- /dev/null
--- /dev/null
@@@ -1,0 -1,0 +1,16 @@@
++#include <libunwind.h>
++#include <stdlib.h>
++
++extern int
++UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
++                               unw_word_t ip, unw_word_t segbase,
++                               const char *obj_name, unw_word_t start,
++                               unw_word_t end);
++
++#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
++
++int main(void)
++{
++      dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
++      return 0;
++}