git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'irq/for-arm' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author		Catalin Marinas <catalin.marinas@arm.com>
		Thu, 22 Oct 2015 16:30:08 +0000 (17:30 +0100)
committer	Catalin Marinas <catalin.marinas@arm.com>
		Thu, 22 Oct 2015 16:31:10 +0000 (17:31 +0100)
This is an incremental fix for a patch previously pulled from tip
irq/for-arm.

* 'irq/for-arm' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq: Make the cpuhotplug migration code less noisy

378 files changed:
Documentation/Changes
Documentation/arm/uefi.txt
Documentation/arm64/booting.txt
Documentation/devicetree/bindings/arm/pmu.txt
Documentation/devicetree/bindings/input/cypress,cyapa.txt
Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt
Documentation/features/debug/KASAN/arch-support.txt
Documentation/input/multi-touch-protocol.txt
Documentation/power/pci.txt
Documentation/ptp/testptp.c
MAINTAINERS
Makefile
arch/arc/include/asm/Kbuild
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/Makefile
arch/arm64/boot/dts/arm/juno-r1.dts
arch/arm64/boot/dts/arm/juno.dts
arch/arm64/configs/defconfig
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/cpu.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/fixmap.h
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/hwcap.h
arch/arm64/include/asm/irq.h
arch/arm64/include/asm/kasan.h [new file with mode: 0644]
arch/arm64/include/asm/kernel-pgtable.h [new file with mode: 0644]
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/pmu.h [deleted file]
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/string.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/tlb.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/arm64ksyms.c
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/efi.c
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/image.h
arch/arm64/kernel/irq.c
arch/arm64/kernel/module.c
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/process.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/Kconfig
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_template.S [new file with mode: 0644]
arch/arm64/lib/copy_to_user.S
arch/arm64/lib/memchr.S
arch/arm64/lib/memcmp.S
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S
arch/arm64/lib/memset.S
arch/arm64/lib/strcmp.S
arch/arm64/lib/strlen.S
arch/arm64/lib/strncmp.S
arch/arm64/mm/Makefile
arch/arm64/mm/cache.S
arch/arm64/mm/context.c
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/kasan_init.c [new file with mode: 0644]
arch/arm64/mm/mmu.c
arch/arm64/mm/pgd.c
arch/arm64/mm/proc.S
arch/avr32/include/asm/Kbuild
arch/blackfin/include/asm/Kbuild
arch/c6x/include/asm/Kbuild
arch/cris/include/asm/Kbuild
arch/frv/include/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/ia64/include/asm/Kbuild
arch/m32r/include/asm/Kbuild
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/linkage.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/include/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/mips/ath79/irq.c
arch/mips/cavium-octeon/setup.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/maar.h
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/jz4740/board-qi_lb60.c
arch/mips/jz4740/gpio.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/smp.c
arch/mips/loongson64/common/env.c
arch/mips/mm/dma-default.c
arch/mips/mm/init.c
arch/mips/net/bpf_jit_asm.S
arch/mn10300/include/asm/Kbuild
arch/nios2/include/asm/Kbuild
arch/powerpc/include/asm/Kbuild
arch/s390/include/asm/Kbuild
arch/score/include/asm/Kbuild
arch/tile/gxio/mpipe.c
arch/tile/include/asm/Kbuild
arch/tile/kernel/usb.c
arch/um/include/asm/Kbuild
arch/unicore32/include/asm/Kbuild
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/pvclock-abi.h
arch/x86/include/uapi/asm/bitsperlong.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_msr.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/crash.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi.c
arch/xtensa/include/asm/Kbuild
block/blk-mq-cpumap.c
block/blk-mq-sysfs.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-mq.h
crypto/asymmetric_keys/x509_public_key.c
drivers/acpi/ec.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/base/power/opp.c
drivers/block/loop.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/clocksource/rockchip_timer.c
drivers/clocksource/timer-keystone.c
drivers/dma/at_xdmac.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/idma64.c
drivers/dma/pxa_dma.c
drivers/dma/sun4i-dma.c
drivers/dma/xgene-dma.c
drivers/dma/zx296702_dma.c
drivers/firmware/efi/Makefile
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/libstub/string.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/include/cgs_linux.h
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
drivers/hwmon/abx500.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/pwm-fan.c
drivers/idle/intel_idle.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/input/joystick/Kconfig
drivers/input/joystick/walkera0701.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/misc/pm8941-pwrkey.c
drivers/input/misc/uinput.c
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_i2c.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/synaptics.c
drivers/input/serio/libps2.c
drivers/input/serio/parkbd.c
drivers/input/touchscreen/imx6ul_tsc.c
drivers/input/touchscreen/mms114.c
drivers/iommu/Kconfig
drivers/iommu/intel-iommu.c
drivers/iommu/iova.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mips-gic.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/ubi/io.c
drivers/mtd/ubi/vtbl.c
drivers/mtd/ubi/wl.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/ibm/emac/core.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/realtek/r8169.c
drivers/pci/pci-driver.c
drivers/perf/Kconfig
drivers/scsi/scsi_lib.c
drivers/thermal/power_allocator.c
drivers/watchdog/Kconfig
drivers/watchdog/bcm2835_wdt.c
drivers/watchdog/gef_wdt.c
drivers/watchdog/mena21_wdt.c
drivers/watchdog/moxart_wdt.c
fs/dax.c
fs/ubifs/xattr.c
include/asm-generic/word-at-a-time.h
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/drm/drm_dp_mst_helper.h
include/linux/acpi.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/iova.h
include/linux/memcontrol.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/rcupdate.h
include/linux/skbuff.h
include/linux/string.h
include/net/af_unix.h
include/uapi/linux/userfaultfd.h
ipc/msg.c
ipc/shm.c
ipc/util.c
kernel/events/core.c
kernel/irq/proc.c
kernel/locking/lockdep.c
kernel/rcu/tree.c
kernel/sched/core.c
kernel/time/clocksource.c
lib/string.c
mm/dmapool.c
mm/hugetlb.c
mm/memcontrol.c
mm/migrate.c
mm/slab.c
net/core/net-sysfs.c
net/core/skbuff.c
net/dsa/slave.c
net/ipv4/fib_frontend.c
net/ipv4/route.c
net/ipv6/route.c
net/l2tp/l2tp_core.c
net/sctp/associola.c
net/sctp/sm_sideeffect.c
net/sunrpc/xprtrdma/fmr_ops.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/physical_ops.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/unix/af_unix.c
samples/kprobes/jprobe_example.c
samples/kprobes/kprobe_example.c
samples/kprobes/kretprobe_example.c
scripts/Makefile.kasan
scripts/extract-cert.c
scripts/sign-file.c
security/keys/gc.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-get_cpuid.c [new file with mode: 0644]
tools/build/feature/test-numa_num_possible_cpus.c [new file with mode: 0644]
tools/lib/traceevent/event-parse.c
tools/perf/Documentation/intel-pt.txt
tools/perf/config/Makefile
tools/perf/util/probe-event.c
tools/perf/util/session.c
tools/perf/util/stat.c
tools/perf/util/symbol-elf.c
tools/perf/util/util.c
tools/power/x86/turbostat/turbostat.c

index 6d886300485827846541744075f7919be3d16200..f447f0516f074c700b0c78ca87fcfcf4595ea49f 100644 (file)
@@ -43,7 +43,7 @@ o  udev                   081                     # udevd --version
 o  grub                   0.93                    # grub --version || grub-install --version
 o  mcelog                 0.6                     # mcelog --version
 o  iptables               1.4.2                   # iptables -V
-o  openssl & libcrypto    1.0.1k                  # openssl version
+o  openssl & libcrypto    1.0.0                   # openssl version
 
 
 Kernel compilation
index d60030a1b909bc68dbe7e6d275064cafbe3fdf4e..7f1bed8872f3d73361f143d8131ff5d29fedb848 100644 (file)
@@ -58,7 +58,5 @@ linux,uefi-mmap-desc-size | 32-bit | Size in bytes of each entry in the UEFI
 --------------------------------------------------------------------------------
 linux,uefi-mmap-desc-ver  | 32-bit | Version of the mmap descriptor format.
 --------------------------------------------------------------------------------
-linux,uefi-stub-kern-ver  | string | Copy of linux_banner from build.
---------------------------------------------------------------------------------
 
 For verbose debug messages, specify 'uefi_debug' on the kernel command line.
index 7d9d3c2286b282d96f03cd1545be5780e998002e..aaf6d77e4148d63b6ffa514334f9dfd8f0a9c4ac 100644 (file)
@@ -104,7 +104,12 @@ Header notes:
 - The flags field (introduced in v3.17) is a little-endian 64-bit field
   composed as follows:
   Bit 0:       Kernel endianness.  1 if BE, 0 if LE.
-  Bits 1-63:   Reserved.
+  Bit 1-2:     Kernel Page size.
+                       0 - Unspecified.
+                       1 - 4K
+                       2 - 16K
+                       3 - 64K
+  Bits 3-63:   Reserved.
 
 - When image_size is zero, a bootloader should attempt to keep as much
   memory as possible free for use by the kernel immediately after the
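
As a minimal sketch, a boot loader could decode the flags field laid out
above like this (user-space C for illustration only; the helper names are
hypothetical and not part of the kernel tree):

	#include <stdint.h>

	static int kernel_is_big_endian(uint64_t flags)
	{
		return flags & 0x1;		/* bit 0: 1 if BE, 0 if LE */
	}

	static const char *kernel_page_size(uint64_t flags)
	{
		switch ((flags >> 1) & 0x3) {	/* bits 1-2: page size */
		case 1:  return "4K";
		case 2:  return "16K";
		case 3:  return "64K";
		default: return "unspecified";
		}
	}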
index 435251fa9ce0d4150547bd45967d2a2577e0b398..4b7c3d9b29bbf33327a832f2712f4f7bd1a101b7 100644 (file)
@@ -8,6 +8,8 @@ Required properties:
 
 - compatible : should be one of
        "arm,armv8-pmuv3"
+       "arm.cortex-a57-pmu"
+       "arm.cortex-a53-pmu"
        "arm,cortex-a17-pmu"
        "arm,cortex-a15-pmu"
        "arm,cortex-a12-pmu"
index 635a3b03663002d8f4eb47b25fd504d2a7ebca93..8d91ba9ff2fd0918bbf18fc1149f2fb3613bf80e 100644 (file)
@@ -25,7 +25,7 @@ Example:
                /* Cypress Gen3 touchpad */
                touchpad@67 {
                        compatible = "cypress,cyapa";
-                       reg = <0x24>;
+                       reg = <0x67>;
                        interrupt-parent = <&gpio>;
                        interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */
                        wakeup-source;
index 391717a68f3b1dffe100762775a4bd0368184a83..ec96b1f0147886102554c16e3bd260dc3f425619 100644 (file)
@@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority
 interrupt.
 
 Required Properties:
-- compatible: has to be "qca,<soctype>-misc-intc", "qca,ar7100-misc-intc"
-  as fallback
+- compatible: has to be "qca,<soctype>-misc-intc", "qca,ar7100-misc-intc" or
+  "qca,<soctype>-misc-intc", "qca,ar7240-misc-intc"
 - reg: Base address and size of the controllers memory area
 - interrupt-parent: phandle of the parent interrupt controller.
 - interrupts: Interrupt specifier for the controllers interrupt.
@@ -13,6 +13,9 @@ Required Properties:
 - #interrupt-cells : Specifies the number of cells needed to encode interrupt
                     source, should be 1
 
+Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x,
+use ar7240 for all other SoCs.
+
 Please refer to interrupts.txt in this directory for details of the common
 Interrupt Controllers bindings used by client devices.
 
@@ -28,3 +31,16 @@ Example:
                interrupt-controller;
                #interrupt-cells = <1>;
        };
+
+Another example:
+
+       interrupt-controller@18060010 {
+               compatible = "qca,ar9331-misc-intc", "qca,ar7240-misc-intc";
+               reg = <0x18060010 0x4>;
+
+               interrupt-parent = <&cpuintc>;
+               interrupts = <6>;
+
+               interrupt-controller;
+               #interrupt-cells = <1>;
+       };
index 14531da2fb54eb1761095373bd62583557310163..703f5784bc90d9647d015c086330b59a48b4631d 100644 (file)
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |       avr32: | TODO |
     |    blackfin: | TODO |
     |         c6x: | TODO |
index b85d000faeb4067c9ab1ed06690105d459a40a4e..c51f1146f3bd8396f572cddb9c87ae2620d236ba 100644 (file)
@@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is
    ABS_MT_POSITION_X := T_X
    ABS_MT_POSITION_Y := T_Y
    ABS_MT_TOOL_X := C_X
-   ABS_MT_TOOL_X := C_Y
+   ABS_MT_TOOL_Y := C_Y
 
 Unfortunately, there is not enough information to specify both the touching
 ellipse and the tool ellipse, so one has to resort to approximations.  One
index 62328d76b55bd9cfc59294666b49a70e0ddca5da..b0e911e0e8f50ad749686961e494377a1a49db45 100644 (file)
@@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned
 (alternatively, the runtime_suspend() callback will have to check if the
 device should really be suspended and return -EAGAIN if that is not the case).
 
-The runtime PM of PCI devices is disabled by default.  It is also blocked by
-pci_pm_init() that runs the pm_runtime_forbid() helper function.  If a PCI
-driver implements the runtime PM callbacks and intends to use the runtime PM
-framework provided by the PM core and the PCI subsystem, it should enable this
-feature by executing the pm_runtime_enable() helper function.  However, the
-driver should not call the pm_runtime_allow() helper function unblocking
-the runtime PM of the device.  Instead, it should allow user space or some
-platform-specific code to do that (user space can do it via sysfs), although
-once it has called pm_runtime_enable(), it must be prepared to handle the
+The runtime PM of PCI devices is enabled by default by the PCI core.  PCI
+device drivers do not need to enable it and should not attempt to do so.
+However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid()
+helper function.  In addition to that, the runtime PM usage counter of
+each PCI device is incremented by local_pci_probe() before executing the
+probe callback provided by the device's driver.
+
+If a PCI driver implements the runtime PM callbacks and intends to use the
+runtime PM framework provided by the PM core and the PCI subsystem, it needs
+to decrement the device's runtime PM usage counter in its probe callback
+function.  If it doesn't do that, the counter will always be different from
+zero for the device and it will never be runtime-suspended.  The simplest
+way to do that is by calling pm_runtime_put_noidle(), but if the driver
+wants to schedule an autosuspend right away, for example, it may call
+pm_runtime_put_autosuspend() instead for this purpose.  Generally, it
+just needs to call a function that decrements the device's usage counter
+from its probe routine to make runtime PM work for the device.
+
+It is important to remember that the driver's runtime_suspend() callback
+may be executed right after the usage counter has been decremented, because
+user space may already have caused the pm_runtime_allow() helper function
+unblocking the runtime PM of the device to run via sysfs, so the driver must
+be prepared to cope with that.
+
+The driver itself should not call pm_runtime_allow(), though.  Instead, it
+should let user space or some platform-specific code do that (user space can
+do it via sysfs as stated above), but it must be prepared to handle the
 runtime PM of the device correctly as soon as pm_runtime_allow() is called
-(which may happen at any time).  [It also is possible that user space causes
-pm_runtime_allow() to be called via sysfs before the driver is loaded, so in
-fact the driver has to be prepared to handle the runtime PM of the device as
-soon as it calls pm_runtime_enable().]
+(which may happen at any time, even before the driver is loaded).
+
+When the driver's remove callback runs, it has to balance the decrementation
+of the device's runtime PM usage counter at the probe time.  For this reason,
+if it has decremented the counter in its probe callback, it must run
+pm_runtime_get_noresume() in its remove callback.  [Since the core carries
+out a runtime resume of the device and bumps up the device's usage counter
+before running the driver's remove callback, the runtime PM of the device
+is effectively disabled for the duration of the remove execution and all
+runtime PM helper functions incrementing the device's usage counter are
+then effectively equivalent to pm_runtime_get_noresume().]
 
 The runtime PM framework works by processing requests to suspend or resume
 devices, or to check if they are idle (in which cases it is reasonable to
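
As a rough sketch of the usage-counter balancing described above (the "foo"
driver and its callbacks are hypothetical; device setup and error handling
are omitted):

	#include <linux/pci.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		/* ... pci_enable_device(), resource setup, etc. ... */

		/* Drop the reference taken by local_pci_probe() so that the
		 * device can be runtime-suspended once user space runs
		 * pm_runtime_allow() via sysfs. */
		pm_runtime_put_noidle(&pdev->dev);
		return 0;
	}

	static void foo_remove(struct pci_dev *pdev)
	{
		/* Balance the decrement done in probe; the core has already
		 * resumed the device and bumped its usage counter by now. */
		pm_runtime_get_noresume(&pdev->dev);

		/* ... teardown ... */
	}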
index 2bc8abc57fa04c1a4e47bb2d2b8626f8209b24a9..6c6247aaa7b93a0a038e5f65b30679486b410cc2 100644 (file)
@@ -18,6 +18,7 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__        /* For PPC64, to get LL64 types */
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
index 9f6685f6c5a97062e7d44d04493afe518fa27ab8..8addb19c299bd678dc2b4a5108d1bdd3bc1260b3 100644 (file)
@@ -822,12 +822,13 @@ F:        arch/arm/include/asm/floppy.h
 
 ARM PMU PROFILING AND DEBUGGING
 M:     Will Deacon <will.deacon@arm.com>
+R:     Mark Rutland <mark.rutland@arm.com>
 S:     Maintained
-F:     arch/arm/kernel/perf_*
+F:     arch/arm*/kernel/perf_*
 F:     arch/arm/oprofile/common.c
-F:     arch/arm/kernel/hw_breakpoint.c
-F:     arch/arm/include/asm/hw_breakpoint.h
-F:     arch/arm/include/asm/perf_event.h
+F:     arch/arm*/kernel/hw_breakpoint.c
+F:     arch/arm*/include/asm/hw_breakpoint.h
+F:     arch/arm*/include/asm/perf_event.h
 F:     drivers/perf/arm_pmu.c
 F:     include/linux/perf/arm_pmu.h
 
@@ -5957,7 +5958,7 @@ F:        virt/kvm/
 KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
 M:     Joerg Roedel <joro@8bytes.org>
 L:     kvm@vger.kernel.org
-W:     http://kvm.qumranet.com
+W:     http://www.linux-kvm.org/
 S:     Maintained
 F:     arch/x86/include/asm/svm.h
 F:     arch/x86/kvm/svm.c
@@ -5965,7 +5966,7 @@ F:        arch/x86/kvm/svm.c
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
 M:     Alexander Graf <agraf@suse.com>
 L:     kvm-ppc@vger.kernel.org
-W:     http://kvm.qumranet.com
+W:     http://www.linux-kvm.org/
 T:     git git://github.com/agraf/linux-2.6.git
 S:     Supported
 F:     arch/powerpc/include/asm/kvm*
index 1d341eba143d38f0b9e7bd7669c6357f477d94b4..fd46821e428d255581680130f4292447b974201c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index 7611b10a2d238c7b4bb73696b59e2fe8b14ceb2c..0b10ef2a43726e0188b61de96bd5a5bf1ba50067 100644 (file)
@@ -48,4 +48,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 07d1811aa03fcd1ecd5ee7c260688a59f7ab97e4..c62fb0393567d183ecef4ebf09bbf31dbd9835b1 100644 (file)
@@ -48,6 +48,7 @@ config ARM64
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
        select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
@@ -169,10 +170,12 @@ config FIX_EARLYCON_MEM
 
 config PGTABLE_LEVELS
        int
+       default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
        default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
        default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
        default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
-       default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
+       default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
+       default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
 
 source "init/Kconfig"
 
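The PGTABLE_LEVELS defaults above follow from each translation level
resolving PAGE_SHIFT - 3 bits, since one page holds 2^(PAGE_SHIFT - 3)
eight-byte entries; a small illustrative helper, not kernel code:

	/* levels = ceil((va_bits - page_shift) / (page_shift - 3)) */
	static int pgtable_levels(int page_shift, int va_bits)
	{
		int bits_per_level = page_shift - 3;

		return (va_bits - page_shift + bits_per_level - 1) /
			bits_per_level;
	}

	/*
	 * pgtable_levels(14, 36) == 2   16K pages, 36-bit VA
	 * pgtable_levels(14, 47) == 3   16K pages, 47-bit VA
	 * pgtable_levels(12, 48) == 4    4K pages, 48-bit VA
	 * pgtable_levels(16, 42) == 2   64K pages, 42-bit VA
	 */
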
@@ -362,25 +365,37 @@ config ARM64_4K_PAGES
        help
          This feature enables 4KB pages support.
 
+config ARM64_16K_PAGES
+       bool "16KB"
+       help
+         The system will use 16KB pages support. AArch32 emulation
+         requires applications compiled with 16K (or a multiple of 16K)
+         aligned segments.
+
 config ARM64_64K_PAGES
        bool "64KB"
        help
          This feature enables 64KB pages support (4KB by default)
          allowing only two levels of page tables and faster TLB
-         look-up. AArch32 emulation is not available when this feature
-         is enabled.
+         look-up. AArch32 emulation requires applications compiled
+         with 64K aligned segments.
 
 endchoice
 
 choice
        prompt "Virtual address space size"
        default ARM64_VA_BITS_39 if ARM64_4K_PAGES
+       default ARM64_VA_BITS_47 if ARM64_16K_PAGES
        default ARM64_VA_BITS_42 if ARM64_64K_PAGES
        help
          Allows choosing one of multiple possible virtual address
          space sizes. The level of translation table is determined by
          a combination of page size and virtual address space size.
 
+config ARM64_VA_BITS_36
+       bool "36-bit" if EXPERT
+       depends on ARM64_16K_PAGES
+
 config ARM64_VA_BITS_39
        bool "39-bit"
        depends on ARM64_4K_PAGES
@@ -389,6 +404,10 @@ config ARM64_VA_BITS_42
        bool "42-bit"
        depends on ARM64_64K_PAGES
 
+config ARM64_VA_BITS_47
+       bool "47-bit"
+       depends on ARM64_16K_PAGES
+
 config ARM64_VA_BITS_48
        bool "48-bit"
 
@@ -396,8 +415,10 @@ endchoice
 
 config ARM64_VA_BITS
        int
+       default 36 if ARM64_VA_BITS_36
        default 39 if ARM64_VA_BITS_39
        default 42 if ARM64_VA_BITS_42
+       default 47 if ARM64_VA_BITS_47
        default 48 if ARM64_VA_BITS_48
 
 config CPU_BIG_ENDIAN
@@ -427,6 +448,7 @@ config NR_CPUS
 
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
+       select GENERIC_IRQ_MIGRATION
        help
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu.
@@ -454,12 +476,8 @@ config HAVE_ARCH_PFN_VALID
        def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
 
 config HW_PERF_EVENTS
-       bool "Enable hardware performance counter support for perf events"
-       depends on PERF_EVENTS
-       default y
-       help
-         Enable hardware performance counter support for perf events. If
-         disabled, perf events will use software events only.
+       def_bool y
+       depends on ARM_PMU
 
 config SYS_SUPPORTS_HUGETLBFS
        def_bool y
@@ -468,7 +486,7 @@ config ARCH_WANT_GENERAL_HUGETLB
        def_bool y
 
 config ARCH_WANT_HUGE_PMD_SHARE
-       def_bool y if !ARM64_64K_PAGES
+       def_bool y if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
 
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
        def_bool y
@@ -505,7 +523,25 @@ config XEN
 config FORCE_MAX_ZONEORDER
        int
        default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
+       default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
        default "11"
+       help
+         The kernel memory allocator divides physically contiguous memory
+         blocks into "zones", where each zone is a power of two number of
+         pages.  This option selects the largest power of two that the kernel
+         keeps in the memory allocator.  If you need to allocate very large
+         blocks of physically contiguous memory, then you may need to
+         increase this value.
+
+         This config option is actually maximum order plus one. For example,
+         a value of 11 means that the largest free memory block is 2^10 pages.
+
+         We make sure that we can allocate upto a HugePage size for each configuration.
+         Hence we have :
+               MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2
+
+         However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
+         4M allocations matching the default size used by generic code.
 
 menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
@@ -680,7 +716,7 @@ source "fs/Kconfig.binfmt"
 
 config COMPAT
        bool "Kernel support for 32-bit EL0"
-       depends on !ARM64_64K_PAGES || EXPERT
+       depends on ARM64_4K_PAGES || EXPERT
        select COMPAT_BINFMT_ELF
        select HAVE_UID16
        select OLD_SIGSUSPEND3
@@ -691,9 +727,9 @@ config COMPAT
          the user helper functions, VFP support and the ptrace interface are
          handled appropriately by the kernel.
 
-         If you also enabled CONFIG_ARM64_64K_PAGES, please be aware that you
-         will only be able to execute AArch32 binaries that were compiled with
-         64k aligned segments.
+         If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware
+         that you will only be able to execute AArch32 binaries that were compiled
+         with page size aligned segments.
 
          If you want to execute 32-bit userspace applications, say Y.
 
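The FORCE_MAX_ZONEORDER defaults added earlier in this file can be
cross-checked against the formula quoted in the new help text
(illustrative C under the arm64 assumption PMD_SHIFT = 2*PAGE_SHIFT - 3;
not kernel code):

	/* MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 */
	static int max_zoneorder(int page_shift)
	{
		int pmd_shift = 2 * page_shift - 3;

		return (pmd_shift - page_shift) + 1;	/* == page_shift - 2 */
	}

	/*
	 * max_zoneorder(16) == 14  64K pages + THP default
	 * max_zoneorder(14) == 12  16K pages + THP default
	 * max_zoneorder(12) == 10, but 4K keeps 11 to allow 4M blocks
	 */
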
index d6285ef9b5f976639630606b04d7244a05ac6130..c24d6adc0420c78bb0efbf747101c87c0be9e81a 100644 (file)
@@ -77,7 +77,7 @@ config DEBUG_RODATA
           If in doubt, say Y
 
 config DEBUG_ALIGN_RODATA
-       depends on DEBUG_RODATA && !ARM64_64K_PAGES
+       depends on DEBUG_RODATA && ARM64_4K_PAGES
        bool "Align linker sections up to SECTION_SIZE"
        help
          If this option is enabled, sections that may potentially be marked as
index f9914d7c1bb00b5c4cbe7a19c0f62c8eca54cf81..f41c6761ec36017224a99bf0fd89a4d354a2c685 100644 (file)
@@ -55,6 +55,13 @@ else
 TEXT_OFFSET := 0x00080000
 endif
 
+# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - 3)) - (1 << 61)
+# in 32-bit arithmetic
+KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
+                       (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+                       + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - 3)) \
+                       - (1 << (64 - 32 - 3)) )) )
+
 export TEXT_OFFSET GZFLAGS
 
 core-y         += arch/arm64/kernel/ arch/arm64/mm/
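
The shell arithmetic above can be sanity-checked in plain 64-bit C
(assuming VA_BITS = 39; illustrative only, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned va_bits = 39;
		uint64_t va_start = ~0ULL << va_bits;	/* 0xffffff8000000000 */
		uint64_t offset = va_start + (1ULL << (va_bits - 3))
					   - (1ULL << 61);

		/* prints 0xdfffff9000000000 */
		printf("0x%016llx\n", (unsigned long long)offset);
		return 0;
	}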
index c62751153a4fc528202b4d645fc930f851aa2bc1..734e1272b19f526957e37eebd463a3af890816b7 100644 (file)
                };
        };
 
-       pmu {
-               compatible = "arm,armv8-pmuv3";
+       pmu_a57 {
+               compatible = "arm,cortex-a57-pmu";
                interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-affinity = <&A57_0>,
+                                    <&A57_1>;
+       };
+
+       pmu_a53 {
+               compatible = "arm,cortex-a53-pmu";
+               interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-affinity = <&A57_0>,
-                                    <&A57_1>,
-                                    <&A53_0>,
+               interrupt-affinity = <&A53_0>,
                                     <&A53_1>,
                                     <&A53_2>,
                                     <&A53_3>;
index d7cbdd482a61d231cbbfcff772dcf48e51e92bb2..ffa05aeab3c72158630f9831fa2764f3dd900cc5 100644 (file)
                };
        };
 
-       pmu {
-               compatible = "arm,armv8-pmuv3";
+       pmu_a57 {
+               compatible = "arm,cortex-a57-pmu";
                interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 06 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-affinity = <&A57_0>,
+                                    <&A57_1>;
+       };
+
+       pmu_a53 {
+               compatible = "arm,cortex-a53-pmu";
+               interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-affinity = <&A57_0>,
-                                    <&A57_1>,
-                                    <&A53_0>,
+               interrupt-affinity = <&A53_0>,
                                     <&A53_1>,
                                     <&A53_2>,
                                     <&A53_3>;
index 34d71dd867811af658d782520f2f688db9066c69..c8ccda16e079979555f6ead6690e404246afc493 100644 (file)
@@ -109,6 +109,10 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_UARTS_4=y
+CONFIG_SERIAL_SAMSUNG_UARTS=4
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
@@ -145,6 +149,10 @@ CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_IDMAC=y
+CONFIG_MMC_DW_PLTFM=y
+CONFIG_MMC_DW_EXYNOS=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_SYSCON=y
index b51f2cc22ca99731f2fa5efdced173f0a50b6c92..12eff928ef8b38dd18ae3bd157b12eb918f797a6 100644 (file)
@@ -193,4 +193,15 @@ lr .req    x30             // link register
        str     \src, [\tmp, :lo12:\sym]
        .endm
 
+/*
+ * Annotate a function as position independent, i.e., safe to be called before
+ * the kernel virtual mapping is activated.
+ */
+#define ENDPIPROC(x)                   \
+       .globl  __pi_##x;               \
+       .type   __pi_##x, %function;    \
+       .set    __pi_##x, x;            \
+       .size   __pi_##x, . - x;        \
+       ENDPROC(x)
+
 #endif /* __ASM_ASSEMBLER_H */
index 35a67783cfa088d4166de9ff7ed6f993b899c09b..5e13ad76a2493ad1458943338cade910e77d979d 100644 (file)
 
 #define atomic_read(v)                 READ_ONCE((v)->counter)
 #define atomic_set(v, i)               (((v)->counter) = (i))
+
+#define atomic_add_return_relaxed      atomic_add_return_relaxed
+#define atomic_add_return_acquire      atomic_add_return_acquire
+#define atomic_add_return_release      atomic_add_return_release
+#define atomic_add_return              atomic_add_return
+
+#define atomic_inc_return_relaxed(v)   atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v)   atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v)   atomic_add_return_release(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+
+#define atomic_sub_return_relaxed      atomic_sub_return_relaxed
+#define atomic_sub_return_acquire      atomic_sub_return_acquire
+#define atomic_sub_return_release      atomic_sub_return_release
+#define atomic_sub_return              atomic_sub_return
+
+#define atomic_dec_return_relaxed(v)   atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v)   atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v)   atomic_sub_return_release(1, (v))
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+
+#define atomic_xchg_relaxed(v, new)    xchg_relaxed(&((v)->counter), (new))
+#define atomic_xchg_acquire(v, new)    xchg_acquire(&((v)->counter), (new))
+#define atomic_xchg_release(v, new)    xchg_release(&((v)->counter), (new))
 #define atomic_xchg(v, new)            xchg(&((v)->counter), (new))
+
+#define atomic_cmpxchg_relaxed(v, old, new)                            \
+       cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define atomic_cmpxchg_acquire(v, old, new)                            \
+       cmpxchg_acquire(&((v)->counter), (old), (new))
+#define atomic_cmpxchg_release(v, old, new)                            \
+       cmpxchg_release(&((v)->counter), (old), (new))
 #define atomic_cmpxchg(v, old, new)    cmpxchg(&((v)->counter), (old), (new))
 
 #define atomic_inc(v)                  atomic_add(1, (v))
 #define atomic_dec(v)                  atomic_sub(1, (v))
-#define atomic_inc_return(v)           atomic_add_return(1, (v))
-#define atomic_dec_return(v)           atomic_sub_return(1, (v))
 #define atomic_inc_and_test(v)         (atomic_inc_return(v) == 0)
 #define atomic_dec_and_test(v)         (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
 #define ATOMIC64_INIT                  ATOMIC_INIT
 #define atomic64_read                  atomic_read
 #define atomic64_set                   atomic_set
+
+#define atomic64_add_return_relaxed    atomic64_add_return_relaxed
+#define atomic64_add_return_acquire    atomic64_add_return_acquire
+#define atomic64_add_return_release    atomic64_add_return_release
+#define atomic64_add_return            atomic64_add_return
+
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
+
+#define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
+#define atomic64_sub_return_acquire    atomic64_sub_return_acquire
+#define atomic64_sub_return_release    atomic64_sub_return_release
+#define atomic64_sub_return            atomic64_sub_return
+
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
+
+#define atomic64_xchg_relaxed          atomic_xchg_relaxed
+#define atomic64_xchg_acquire          atomic_xchg_acquire
+#define atomic64_xchg_release          atomic_xchg_release
 #define atomic64_xchg                  atomic_xchg
+
+#define atomic64_cmpxchg_relaxed       atomic_cmpxchg_relaxed
+#define atomic64_cmpxchg_acquire       atomic_cmpxchg_acquire
+#define atomic64_cmpxchg_release       atomic_cmpxchg_release
 #define atomic64_cmpxchg               atomic_cmpxchg
 
 #define atomic64_inc(v)                        atomic64_add(1, (v))
 #define atomic64_dec(v)                        atomic64_sub(1, (v))
-#define atomic64_inc_return(v)         atomic64_add_return(1, (v))
-#define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
 #define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return(v) == 0)
 #define atomic64_sub_and_test(i, v)    (atomic64_sub_return((i), (v)) == 0)
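
The ordering variants wired up above follow the generic naming scheme:
_relaxed imposes no ordering, _acquire/_release order one direction each,
and the unsuffixed forms stay fully ordered. A sketch with a hypothetical
counter (not kernel code):

	#include <linux/atomic.h>
	#include <linux/types.h>

	static atomic_t refs = ATOMIC_INIT(1);

	static int get_ref(void)
	{
		/* A bare counter bump needs no ordering guarantee. */
		return atomic_inc_return_relaxed(&refs);
	}

	static bool put_ref(void)
	{
		/* Release ordering: stores before the drop are visible to
		 * whoever observes it; a real refcount would still pair the
		 * final put with an acquire before freeing. */
		return atomic_dec_return_release(&refs) == 0;
	}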
index b3b5c4ae3800b061d5ad2fdd911aea677feedc32..74d0b8eb0799cb6635b999f7afb1c7e7c0a361bf 100644 (file)
@@ -55,40 +55,47 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                             \
 }                                                                      \
 __LL_SC_EXPORT(atomic_##op);
 
-#define ATOMIC_OP_RETURN(op, asm_op)                                   \
+#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)           \
 __LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v))               \
+__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))         \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
-       asm volatile("// atomic_" #op "_return\n"                       \
+       asm volatile("// atomic_" #op "_return" #name "\n"              \
 "      prfm    pstl1strm, %2\n"                                        \
-"1:    ldxr    %w0, %2\n"                                              \
+"1:    ld" #acq "xr    %w0, %2\n"                                      \
 "      " #asm_op "     %w0, %w0, %w3\n"                                \
-"      stlxr   %w1, %w0, %2\n"                                         \
-"      cbnz    %w1, 1b"                                                \
+"      st" #rel "xr    %w1, %w0, %2\n"                                 \
+"      cbnz    %w1, 1b\n"                                              \
+"      " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
-       : "memory");                                                    \
+       : cl);                                                          \
                                                                        \
-       smp_mb();                                                       \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic_##op##_return);
+__LL_SC_EXPORT(atomic_##op##_return##name);
+
+#define ATOMIC_OPS(...)                                                        \
+       ATOMIC_OP(__VA_ARGS__)                                          \
+       ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)
 
-#define ATOMIC_OPS(op, asm_op)                                         \
-       ATOMIC_OP(op, asm_op)                                           \
-       ATOMIC_OP_RETURN(op, asm_op)
+#define ATOMIC_OPS_RLX(...)                                            \
+       ATOMIC_OPS(__VA_ARGS__)                                         \
+       ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
+       ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
+       ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
+ATOMIC_OPS_RLX(add, add)
+ATOMIC_OPS_RLX(sub, sub)
 
 ATOMIC_OP(and, and)
 ATOMIC_OP(andnot, bic)
 ATOMIC_OP(or, orr)
 ATOMIC_OP(xor, eor)
 
+#undef ATOMIC_OPS_RLX
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -111,40 +118,47 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                      \
 }                                                                      \
 __LL_SC_EXPORT(atomic64_##op);
 
-#define ATOMIC64_OP_RETURN(op, asm_op)                                 \
+#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)         \
 __LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v))          \
+__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))    \
 {                                                                      \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
-       asm volatile("// atomic64_" #op "_return\n"                     \
+       asm volatile("// atomic64_" #op "_return" #name "\n"            \
 "      prfm    pstl1strm, %2\n"                                        \
-"1:    ldxr    %0, %2\n"                                               \
+"1:    ld" #acq "xr    %0, %2\n"                                       \
 "      " #asm_op "     %0, %0, %3\n"                                   \
-"      stlxr   %w1, %0, %2\n"                                          \
-"      cbnz    %w1, 1b"                                                \
+"      st" #rel "xr    %w1, %0, %2\n"                                  \
+"      cbnz    %w1, 1b\n"                                              \
+"      " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
-       : "memory");                                                    \
+       : cl);                                                          \
                                                                        \
-       smp_mb();                                                       \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_##op##_return);
+__LL_SC_EXPORT(atomic64_##op##_return##name);
+
+#define ATOMIC64_OPS(...)                                              \
+       ATOMIC64_OP(__VA_ARGS__)                                        \
+       ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)
 
-#define ATOMIC64_OPS(op, asm_op)                                       \
-       ATOMIC64_OP(op, asm_op)                                         \
-       ATOMIC64_OP_RETURN(op, asm_op)
+#define ATOMIC64_OPS_RLX(...)                                          \
+       ATOMIC64_OPS(__VA_ARGS__)                                       \
+       ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
+       ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
+       ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)
 
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
+ATOMIC64_OPS_RLX(add, add)
+ATOMIC64_OPS_RLX(sub, sub)
 
 ATOMIC64_OP(and, and)
 ATOMIC64_OP(andnot, bic)
 ATOMIC64_OP(or, orr)
 ATOMIC64_OP(xor, eor)
 
+#undef ATOMIC64_OPS_RLX
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
@@ -172,7 +186,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 }
 __LL_SC_EXPORT(atomic64_dec_if_positive);
 
-#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl)                       \
+#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)                  \
 __LL_SC_INLINE unsigned long                                           \
 __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,               \
                                     unsigned long old,                 \
@@ -182,7 +196,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,            \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
-       "1:     ldxr" #sz "\t%" #w "[oldval], %[v]\n"                   \
+       "1:     ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"           \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
        "       cbnz    %" #w "[tmp], 2f\n"                             \
        "       st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"     \
@@ -199,14 +213,22 @@ __LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,          \
 }                                                                      \
 __LL_SC_EXPORT(__cmpxchg_case_##name);
 
-__CMPXCHG_CASE(w, b,    1,        ,  ,         )
-__CMPXCHG_CASE(w, h,    2,        ,  ,         )
-__CMPXCHG_CASE(w,  ,    4,        ,  ,         )
-__CMPXCHG_CASE( ,  ,    8,        ,  ,         )
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
-__CMPXCHG_CASE(w,  , mb_4, dmb ish, l, "memory")
-__CMPXCHG_CASE( ,  , mb_8, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
+__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
+__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
+__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
+__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
+__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
+__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
+__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
+__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
+__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
+__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
+__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
+__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
+__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
+__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
+__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")
 
 #undef __CMPXCHG_CASE
 
index 55d740e634596363f6cbfbdeacd77113a00a486e..1fce7908e6904a43791a385b5df76ef080ebefa2 100644 (file)
@@ -75,24 +75,32 @@ static inline void atomic_add(int i, atomic_t *v)
        : "x30");
 }
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
+#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                          \
+static inline int atomic_add_return##name(int i, atomic_t *v)          \
+{                                                                      \
+       register int w0 asm ("w0") = i;                                 \
+       register atomic_t *x1 asm ("x1") = v;                           \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC(add_return##name),                               \
+       /* LSE atomics */                                               \
+       "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
+       "       add     %w[i], %w[i], w30")                             \
+       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : "x30" , ##cl);                                                \
+                                                                       \
+       return w0;                                                      \
+}
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "       nop\n"
-       __LL_SC_ATOMIC(add_return),
-       /* LSE atomics */
-       "       ldaddal %w[i], w30, %[v]\n"
-       "       add     %w[i], %w[i], w30")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : "x30", "memory");
+ATOMIC_OP_ADD_RETURN(_relaxed,   )
+ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
+ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
+ATOMIC_OP_ADD_RETURN(        , al, "memory")
 
-       return w0;
-}
+#undef ATOMIC_OP_ADD_RETURN
 
 static inline void atomic_and(int i, atomic_t *v)
 {
@@ -128,27 +136,34 @@ static inline void atomic_sub(int i, atomic_t *v)
        : "x30");
 }
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "       nop\n"
-       __LL_SC_ATOMIC(sub_return)
-       "       nop",
-       /* LSE atomics */
-       "       neg     %w[i], %w[i]\n"
-       "       ldaddal %w[i], w30, %[v]\n"
-       "       add     %w[i], %w[i], w30")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : "x30", "memory");
-
-       return w0;
+#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)                          \
+static inline int atomic_sub_return##name(int i, atomic_t *v)          \
+{                                                                      \
+       register int w0 asm ("w0") = i;                                 \
+       register atomic_t *x1 asm ("x1") = v;                           \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC(sub_return##name)                                \
+       "       nop",                                                   \
+       /* LSE atomics */                                               \
+       "       neg     %w[i], %w[i]\n"                                 \
+       "       ldadd" #mb "    %w[i], w30, %[v]\n"                     \
+       "       add     %w[i], %w[i], w30")                             \
+       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : "x30" , ##cl);                                                \
+                                                                       \
+       return w0;                                                      \
 }
 
+ATOMIC_OP_SUB_RETURN(_relaxed,   )
+ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
+ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
+ATOMIC_OP_SUB_RETURN(        , al, "memory")
+
+#undef ATOMIC_OP_SUB_RETURN
 #undef __LL_SC_ATOMIC
 
 #define __LL_SC_ATOMIC64(op)   __LL_SC_CALL(atomic64_##op)
@@ -201,24 +216,32 @@ static inline void atomic64_add(long i, atomic64_t *v)
        : "x30");
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
-       register long x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
+#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                                \
+static inline long atomic64_add_return##name(long i, atomic64_t *v)    \
+{                                                                      \
+       register long x0 asm ("x0") = i;                                \
+       register atomic64_t *x1 asm ("x1") = v;                         \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC64(add_return##name),                             \
+       /* LSE atomics */                                               \
+       "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
+       "       add     %[i], %[i], x30")                               \
+       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : "x30" , ##cl);                                                \
+                                                                       \
+       return x0;                                                      \
+}
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "       nop\n"
-       __LL_SC_ATOMIC64(add_return),
-       /* LSE atomics */
-       "       ldaddal %[i], x30, %[v]\n"
-       "       add     %[i], %[i], x30")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : "x30", "memory");
+ATOMIC64_OP_ADD_RETURN(_relaxed,   )
+ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
+ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
+ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 
-       return x0;
-}
+#undef ATOMIC64_OP_ADD_RETURN
 
 static inline void atomic64_and(long i, atomic64_t *v)
 {
@@ -254,26 +277,34 @@ static inline void atomic64_sub(long i, atomic64_t *v)
        : "x30");
 }
 
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
-       register long x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
+#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)                                \
+static inline long atomic64_sub_return##name(long i, atomic64_t *v)    \
+{                                                                      \
+       register long x0 asm ("x0") = i;                                \
+       register atomic64_t *x1 asm ("x1") = v;                         \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC64(sub_return##name)                              \
+       "       nop",                                                   \
+       /* LSE atomics */                                               \
+       "       neg     %[i], %[i]\n"                                   \
+       "       ldadd" #mb "    %[i], x30, %[v]\n"                      \
+       "       add     %[i], %[i], x30")                               \
+       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : "x30" , ##cl);                                                \
+                                                                       \
+       return x0;                                                      \
+}
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(
-       /* LL/SC */
-       "       nop\n"
-       __LL_SC_ATOMIC64(sub_return)
-       "       nop",
-       /* LSE atomics */
-       "       neg     %[i], %[i]\n"
-       "       ldaddal %[i], x30, %[v]\n"
-       "       add     %[i], %[i], x30")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : "x30", "memory");
+ATOMIC64_OP_SUB_RETURN(_relaxed,   )
+ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
+ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
+ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 
-       return x0;
-}
+#undef ATOMIC64_OP_SUB_RETURN
 
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
@@ -333,14 +364,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,     \
        return x0;                                                      \
 }
 
-__CMPXCHG_CASE(w, b,    1,   )
-__CMPXCHG_CASE(w, h,    2,   )
-__CMPXCHG_CASE(w,  ,    4,   )
-__CMPXCHG_CASE(x,  ,    8,   )
-__CMPXCHG_CASE(w, b, mb_1, al, "memory")
-__CMPXCHG_CASE(w, h, mb_2, al, "memory")
-__CMPXCHG_CASE(w,  , mb_4, al, "memory")
-__CMPXCHG_CASE(x,  , mb_8, al, "memory")
+__CMPXCHG_CASE(w, b,     1,   )
+__CMPXCHG_CASE(w, h,     2,   )
+__CMPXCHG_CASE(w,  ,     4,   )
+__CMPXCHG_CASE(x,  ,     8,   )
+__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
+__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
+__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
+__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
+__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
+__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
+__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
+__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
+__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
+__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
+__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
+__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")
 
 #undef __LL_SC_CMPXCHG
 #undef __CMPXCHG_CASE
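
The pattern above repeats for add_return, sub_return and cmpxchg: one macro body, four instantiations differing only in the LSE suffix (none, a, l, al) and in whether "memory" is clobbered, so callers can request exactly the ordering they need. A minimal kernel-context sketch (the counter and call sites are illustrative, not part of this patch):

        /* assumes <linux/atomic.h> */
        static atomic64_t nr_events = ATOMIC64_INIT(0);

        static long record_event(void)
        {
                /* No ordering against surrounding accesses required. */
                return atomic64_add_return_relaxed(1, &nr_events);
        }

        static long retire_event(void)
        {
                /* Prior writes must be visible before the decrement. */
                return atomic64_sub_return_release(1, &nr_events);
        }

On an LSE-capable CPU these patch into plain ldadd, ldaddl and so on at boot, while older CPUs fall back to the out-of-line LL/SC versions reached via __LL_SC_ATOMIC64.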
index c75b8d027eb1e657efaa2ec219e14af27c9fcfa9..54efedaf331fda55478d001d860d6137be5d08e8 100644 (file)
@@ -115,6 +115,13 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
+static inline void __local_flush_icache_all(void)
+{
+       asm("ic iallu");
+       dsb(nsh);
+       isb();
+}
+
 static inline void __flush_icache_all(void)
 {
        asm("ic ialluis");
index 899e9f1d19e486defa413d087f6518ef38d35e29..9ea611ea69df739009d0a6d432bbbedcab05284b 100644 (file)
 #include <asm/barrier.h>
 #include <asm/lse.h>
 
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-       unsigned long ret, tmp;
-
-       switch (size) {
-       case 1:
-               asm volatile(ARM64_LSE_ATOMIC_INSN(
-               /* LL/SC */
-               "       prfm    pstl1strm, %2\n"
-               "1:     ldxrb   %w0, %2\n"
-               "       stlxrb  %w1, %w3, %2\n"
-               "       cbnz    %w1, 1b\n"
-               "       dmb     ish",
-               /* LSE atomics */
-               "       nop\n"
-               "       nop\n"
-               "       swpalb  %w3, %w0, %2\n"
-               "       nop\n"
-               "       nop")
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       case 2:
-               asm volatile(ARM64_LSE_ATOMIC_INSN(
-               /* LL/SC */
-               "       prfm    pstl1strm, %2\n"
-               "1:     ldxrh   %w0, %2\n"
-               "       stlxrh  %w1, %w3, %2\n"
-               "       cbnz    %w1, 1b\n"
-               "       dmb     ish",
-               /* LSE atomics */
-               "       nop\n"
-               "       nop\n"
-               "       swpalh  %w3, %w0, %2\n"
-               "       nop\n"
-               "       nop")
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       case 4:
-               asm volatile(ARM64_LSE_ATOMIC_INSN(
-               /* LL/SC */
-               "       prfm    pstl1strm, %2\n"
-               "1:     ldxr    %w0, %2\n"
-               "       stlxr   %w1, %w3, %2\n"
-               "       cbnz    %w1, 1b\n"
-               "       dmb     ish",
-               /* LSE atomics */
-               "       nop\n"
-               "       nop\n"
-               "       swpal   %w3, %w0, %2\n"
-               "       nop\n"
-               "       nop")
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       case 8:
-               asm volatile(ARM64_LSE_ATOMIC_INSN(
-               /* LL/SC */
-               "       prfm    pstl1strm, %2\n"
-               "1:     ldxr    %0, %2\n"
-               "       stlxr   %w1, %3, %2\n"
-               "       cbnz    %w1, 1b\n"
-               "       dmb     ish",
-               /* LSE atomics */
-               "       nop\n"
-               "       nop\n"
-               "       swpal   %3, %0, %2\n"
-               "       nop\n"
-               "       nop")
-                       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
-                       : "r" (x)
-                       : "memory");
-               break;
-       default:
-               BUILD_BUG();
-       }
-
-       return ret;
+/*
+ * We need separate acquire parameters for ll/sc and lse, since the full
+ * barrier case is generated as release+dmb for the former and
+ * acquire+release for the latter.
+ */
+#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl)   \
+static inline unsigned long __xchg_case_##name(unsigned long x,                \
+                                              volatile void *ptr)      \
+{                                                                      \
+       unsigned long ret, tmp;                                         \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       prfm    pstl1strm, %2\n"                                \
+       "1:     ld" #acq "xr" #sz "\t%" #w "0, %2\n"                    \
+       "       st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n"               \
+       "       cbnz    %w1, 1b\n"                                      \
+       "       " #mb,                                                  \
+       /* LSE atomics */                                               \
+       "       nop\n"                                                  \
+       "       nop\n"                                                  \
+       "       swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n"     \
+       "       nop\n"                                                  \
+       "       " #nop_lse)                                             \
+       : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)                   \
+       : "r" (x)                                                       \
+       : cl);                                                          \
+                                                                       \
+       return ret;                                                     \
 }
 
-#define xchg(ptr,x) \
-({ \
-       __typeof__(*(ptr)) __ret; \
-       __ret = (__typeof__(*(ptr))) \
-               __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
-       __ret; \
+__XCHG_CASE(w, b,     1,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w, h,     2,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w,  ,     4,        ,    ,  ,  ,  ,         )
+__XCHG_CASE( ,  ,     8,        ,    ,  ,  ,  ,         )
+__XCHG_CASE(w, b, acq_1,        ,    , a, a,  , "memory")
+__XCHG_CASE(w, h, acq_2,        ,    , a, a,  , "memory")
+__XCHG_CASE(w,  , acq_4,        ,    , a, a,  , "memory")
+__XCHG_CASE( ,  , acq_8,        ,    , a, a,  , "memory")
+__XCHG_CASE(w, b, rel_1,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w, h, rel_2,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w,  , rel_4,        ,    ,  ,  , l, "memory")
+__XCHG_CASE( ,  , rel_8,        ,    ,  ,  , l, "memory")
+__XCHG_CASE(w, b,  mb_1, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE(w, h,  mb_2, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE(w,  ,  mb_4, dmb ish, nop,  , a, l, "memory")
+__XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
+
+#undef __XCHG_CASE
+
+#define __XCHG_GEN(sfx)                                                        \
+static inline unsigned long __xchg##sfx(unsigned long x,               \
+                                       volatile void *ptr,             \
+                                       int size)                       \
+{                                                                      \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               return __xchg_case##sfx##_1(x, ptr);                    \
+       case 2:                                                         \
+               return __xchg_case##sfx##_2(x, ptr);                    \
+       case 4:                                                         \
+               return __xchg_case##sfx##_4(x, ptr);                    \
+       case 8:                                                         \
+               return __xchg_case##sfx##_8(x, ptr);                    \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+                                                                       \
+       unreachable();                                                  \
+}
+
+__XCHG_GEN()
+__XCHG_GEN(_acq)
+__XCHG_GEN(_rel)
+__XCHG_GEN(_mb)
+
+#undef __XCHG_GEN
+
+#define __xchg_wrapper(sfx, ptr, x)                                    \
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       __ret = (__typeof__(*(ptr)))                                    \
+               __xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+       __ret;                                                          \
 })
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-                                     unsigned long new, int size)
-{
-       switch (size) {
-       case 1:
-               return __cmpxchg_case_1(ptr, (u8)old, new);
-       case 2:
-               return __cmpxchg_case_2(ptr, (u16)old, new);
-       case 4:
-               return __cmpxchg_case_4(ptr, old, new);
-       case 8:
-               return __cmpxchg_case_8(ptr, old, new);
-       default:
-               BUILD_BUG();
-       }
-
-       unreachable();
+/* xchg */
+#define xchg_relaxed(...)      __xchg_wrapper(    , __VA_ARGS__)
+#define xchg_acquire(...)      __xchg_wrapper(_acq, __VA_ARGS__)
+#define xchg_release(...)      __xchg_wrapper(_rel, __VA_ARGS__)
+#define xchg(...)              __xchg_wrapper( _mb, __VA_ARGS__)
+
+#define __CMPXCHG_GEN(sfx)                                             \
+static inline unsigned long __cmpxchg##sfx(volatile void *ptr,         \
+                                          unsigned long old,           \
+                                          unsigned long new,           \
+                                          int size)                    \
+{                                                                      \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               return __cmpxchg_case##sfx##_1(ptr, (u8)old, new);      \
+       case 2:                                                         \
+               return __cmpxchg_case##sfx##_2(ptr, (u16)old, new);     \
+       case 4:                                                         \
+               return __cmpxchg_case##sfx##_4(ptr, old, new);          \
+       case 8:                                                         \
+               return __cmpxchg_case##sfx##_8(ptr, old, new);          \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+                                                                       \
+       unreachable();                                                  \
 }
 
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-                                        unsigned long new, int size)
-{
-       switch (size) {
-       case 1:
-               return __cmpxchg_case_mb_1(ptr, (u8)old, new);
-       case 2:
-               return __cmpxchg_case_mb_2(ptr, (u16)old, new);
-       case 4:
-               return __cmpxchg_case_mb_4(ptr, old, new);
-       case 8:
-               return __cmpxchg_case_mb_8(ptr, old, new);
-       default:
-               BUILD_BUG();
-       }
-
-       unreachable();
-}
+__CMPXCHG_GEN()
+__CMPXCHG_GEN(_acq)
+__CMPXCHG_GEN(_rel)
+__CMPXCHG_GEN(_mb)
 
-#define cmpxchg(ptr, o, n) \
-({ \
-       __typeof__(*(ptr)) __ret; \
-       __ret = (__typeof__(*(ptr))) \
-               __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
-                            sizeof(*(ptr))); \
-       __ret; \
-})
+#undef __CMPXCHG_GEN
 
-#define cmpxchg_local(ptr, o, n) \
-({ \
-       __typeof__(*(ptr)) __ret; \
-       __ret = (__typeof__(*(ptr))) \
-               __cmpxchg((ptr), (unsigned long)(o), \
-                         (unsigned long)(n), sizeof(*(ptr))); \
-       __ret; \
+#define __cmpxchg_wrapper(sfx, ptr, o, n)                              \
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       __ret = (__typeof__(*(ptr)))                                    \
+               __cmpxchg##sfx((ptr), (unsigned long)(o),               \
+                               (unsigned long)(n), sizeof(*(ptr)));    \
+       __ret;                                                          \
 })
 
+/* cmpxchg */
+#define cmpxchg_relaxed(...)   __cmpxchg_wrapper(    , __VA_ARGS__)
+#define cmpxchg_acquire(...)   __cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define cmpxchg_release(...)   __cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define cmpxchg(...)           __cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define cmpxchg_local          cmpxchg_relaxed
+
+/* cmpxchg64 */
+#define cmpxchg64_relaxed      cmpxchg_relaxed
+#define cmpxchg64_acquire      cmpxchg_acquire
+#define cmpxchg64_release      cmpxchg_release
+#define cmpxchg64              cmpxchg
+#define cmpxchg64_local                cmpxchg_local
+
+/* cmpxchg_double */
 #define system_has_cmpxchg_double()     1
 
 #define __cmpxchg_double_check(ptr1, ptr2)                                     \
@@ -202,6 +199,7 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
        __ret; \
 })
 
+/* this_cpu_cmpxchg */
 #define _protect_cmpxchg_local(pcp, o, n)                      \
 ({                                                             \
        typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
@@ -227,9 +225,4 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
        __ret;                                                          \
 })
 
-#define cmpxchg64(ptr,o,n)             cmpxchg((ptr),(o),(n))
-#define cmpxchg64_local(ptr,o,n)       cmpxchg_local((ptr),(o),(n))
-
-#define cmpxchg64_relaxed(ptr,o,n)     cmpxchg_local((ptr),(o),(n))
-
 #endif /* __ASM_CMPXCHG_H */
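
Taken together, __XCHG_CASE/__XCHG_GEN and __CMPXCHG_GEN give every ordering flavour a size-dispatched entry point, and the old open-coded switch statements collapse into thin wrappers. An illustrative (not from this diff) use of the new flavours:

        static unsigned int door;       /* 0 = open, 1 = claimed */

        static bool try_claim(void)
        {
                /* Acquire pairs with the release below. */
                return xchg_acquire(&door, 1) == 0;
        }

        static void release_claim(void)
        {
                xchg_release(&door, 0);
        }

cmpxchg_local and the cmpxchg64* family now simply alias the corresponding cmpxchg flavours, which is what the deletions at the bottom of this file clean up.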
index 8e797b2fcc0186b6f5f505303b51fbea0eff2e94..b5e9cee4b5f81a3498a67b934dc9157e2d4cf45b 100644 (file)
@@ -63,4 +63,8 @@ DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
 void cpuinfo_store_cpu(void);
 void __init cpuinfo_store_boot_cpu(void);
 
+void __init init_cpu_features(struct cpuinfo_arm64 *info);
+void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
+                                struct cpuinfo_arm64 *boot);
+
 #endif /* __ASM_CPU_H */
index 171570702bb801431f141f46238eb0d9e1895fe5..ca4621033ca048598c5fb9e114f2a3449dec57fa 100644 (file)
@@ -10,6 +10,7 @@
 #define __ASM_CPUFEATURE_H
 
 #include <asm/hwcap.h>
+#include <asm/sysreg.h>
 
 /*
  * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 
 #include <linux/kernel.h>
 
+/* CPU feature register tracking */
+enum ftr_type {
+       FTR_EXACT,      /* Use a predefined safe value */
+       FTR_LOWER_SAFE, /* Smaller value is safe */
+       FTR_HIGHER_SAFE, /* Bigger value is safe */
+};
+
+#define FTR_STRICT     true    /* SANITY check strict matching required */
+#define FTR_NONSTRICT  false   /* SANITY check ignored */
+
+struct arm64_ftr_bits {
+       bool            strict;   /* CPU sanity check: strict matching required? */
+       enum ftr_type   type;
+       u8              shift;
+       u8              width;
+       s64             safe_val; /* safe value for discrete features */
+};
+
+/*
+ * @arm64_ftr_reg - Feature register
+ * @strict_mask                Bits which should match across all CPUs for sanity.
+ * @sys_val            Safe value across the CPUs (system view)
+ */
+struct arm64_ftr_reg {
+       u32                     sys_id;
+       const char              *name;
+       u64                     strict_mask;
+       u64                     sys_val;
+       struct arm64_ftr_bits   *ftr_bits;
+};
+
 struct arm64_cpu_capabilities {
        const char *desc;
        u16 capability;
        bool (*matches)(const struct arm64_cpu_capabilities *);
-       void (*enable)(void);
+       void (*enable)(void *);         /* Called on all active CPUs */
        union {
                struct {        /* To be used for erratum handling only */
                        u32 midr_model;
@@ -46,8 +78,11 @@ struct arm64_cpu_capabilities {
                };
 
                struct {        /* Feature register checking */
+                       u32 sys_reg;
                        int field_pos;
                        int min_field_value;
+                       int hwcap_type;
+                       unsigned long hwcap;
                };
        };
 };
@@ -75,19 +110,59 @@ static inline void cpus_set_cap(unsigned int num)
                __set_bit(num, cpu_hwcaps);
 }
 
-static inline int __attribute_const__ cpuid_feature_extract_field(u64 features,
-                                                                 int field)
+static inline int __attribute_const__
+cpuid_feature_extract_field_width(u64 features, int field, int width)
+{
+       return (s64)(features << (64 - width - field)) >> (64 - width);
+}
+
+static inline int __attribute_const__
+cpuid_feature_extract_field(u64 features, int field)
+{
+       return cpuid_feature_extract_field_width(features, field, 4);
+}
+
+static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
+{
+       return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
+}
+
+static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
+{
+       return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width);
+}
+
+static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
 {
-       return (s64)(features << (64 - 4 - field)) >> (64 - 4);
+       return cpuid_feature_extract_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
+               cpuid_feature_extract_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
 }
 
+void __init setup_cpu_features(void);
 
-void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                            const char *info);
 void check_local_cpu_errata(void);
-void check_local_cpu_features(void);
-bool cpu_supports_mixed_endian_el0(void);
-bool system_supports_mixed_endian_el0(void);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void verify_local_cpu_capabilities(void);
+#else
+static inline void verify_local_cpu_capabilities(void)
+{
+}
+#endif
+
+u64 read_system_reg(u32 id);
+
+static inline bool cpu_supports_mixed_endian_el0(void)
+{
+       return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+}
+
+static inline bool system_supports_mixed_endian_el0(void)
+{
+       return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
+}
 
 #endif /* __ASSEMBLY__ */
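
cpuid_feature_extract_field_width() reads an ID-register field as a signed value: shift left to drop everything above the field, then arithmetic-shift right to sign-extend, so an all-ones field (0xf, "not implemented" in the ID scheme) comes back as -1 rather than 15. A standalone check of the arithmetic (the register value is made up; the shift matches ID_AA64ISAR0_ATOMICS_SHIFT):

        #include <stdio.h>
        #include <stdint.h>

        static int extract(uint64_t features, int field, int width)
        {
                return (int64_t)(features << (64 - width - field)) >> (64 - width);
        }

        int main(void)
        {
                uint64_t isar0 = (uint64_t)2 << 20;     /* ATOMICS = 0b0010 */
                printf("%d\n", extract(isar0, 20, 4));  /* 2: LSE present  */
                printf("%d\n", extract(~0ULL, 20, 4));  /* -1: field is 0xf */
                return 0;
        }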
 
index ee6403df9fe4c1f32e9a7b30172a7a141056ec40..31678b2f295f242fc03d461dd92f52de0388d9c9 100644 (file)
 
 #define APM_CPU_PART_POTENZA   0x000
 
-#define ID_AA64MMFR0_BIGENDEL0_SHIFT   16
-#define ID_AA64MMFR0_BIGENDEL0_MASK    (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
-#define ID_AA64MMFR0_BIGENDEL0(mmfr0)  \
-       (((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
-#define ID_AA64MMFR0_BIGEND_SHIFT      8
-#define ID_AA64MMFR0_BIGEND_MASK       (0xf << ID_AA64MMFR0_BIGEND_SHIFT)
-#define ID_AA64MMFR0_BIGEND(mmfr0)     \
-       (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
-
 #ifndef __ASSEMBLY__
 
 /*
@@ -112,12 +103,6 @@ static inline u32 __attribute_const__ read_cpuid_cachetype(void)
 {
        return read_cpuid(CTR_EL0);
 }
-
-static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
-{
-       return (ID_AA64MMFR0_BIGEND(mmfr0) == 0x1) ||
-               (ID_AA64MMFR0_BIGENDEL0(mmfr0) == 0x1);
-}
 #endif /* __ASSEMBLY__ */
 
 #endif
index 8b9884c726adc78f37eb59e7288298267bec55b4..309704544d22763d6348095814fbdce935172c1b 100644 (file)
@@ -17,6 +17,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#include <linux/sizes.h>
 #include <asm/boot.h>
 #include <asm/page.h>
 
@@ -55,11 +56,7 @@ enum fixed_addresses {
         * Temporary boot-time mappings, used by early_ioremap(),
         * before ioremap() is functional.
         */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define NR_FIX_BTMAPS          4
-#else
-#define NR_FIX_BTMAPS          64
-#endif
+#define NR_FIX_BTMAPS          (SZ_256K / PAGE_SIZE)
 #define FIX_BTMAPS_SLOTS       7
 #define TOTAL_FIX_BTMAPS       (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
 
index 4c47cb2fbb526f7ae445e4f1b46178ba99934283..e54415ec693571d1d4195d57b1928e2e58173353 100644 (file)
@@ -17,6 +17,7 @@
 #define __ASM_HW_BREAKPOINT_H
 
 #include <asm/cputype.h>
+#include <asm/cpufeature.h>
 
 #ifdef __KERNEL__
 
@@ -137,13 +138,17 @@ extern struct pmu perf_ops_bp;
 /* Determine number of BRP registers available. */
 static inline int get_num_brps(void)
 {
-       return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
+       return 1 +
+               cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+                                               ID_AA64DFR0_BRPS_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
 static inline int get_num_wrps(void)
 {
-       return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
+       return 1 +
+               cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+                                               ID_AA64DFR0_WRPS_SHIFT);
 }
 
 #endif /* __KERNEL__ */
index 0ad735166d9fa7303eaa961d6f07d833c95b84ff..400b80b49595dc147fb5a2110ff347405f58bd0b 100644 (file)
 extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
 #endif
 
+enum {
+       CAP_HWCAP = 1,
+#ifdef CONFIG_COMPAT
+       CAP_COMPAT_HWCAP,
+       CAP_COMPAT_HWCAP2,
+#endif
+};
+
 extern unsigned long elf_hwcap;
 #endif
 #endif
index bbb251b14746cb7005d1be35d50fdfb453464870..09169296c3cc4c235d10a0b040c68938eb4a4c80 100644 (file)
@@ -7,7 +7,6 @@
 
 struct pt_regs;
 
-extern void migrate_irqs(void);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 static inline void acpi_irq_init(void)
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
new file mode 100644 (file)
index 0000000..2774fa3
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#include <linux/linkage.h>
+#include <asm/memory.h>
+
+/*
+ * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
+ */
+#define KASAN_SHADOW_START      (VA_START)
+#define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
+
+/*
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ *     shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *
+ * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END]
+ * cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET
+ * should satisfy the following equation:
+ *      KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
+ */
+#define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#else
+static inline void kasan_init(void) { }
+#endif
+
+#endif
+#endif
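
The offset can be sanity-checked by hand. With VA_BITS = 48, KASAN_SHADOW_START is 0xffff000000000000 and the shadow region spans 2^45 bytes, so KASAN_SHADOW_END is 0xffff200000000000; subtracting 2^61 yields the offset that maps the very last virtual address onto the last shadow byte. A standalone check (VA_BITS = 48 is an assumption for the example):

        #include <stdio.h>
        #include <stdint.h>

        #define VA_BITS         48
        #define SHADOW_START    (~0ULL << VA_BITS)
        #define SHADOW_END      (SHADOW_START + (1ULL << (VA_BITS - 3)))
        #define SHADOW_OFFSET   (SHADOW_END - (1ULL << (64 - 3)))

        int main(void)
        {
                uint64_t last = ~0ULL;  /* highest virtual address */
                /* Expect SHADOW_END - 1 = 0xffff1fffffffffff */
                printf("%#llx\n",
                       (unsigned long long)((last >> 3) + SHADOW_OFFSET));
                return 0;
        }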
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
new file mode 100644 (file)
index 0000000..a459714
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Kernel page table mapping
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_KERNEL_PGTABLE_H
+#define __ASM_KERNEL_PGTABLE_H
+
+/*
+ * The linear mapping and the start of memory are both 2M aligned (per
+ * the arm64 booting.txt requirements). Hence we can use section mapping
+ * with 4K (section size = 2M) but not with 16K (section size = 32M) or
+ * 64K (section size = 512M).
+ */
+#ifdef CONFIG_ARM64_4K_PAGES
+#define ARM64_SWAPPER_USES_SECTION_MAPS 1
+#else
+#define ARM64_SWAPPER_USES_SECTION_MAPS 0
+#endif
+
+/*
+ * The idmap and swapper page tables need some space reserved in the kernel
+ * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
+ * map the kernel. With the 64K page configuration, swapper and idmap need to
+ * map to pte level. The swapper also maps the FDT (see __create_page_tables
+ * for more information). Note that the number of ID map translation levels
+ * could be increased on the fly if system RAM is out of reach for the default
+ * VA range, so pages required to map highest possible PA are reserved in all
+ * cases.
+ */
+#if ARM64_SWAPPER_USES_SECTION_MAPS
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
+#define IDMAP_PGTABLE_LEVELS   (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
+#else
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
+#define IDMAP_PGTABLE_LEVELS   (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT))
+#endif
+
+#define SWAPPER_DIR_SIZE       (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
+#define IDMAP_DIR_SIZE         (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
+
+/* Initial memory map size */
+#if ARM64_SWAPPER_USES_SECTION_MAPS
+#define SWAPPER_BLOCK_SHIFT    SECTION_SHIFT
+#define SWAPPER_BLOCK_SIZE     SECTION_SIZE
+#define SWAPPER_TABLE_SHIFT    PUD_SHIFT
+#else
+#define SWAPPER_BLOCK_SHIFT    PAGE_SHIFT
+#define SWAPPER_BLOCK_SIZE     PAGE_SIZE
+#define SWAPPER_TABLE_SHIFT    PMD_SHIFT
+#endif
+
+/* The size of the initial kernel direct mapping */
+#define SWAPPER_INIT_MAP_SIZE  (_AC(1, UL) << SWAPPER_TABLE_SHIFT)
+
+/*
+ * Initial memory map attributes.
+ */
+#define SWAPPER_PTE_FLAGS      (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define SWAPPER_PMD_FLAGS      (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+
+#if ARM64_SWAPPER_USES_SECTION_MAPS
+#define SWAPPER_MM_MMUFLAGS    (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
+#else
+#define SWAPPER_MM_MMUFLAGS    (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+#endif
+
+#endif /* __ASM_KERNEL_PGTABLE_H */
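
Working one configuration through: with 4K pages and 39-bit VAs, CONFIG_PGTABLE_LEVELS is 3 and section maps apply, so SWAPPER_PGTABLE_LEVELS = 2 (two pages reserved); the idmap sizes itself from the physical range instead, ARM64_HW_PGTABLE_LEVELS(48) - 1 = 3 pages, which reproduces the fixed 3-page IDMAP_DIR_SIZE this header replaces while remaining correct for the other granules.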
index 6b4c3ad75a2a99b0760a38436b7d159bfe3209c3..11ccf6c095331599e93ccf5620555d3466166df1 100644 (file)
  * PAGE_OFFSET - the virtual address of the start of the kernel image (top
  *              (VA_BITS - 1))
  * VA_BITS - the maximum number of bits for virtual addresses.
+ * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
 #define VA_BITS                        (CONFIG_ARM64_VA_BITS)
+#define VA_START               (UL(0xffffffffffffffff) << VA_BITS)
 #define PAGE_OFFSET            (UL(0xffffffffffffffff) << (VA_BITS - 1))
 #define MODULES_END            (PAGE_OFFSET)
 #define MODULES_VADDR          (MODULES_END - SZ_64M)
index 030208767185bd56edce4b01e9d5aa91c08c3058..990124a67eebd4b10a19ae9509cfd5b6d9ac1712 100644 (file)
 #define __ASM_MMU_H
 
 typedef struct {
-       unsigned int id;
-       raw_spinlock_t id_lock;
-       void *vdso;
+       atomic64_t      id;
+       void            *vdso;
 } mm_context_t;
 
-#define INIT_MM_CONTEXT(name) \
-       .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
-
-#define ASID(mm)       ((mm)->context.id & 0xffff)
+/*
+ * This macro is only used by the TLBI code, which cannot race with an
+ * ASID change and therefore doesn't need to reload the counter using
+ * atomic64_read.
+ */
+#define ASID(mm)       ((mm)->context.id.counter & 0xffff)
 
 extern void paging_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
index 8ec41e5f56f081b9d65d2ed58769a0f22060716d..c0e87898ba96b04f2e5b847a0dacf01f4eb1dff1 100644 (file)
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
 
-#define MAX_ASID_BITS  16
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
@@ -77,96 +70,38 @@ static inline bool __cpu_uses_extended_idmap(void)
                unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
 }
 
-static inline void __cpu_set_tcr_t0sz(u64 t0sz)
-{
-       unsigned long tcr;
-
-       if (__cpu_uses_extended_idmap())
-               asm volatile (
-               "       mrs     %0, tcr_el1     ;"
-               "       bfi     %0, %1, %2, %3  ;"
-               "       msr     tcr_el1, %0     ;"
-               "       isb"
-               : "=&r" (tcr)
-               : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
-}
-
-/*
- * Set TCR.T0SZ to the value appropriate for activating the identity map.
- */
-static inline void cpu_set_idmap_tcr_t0sz(void)
-{
-       __cpu_set_tcr_t0sz(idmap_t0sz);
-}
-
 /*
  * Set TCR.T0SZ to its default value (based on VA_BITS)
  */
 static inline void cpu_set_default_tcr_t0sz(void)
 {
-       __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
-}
-
-static inline void switch_new_context(struct mm_struct *mm)
-{
-       unsigned long flags;
-
-       __new_context(mm);
+       unsigned long tcr;
 
-       local_irq_save(flags);
-       cpu_switch_mm(mm->pgd, mm);
-       local_irq_restore(flags);
-}
+       if (!__cpu_uses_extended_idmap())
+               return;
 
-static inline void check_and_switch_context(struct mm_struct *mm,
-                                           struct task_struct *tsk)
-{
-       /*
-        * Required during context switch to avoid speculative page table
-        * walking with the wrong TTBR.
-        */
-       cpu_set_reserved_ttbr0();
-
-       if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
-               /*
-                * The ASID is from the current generation, just switch to the
-                * new pgd. This condition is only true for calls from
-                * context_switch() and interrupts are already disabled.
-                */
-               cpu_switch_mm(mm->pgd, mm);
-       else if (irqs_disabled())
-               /*
-                * Defer the new ASID allocation until after the context
-                * switch critical region since __new_context() cannot be
-                * called with interrupts disabled.
-                */
-               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-       else
-               /*
-                * That is a direct call to switch_mm() or activate_mm() with
-                * interrupts enabled and a new context.
-                */
-               switch_new_context(mm);
+       asm volatile (
+       "       mrs     %0, tcr_el1     ;"
+       "       bfi     %0, %1, %2, %3  ;"
+       "       msr     tcr_el1, %0     ;"
+       "       isb"
+       : "=&r" (tcr)
+       : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
 }
 
-#define init_new_context(tsk,mm)       (__init_new_context(tsk,mm),0)
+/*
+ * It would be nice to return ASIDs back to the allocator, but unfortunately
+ * that introduces a race with a generation rollover where we could erroneously
+ * free an ASID allocated in a future generation. We could workaround this by
+ * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
+ * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
+ * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
+ * take CPU migration into account.
+ */
 #define destroy_context(mm)            do { } while(0)
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define finish_arch_post_lock_switch \
-       finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-       if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-               struct mm_struct *mm = current->mm;
-               unsigned long flags;
-
-               __new_context(mm);
-
-               local_irq_save(flags);
-               cpu_switch_mm(mm->pgd, mm);
-               local_irq_restore(flags);
-       }
-}
+#define init_new_context(tsk,mm)       ({ atomic64_set(&mm->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -194,6 +129,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        unsigned int cpu = smp_processor_id();
 
+       if (prev == next)
+               return;
+
        /*
         * init_mm.pgd does not contain any user mappings and it is always
         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -203,8 +141,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
                return;
        }
 
-       if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
-               check_and_switch_context(next, tsk);
+       check_and_switch_context(next, cpu);
 }
 
 #define deactivate_mm(tsk,mm)  do { } while (0)
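
The replacement allocator treats context.id as a single 64-bit value: the hardware ASID in the low 16 bits and an allocation generation above it, which is why init_new_context() is now just an atomic64_set(0) (generation 0 is never current, forcing allocation on first use). A simplified sketch of the fast path that replaces the TIF_SWITCH_MM dance (names other than context.id are illustrative, not the patch's internals):

        #define ASID_BITS       16

        static atomic64_t asid_generation;      /* bumped on rollover */

        static void sketch_check_and_switch(struct mm_struct *mm)
        {
                u64 asid = atomic64_read(&mm->context.id);

                /* Same generation: the ASID is still valid, just switch. */
                if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)) {
                        cpu_switch_mm(mm->pgd, mm);
                        return;
                }
                /* Slow path (elided): allocate a new ASID under a lock,
                 * flushing local TLBs if the generation rolled over. */
        }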
index 7d9c7e4a424bddd87dfdbd34e2be0d05b2078f58..9b2f5a9d019df493fa6021ee3ca6b4779401d8c4 100644 (file)
 #define __ASM_PAGE_H
 
 /* PAGE_SHIFT determines the page size */
+/* CONT_SHIFT determines the number of pages which can be tracked together */
 #ifdef CONFIG_ARM64_64K_PAGES
 #define PAGE_SHIFT             16
+#define CONT_SHIFT             5
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define PAGE_SHIFT             14
+#define CONT_SHIFT             7
 #else
 #define PAGE_SHIFT             12
+#define CONT_SHIFT             4
 #endif
-#define PAGE_SIZE              (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_SIZE              (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK              (~(PAGE_SIZE-1))
 
-/*
- * The idmap and swapper page tables need some space reserved in the kernel
- * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
- * map the kernel. With the 64K page configuration, swapper and idmap need to
- * map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information). Note that the number of ID map translation levels
- * could be increased on the fly if system RAM is out of reach for the default
- * VA range, so 3 pages are reserved in all cases.
- */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS)
-#else
-#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
-#endif
-
-#define SWAPPER_DIR_SIZE       (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
-#define IDMAP_DIR_SIZE         (3 * PAGE_SIZE)
+#define CONT_SIZE              (_AC(1, UL) << (CONT_SHIFT + PAGE_SHIFT))
+#define CONT_MASK              (~(CONT_SIZE-1))
 
 #ifndef __ASSEMBLY__
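
The chosen CONT_SHIFT values match what the ARMv8 contiguous hint can describe per granule: 16 x 4K = 64K, 128 x 16K = 2M and 32 x 64K = 2M, so CONT_SIZE is the span a single contiguous-range TLB entry may cover.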
 
index 76420568d66a463d7ba74fdf8a74ba916dd1dbd8..c15053902942e0a3a34ba4882545c5b6d163c23c 100644 (file)
@@ -27,6 +27,7 @@
 #define check_pgt_cache()              do { } while (0)
 
 #define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
 
index 24154b055835b05469903bb38d8d2f5ddea5f348..d6739e836f7bb919a7e49b7e14fc43d2454f46de 100644 (file)
 #ifndef __ASM_PGTABLE_HWDEF_H
 #define __ASM_PGTABLE_HWDEF_H
 
+/*
+ * Number of page-table levels required to address 'va_bits' wide
+ * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
+ * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
+ *
+ *  levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
+ *
+ * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
+ *
+ * We cannot include linux/kernel.h, which defines DIV_ROUND_UP, here
+ * due to build issues, so we open code DIV_ROUND_UP:
+ *
+ *     ((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
+ *
+ * which simplifies to:
+ */
+#define ARM64_HW_PGTABLE_LEVELS(va_bits) (((va_bits) - 4) / (PAGE_SHIFT - 3))
+
+/*
+ * Size mapped by an entry at level n (0 <= n <= 3)
+ * We map (PAGE_SHIFT - 3) bits at all translation levels and PAGE_SHIFT bits
+ * in the final page. The maximum number of translation levels supported by
+ * the architecture is 4. Hence, starting at level n, we have further
+ * ((4 - n) - 1) levels of translation excluding the offset within the page.
+ * So, the total number of bits mapped by an entry at level n is :
+ *
+ *  ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
+ *
+ * Rearranging it a bit we get :
+ *   (4 - n) * (PAGE_SHIFT - 3) + 3
+ */
+#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)        ((PAGE_SHIFT - 3) * (4 - (n)) + 3)
+
 #define PTRS_PER_PTE           (1 << (PAGE_SHIFT - 3))
 
 /*
  * PMD_SHIFT determines the size a level 2 page table entry can map.
  */
 #if CONFIG_PGTABLE_LEVELS > 2
-#define PMD_SHIFT              ((PAGE_SHIFT - 3) * 2 + 3)
+#define PMD_SHIFT              ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
 #define PMD_SIZE               (_AC(1, UL) << PMD_SHIFT)
 #define PMD_MASK               (~(PMD_SIZE-1))
 #define PTRS_PER_PMD           PTRS_PER_PTE
@@ -32,7 +65,7 @@
  * PUD_SHIFT determines the size a level 1 page table entry can map.
  */
 #if CONFIG_PGTABLE_LEVELS > 3
-#define PUD_SHIFT              ((PAGE_SHIFT - 3) * 3 + 3)
+#define PUD_SHIFT              ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
 #define PUD_SIZE               (_AC(1, UL) << PUD_SHIFT)
 #define PUD_MASK               (~(PUD_SIZE-1))
 #define PTRS_PER_PUD           PTRS_PER_PTE
@@ -42,7 +75,7 @@
  * PGDIR_SHIFT determines the size a top-level page table entry can map
  * (depending on the configuration, this level can be 0, 1 or 2).
  */
-#define PGDIR_SHIFT            ((PAGE_SHIFT - 3) * CONFIG_PGTABLE_LEVELS + 3)
+#define PGDIR_SHIFT            ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
 #define PGDIR_SIZE             (_AC(1, UL) << PGDIR_SHIFT)
 #define PGDIR_MASK             (~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD           (1 << (VA_BITS - PGDIR_SHIFT))
 #define SECTION_SIZE           (_AC(1, UL) << SECTION_SHIFT)
 #define SECTION_MASK           (~(SECTION_SIZE-1))
 
+/*
+ * Contiguous page definitions.
+ */
+#define CONT_PTES              (_AC(1, UL) << CONT_SHIFT)
+/* the numerical offset of the PTE within a range of CONT_PTES */
+#define CONT_RANGE_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (CONT_PTES - 1))
+
 /*
  * Hardware page table definitions.
  *
 #define PMD_SECT_S             (_AT(pmdval_t, 3) << 8)
 #define PMD_SECT_AF            (_AT(pmdval_t, 1) << 10)
 #define PMD_SECT_NG            (_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_CONT          (_AT(pmdval_t, 1) << 52)
 #define PMD_SECT_PXN           (_AT(pmdval_t, 1) << 53)
 #define PMD_SECT_UXN           (_AT(pmdval_t, 1) << 54)
 
 #define PTE_AF                 (_AT(pteval_t, 1) << 10)        /* Access Flag */
 #define PTE_NG                 (_AT(pteval_t, 1) << 11)        /* nG */
 #define PTE_DBM                        (_AT(pteval_t, 1) << 51)        /* Dirty Bit Management */
+#define PTE_CONT               (_AT(pteval_t, 1) << 52)        /* Contiguous range */
 #define PTE_PXN                        (_AT(pteval_t, 1) << 53)        /* Privileged XN */
 #define PTE_UXN                        (_AT(pteval_t, 1) << 54)        /* User XN */
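ARM64_HW_PGTABLE_LEVELS is just DIV_ROUND_UP(va_bits - PAGE_SHIFT, PAGE_SHIFT - 3) with the division folded; a standalone spot-check against the known configurations:

        #include <stdio.h>

        static int levels(int va_bits, int page_shift)
        {
                return (va_bits - 4) / (page_shift - 3);
        }

        int main(void)
        {
                printf("%d\n", levels(39, 12)); /* 4K,  39-bit -> 3 */
                printf("%d\n", levels(48, 12)); /* 4K,  48-bit -> 4 */
                printf("%d\n", levels(47, 14)); /* 16K, 47-bit -> 3 */
                printf("%d\n", levels(42, 16)); /* 64K, 42-bit -> 2 */
                return 0;
        }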
 
index b0329be95cb129f3b283f3d75e4dfeff64214bff..c3d22a5076483d4c9d1500ee3feebe9358278876 100644 (file)
  *     fixed mappings and modules
  */
 #define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
-#define VMALLOC_START          (UL(0xffffffffffffffff) << VA_BITS)
+
+#ifndef CONFIG_KASAN
+#define VMALLOC_START          (VA_START)
+#else
+#include <asm/kasan.h>
+#define VMALLOC_START          (KASAN_SHADOW_END + SZ_64K)
+#endif
+
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap                        ((struct page *)(VMALLOC_END + SZ_64K))
@@ -72,6 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL            __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC_CONT  __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
 #define PAGE_HYP               __pgprot(_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE                __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
@@ -79,7 +87,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PAGE_S2                        __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE         __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
-#define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
 #define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
 #define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_COPY              __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -140,6 +148,7 @@ extern struct page *empty_zero_page;
 #define pte_special(pte)       (!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)         (!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)          (!(pte_val(pte) & PTE_UXN))
+#define pte_cont(pte)          (!!(pte_val(pte) & PTE_CONT))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)      (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -202,6 +211,16 @@ static inline pte_t pte_mkspecial(pte_t pte)
        return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
 }
 
+static inline pte_t pte_mkcont(pte_t pte)
+{
+       return set_pte_bit(pte, __pgprot(PTE_CONT));
+}
+
+static inline pte_t pte_mknoncont(pte_t pte)
+{
+       return clear_pte_bit(pte, __pgprot(PTE_CONT));
+}
+
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
        *ptep = pte;
@@ -496,7 +515,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-                             PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+                             PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
        /* preserve the hardware dirty information */
        if (pte_hw_dirty(pte))
                pte = pte_mkdirty(pte);
@@ -646,14 +665,17 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
 {
        /*
-        * set_pte() does not have a DSB for user mappings, so make sure that
-        * the page table write is visible.
+        * We don't do anything here, so there's a very small chance of
+        * us retaking a user fault which we just fixed up. The alternative
+        * is doing a dsb(ishst), but that penalises the fastpath.
         */
-       dsb(ishst);
 }
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
+#define kc_vaddr_to_offset(v)  ((v) & ~VA_START)
+#define kc_offset_to_vaddr(o)  ((o) | VA_START)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
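
pte_mkcont()/pte_mknoncont() only toggle bit 52; making use of the hint is up to the mapping code. An illustrative helper, not taken from this diff (a real mapper would also advance the output address for each entry):

        /* Sketch: tag a naturally aligned run of CONT_PTES entries. */
        static void set_cont_range(pte_t *ptep, pte_t pte)
        {
                int i;

                for (i = 0; i < CONT_PTES; i++)
                        set_pte(ptep + i, pte_mkcont(pte));
        }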
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
deleted file mode 100644 (file)
index b7710a5..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Based on arch/arm/include/asm/pmu.h
- *
- * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PMU_H
-#define __ASM_PMU_H
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/* The events for a given PMU register set. */
-struct pmu_hw_events {
-       /*
-        * The events that are active on the PMU for the given index.
-        */
-       struct perf_event       **events;
-
-       /*
-        * A 1 bit for an index indicates that the counter is being used for
-        * an event. A 0 means that the counter can be used.
-        */
-       unsigned long           *used_mask;
-
-       /*
-        * Hardware lock to serialize accesses to PMU registers. Needed for the
-        * read/modify/write sequences.
-        */
-       raw_spinlock_t          pmu_lock;
-};
-
-struct arm_pmu {
-       struct pmu              pmu;
-       cpumask_t               active_irqs;
-       int                     *irq_affinity;
-       const char              *name;
-       irqreturn_t             (*handle_irq)(int irq_num, void *dev);
-       void                    (*enable)(struct hw_perf_event *evt, int idx);
-       void                    (*disable)(struct hw_perf_event *evt, int idx);
-       int                     (*get_event_idx)(struct pmu_hw_events *hw_events,
-                                                struct hw_perf_event *hwc);
-       int                     (*set_event_filter)(struct hw_perf_event *evt,
-                                                   struct perf_event_attr *attr);
-       u32                     (*read_counter)(int idx);
-       void                    (*write_counter)(int idx, u32 val);
-       void                    (*start)(void);
-       void                    (*stop)(void);
-       void                    (*reset)(void *);
-       int                     (*map_event)(struct perf_event *event);
-       int                     num_events;
-       atomic_t                active_events;
-       struct mutex            reserve_mutex;
-       u64                     max_period;
-       struct platform_device  *plat_device;
-       struct pmu_hw_events    *(*get_hw_events)(void);
-};
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
-
-u64 armpmu_event_update(struct perf_event *event,
-                       struct hw_perf_event *hwc,
-                       int idx);
-
-int armpmu_event_set_period(struct perf_event *event,
-                           struct hw_perf_event *hwc,
-                           int idx);
-
-#endif /* CONFIG_HW_PERF_EVENTS */
-#endif /* __ASM_PMU_H */
index 98f32355dc972817eadbe3809ef92aaae9f9cfba..4acb7ca94fcd9c05569f3103ab09097c19ea72d5 100644 (file)
@@ -186,6 +186,6 @@ static inline void spin_lock_prefetch(const void *x)
 
 #endif
 
-void cpu_enable_pan(void);
+void cpu_enable_pan(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
index 64d2d4884a9db1e663b1d025eca2a8db3bbdcce4..2eb714c4639f5669b1012aae0c77ca74fdb72881 100644 (file)
@@ -36,17 +36,33 @@ extern __kernel_size_t strnlen(const char *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *, const void *, __kernel_size_t);
+extern void *__memmove(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCHR
 extern void *memchr(const void *, int, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *, int, __kernel_size_t);
+extern void *__memset(void *, int, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCMP
 extern int memcmp(const void *, const void *, size_t);
 
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use the non-instrumented versions of the mem* functions.
+ */
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#endif
+
 #endif
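
The effect is that a file built without instrumentation (for example via a KASAN_SANITIZE_foo.o := n make rule) transparently calls __memcpy() and friends, so KASAN never has to reason about accesses it cannot see, while instrumented files keep the checking memcpy()/memmove()/memset().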
index a7f3d4b2514d615e191c86feb3da5da9351f173f..d48ab5b41f521c23819c0b3927a9ea0f73db242c 100644 (file)
@@ -22,9 +22,6 @@
 
 #include <asm/opcodes.h>
 
-#define SCTLR_EL1_CP15BEN      (0x1 << 5)
-#define SCTLR_EL1_SED          (0x1 << 8)
-
 /*
  * ARMv8 ARM reserves the following encoding for system registers:
  * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
 #define sys_reg(op0, op1, crn, crm, op2) \
        ((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
 
-#define REG_PSTATE_PAN_IMM                     sys_reg(0, 0, 4, 0, 4)
-#define SCTLR_EL1_SPAN                         (1 << 23)
+#define SYS_MIDR_EL1                   sys_reg(3, 0, 0, 0, 0)
+#define SYS_MPIDR_EL1                  sys_reg(3, 0, 0, 0, 5)
+#define SYS_REVIDR_EL1                 sys_reg(3, 0, 0, 0, 6)
+
+#define SYS_ID_PFR0_EL1                        sys_reg(3, 0, 0, 1, 0)
+#define SYS_ID_PFR1_EL1                        sys_reg(3, 0, 0, 1, 1)
+#define SYS_ID_DFR0_EL1                        sys_reg(3, 0, 0, 1, 2)
+#define SYS_ID_MMFR0_EL1               sys_reg(3, 0, 0, 1, 4)
+#define SYS_ID_MMFR1_EL1               sys_reg(3, 0, 0, 1, 5)
+#define SYS_ID_MMFR2_EL1               sys_reg(3, 0, 0, 1, 6)
+#define SYS_ID_MMFR3_EL1               sys_reg(3, 0, 0, 1, 7)
+
+#define SYS_ID_ISAR0_EL1               sys_reg(3, 0, 0, 2, 0)
+#define SYS_ID_ISAR1_EL1               sys_reg(3, 0, 0, 2, 1)
+#define SYS_ID_ISAR2_EL1               sys_reg(3, 0, 0, 2, 2)
+#define SYS_ID_ISAR3_EL1               sys_reg(3, 0, 0, 2, 3)
+#define SYS_ID_ISAR4_EL1               sys_reg(3, 0, 0, 2, 4)
+#define SYS_ID_ISAR5_EL1               sys_reg(3, 0, 0, 2, 5)
+#define SYS_ID_MMFR4_EL1               sys_reg(3, 0, 0, 2, 6)
+
+#define SYS_MVFR0_EL1                  sys_reg(3, 0, 0, 3, 0)
+#define SYS_MVFR1_EL1                  sys_reg(3, 0, 0, 3, 1)
+#define SYS_MVFR2_EL1                  sys_reg(3, 0, 0, 3, 2)
+
+#define SYS_ID_AA64PFR0_EL1            sys_reg(3, 0, 0, 4, 0)
+#define SYS_ID_AA64PFR1_EL1            sys_reg(3, 0, 0, 4, 1)
+
+#define SYS_ID_AA64DFR0_EL1            sys_reg(3, 0, 0, 5, 0)
+#define SYS_ID_AA64DFR1_EL1            sys_reg(3, 0, 0, 5, 1)
+
+#define SYS_ID_AA64ISAR0_EL1           sys_reg(3, 0, 0, 6, 0)
+#define SYS_ID_AA64ISAR1_EL1           sys_reg(3, 0, 0, 6, 1)
+
+#define SYS_ID_AA64MMFR0_EL1           sys_reg(3, 0, 0, 7, 0)
+#define SYS_ID_AA64MMFR1_EL1           sys_reg(3, 0, 0, 7, 1)
+
+#define SYS_CNTFRQ_EL0                 sys_reg(3, 3, 14, 0, 0)
+#define SYS_CTR_EL0                    sys_reg(3, 3, 0, 0, 1)
+#define SYS_DCZID_EL0                  sys_reg(3, 3, 0, 0, 7)
+
+#define REG_PSTATE_PAN_IMM             sys_reg(0, 0, 4, 0, 4)
 
 #define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
                                     (!!x)<<8 | 0x1f)
 
+/* SCTLR_EL1 */
+#define SCTLR_EL1_CP15BEN      (0x1 << 5)
+#define SCTLR_EL1_SED          (0x1 << 8)
+#define SCTLR_EL1_SPAN         (0x1 << 23)
+
+/* id_aa64isar0 */
+#define ID_AA64ISAR0_RDM_SHIFT         28
+#define ID_AA64ISAR0_ATOMICS_SHIFT     20
+#define ID_AA64ISAR0_CRC32_SHIFT       16
+#define ID_AA64ISAR0_SHA2_SHIFT                12
+#define ID_AA64ISAR0_SHA1_SHIFT                8
+#define ID_AA64ISAR0_AES_SHIFT         4
+
+/* id_aa64pfr0 */
+#define ID_AA64PFR0_GIC_SHIFT          24
+#define ID_AA64PFR0_ASIMD_SHIFT                20
+#define ID_AA64PFR0_FP_SHIFT           16
+#define ID_AA64PFR0_EL3_SHIFT          12
+#define ID_AA64PFR0_EL2_SHIFT          8
+#define ID_AA64PFR0_EL1_SHIFT          4
+#define ID_AA64PFR0_EL0_SHIFT          0
+
+#define ID_AA64PFR0_FP_NI              0xf
+#define ID_AA64PFR0_FP_SUPPORTED       0x0
+#define ID_AA64PFR0_ASIMD_NI           0xf
+#define ID_AA64PFR0_ASIMD_SUPPORTED    0x0
+#define ID_AA64PFR0_EL1_64BIT_ONLY     0x1
+#define ID_AA64PFR0_EL0_64BIT_ONLY     0x1
+
+/* id_aa64mmfr0 */
+#define ID_AA64MMFR0_TGRAN4_SHIFT      28
+#define ID_AA64MMFR0_TGRAN64_SHIFT     24
+#define ID_AA64MMFR0_TGRAN16_SHIFT     20
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT   16
+#define ID_AA64MMFR0_SNSMEM_SHIFT      12
+#define ID_AA64MMFR0_BIGENDEL_SHIFT    8
+#define ID_AA64MMFR0_ASID_SHIFT                4
+#define ID_AA64MMFR0_PARANGE_SHIFT     0
+
+#define ID_AA64MMFR0_TGRAN4_NI         0xf
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED  0x0
+#define ID_AA64MMFR0_TGRAN64_NI                0xf
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
+#define ID_AA64MMFR0_TGRAN16_NI                0x0
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
+
+/* id_aa64mmfr1 */
+#define ID_AA64MMFR1_PAN_SHIFT         20
+#define ID_AA64MMFR1_LOR_SHIFT         16
+#define ID_AA64MMFR1_HPD_SHIFT         12
+#define ID_AA64MMFR1_VHE_SHIFT         8
+#define ID_AA64MMFR1_VMIDBITS_SHIFT    4
+#define ID_AA64MMFR1_HADBS_SHIFT       0
+
+/* id_aa64dfr0 */
+#define ID_AA64DFR0_CTX_CMPS_SHIFT     28
+#define ID_AA64DFR0_WRPS_SHIFT         20
+#define ID_AA64DFR0_BRPS_SHIFT         12
+#define ID_AA64DFR0_PMUVER_SHIFT       8
+#define ID_AA64DFR0_TRACEVER_SHIFT     4
+#define ID_AA64DFR0_DEBUGVER_SHIFT     0
+
+#define ID_ISAR5_RDM_SHIFT             24
+#define ID_ISAR5_CRC32_SHIFT           16
+#define ID_ISAR5_SHA2_SHIFT            12
+#define ID_ISAR5_SHA1_SHIFT            8
+#define ID_ISAR5_AES_SHIFT             4
+#define ID_ISAR5_SEVL_SHIFT            0
+
+#define MVFR0_FPROUND_SHIFT            28
+#define MVFR0_FPSHVEC_SHIFT            24
+#define MVFR0_FPSQRT_SHIFT             20
+#define MVFR0_FPDIVIDE_SHIFT           16
+#define MVFR0_FPTRAP_SHIFT             12
+#define MVFR0_FPDP_SHIFT               8
+#define MVFR0_FPSP_SHIFT               4
+#define MVFR0_SIMD_SHIFT               0
+
+#define MVFR1_SIMDFMAC_SHIFT           28
+#define MVFR1_FPHP_SHIFT               24
+#define MVFR1_SIMDHP_SHIFT             20
+#define MVFR1_SIMDSP_SHIFT             16
+#define MVFR1_SIMDINT_SHIFT            12
+#define MVFR1_SIMDLS_SHIFT             8
+#define MVFR1_FPDNAN_SHIFT             4
+#define MVFR1_FPFTZ_SHIFT              0
+
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT       ID_AA64MMFR0_TGRAN4_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED   ID_AA64MMFR0_TGRAN4_SUPPORTED
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT       ID_AA64MMFR0_TGRAN16_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED   ID_AA64MMFR0_TGRAN16_SUPPORTED
+#elif defined(CONFIG_ARM64_64K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT       ID_AA64MMFR0_TGRAN64_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED   ID_AA64MMFR0_TGRAN64_SUPPORTED
+#endif
+
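The TGRAN aliases above give early boot code a single field to test for whether the CPU implements the configured page size. Roughly, in C (a sketch only: the real check is performed in head.S assembly before the MMU is enabled, and the helper name here is invented):

	/*
	 * Illustrative only: the C equivalent of the granule check that
	 * head.S performs before enabling the MMU.
	 */
	static inline bool cpu_supports_configured_granule(void)
	{
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		u64 tgran = (mmfr0 >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;

		return tgran == ID_AA64MMFR0_TGRAN_SUPPORTED;
	}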
 #ifdef __ASSEMBLY__
 
        .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
index dcd06d18a42a36a4859c34370819d323f7e171d7..90c7ff233735d7691bf3b34b7075dffd07dbf939 100644 (file)
 
 #include <linux/compiler.h>
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#ifdef CONFIG_ARM64_4K_PAGES
 #define THREAD_SIZE_ORDER      2
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define THREAD_SIZE_ORDER      0
 #endif
 
 #define THREAD_SIZE            16384
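The orders line up with THREAD_SIZE: with 4KB pages, order 2 allocates 2^2 * 4096 = 16384 bytes; with 16KB pages, order 0 is a single 16384-byte page. No order is defined for 64KB pages, where THREAD_SIZE is smaller than PAGE_SIZE and the generic fork code can fall back to a kmem_cache for stack allocation instead of the page allocator.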
@@ -111,7 +113,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_RESTORE_SIGMASK    20
 #define TIF_SINGLESTEP         21
 #define TIF_32BIT              22      /* 32bit process */
-#define TIF_SWITCH_MM          23      /* deferred switch_mm */
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
index d6e6b666038032e0ff472bea05a744f6515f41c0..ffdaea7954bb620daf19aba8b855d4c04b1a33c1 100644 (file)
@@ -37,17 +37,21 @@ static inline void __tlb_remove_table(void *_table)
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       if (tlb->fullmm) {
-               flush_tlb_mm(tlb->mm);
-       } else {
-               struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-               /*
-                * The intermediate page table levels are already handled by
-                * the __(pte|pmd|pud)_free_tlb() functions, so last level
-                * TLBI is sufficient here.
-                */
-               __flush_tlb_range(&vma, tlb->start, tlb->end, true);
-       }
+       struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+
+       /*
+        * The ASID allocator will either invalidate the ASID or mark
+        * it as used.
+        */
+       if (tlb->fullmm)
+               return;
+
+       /*
+        * The intermediate page table levels are already handled by
+        * the __(pte|pmd|pud)_free_tlb() functions, so last level
+        * TLBI is sufficient here.
+        */
+       __flush_tlb_range(&vma, tlb->start, tlb->end, true);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
index 7bd2da021658ad765aa6bc93374f443ad0d7b594..b460ae28e3463db46816f678c5f90988c3f059de 100644 (file)
  *             only require the D-TLB to be invalidated.
  *             - kaddr - Kernel virtual memory address
  */
+static inline void local_flush_tlb_all(void)
+{
+       dsb(nshst);
+       asm("tlbi       vmalle1");
+       dsb(nsh);
+       isb();
+}
+
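Note the barrier domains: local_flush_tlb_all() pairs a non-broadcast "tlbi vmalle1" with nsh barriers, so only the local CPU's TLB entries are invalidated, while flush_tlb_all() below uses the inner-shareable (ish) variants that reach every CPU. The local form is presumably what the rewritten ASID allocator (mm/context.c in this series) wants on its rollover path, where only the local TLB needs cleaning.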
 static inline void flush_tlb_all(void)
 {
        dsb(ishst);
@@ -73,7 +81,7 @@ static inline void flush_tlb_all(void)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-       unsigned long asid = (unsigned long)ASID(mm) << 48;
+       unsigned long asid = ASID(mm) << 48;
 
        dsb(ishst);
        asm("tlbi       aside1is, %0" : : "r" (asid));
@@ -83,8 +91,7 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long uaddr)
 {
-       unsigned long addr = uaddr >> 12 |
-               ((unsigned long)ASID(vma->vm_mm) << 48);
+       unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
 
        dsb(ishst);
        asm("tlbi       vale1is, %0" : : "r" (addr));
@@ -101,7 +108,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end,
                                     bool last_level)
 {
-       unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+       unsigned long asid = ASID(vma->vm_mm) << 48;
        unsigned long addr;
 
        if ((end - start) > MAX_TLB_RANGE) {
@@ -154,9 +161,8 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 static inline void __flush_tlb_pgtable(struct mm_struct *mm,
                                       unsigned long uaddr)
 {
-       unsigned long addr = uaddr >> 12 | ((unsigned long)ASID(mm) << 48);
+       unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
 
-       dsb(ishst);
        asm("tlbi       vae1is, %0" : : "r" (addr));
        dsb(ish);
 }
index 22dc9bc781be60d34f0285c7e7e295fe62e809dd..1b6bda2ff102a0e90ccd94ff1ad49d89c40f32df 100644 (file)
@@ -7,6 +7,8 @@ AFLAGS_head.o           := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_efi-stub.o      := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_armv8_deprecated.o := -I$(src)
 
+KASAN_SANITIZE_efi-stub.o      := n
+
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_insn.o = -pg
 CFLAGS_REMOVE_return_address.o = -pg
@@ -20,6 +22,14 @@ arm64-obj-y          := debug-monitors.o entry.o irq.o fpsimd.o              \
                           cpufeature.o alternative.o cacheinfo.o               \
                           smp.o smp_spin_table.o topology.o
 
+stub-obj                               := efi-stub.o efi-entry.o
+extra-y                                        := $(stub-obj)
+stub-obj                               := $(patsubst %.o,%.stub.o,$(stub-obj))
+
+OBJCOPYFLAGS := --prefix-symbols=__efistub_
+$(obj)/%.stub.o: $(obj)/%.o FORCE
+       $(call if_changed,objcopy)
+
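The stub objects are now built twice: the ordinary .o, and a .stub.o copy in which objcopy prefixes every symbol with __efistub_. Only the prefixed copies are linked in (see the CONFIG_EFI line below), so an accidental reference from stub code to an unprefixed kernel symbol fails at link time rather than faulting at runtime, when the kernel may not yet be mapped at its linked address; the handful of routines the stub legitimately calls are expected to be exported under __efistub_ aliases elsewhere in this series.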
 arm64-obj-$(CONFIG_COMPAT)             += sys32.o kuser32.o signal32.o         \
                                           sys_compat.o entry32.o               \
                                           ../../arm/kernel/opcodes.o
@@ -32,7 +42,7 @@ arm64-obj-$(CONFIG_CPU_PM)            += sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE)           += cpuidle.o
 arm64-obj-$(CONFIG_JUMP_LABEL)         += jump_label.o
 arm64-obj-$(CONFIG_KGDB)               += kgdb.o
-arm64-obj-$(CONFIG_EFI)                        += efi.o efi-stub.o efi-entry.o
+arm64-obj-$(CONFIG_EFI)                        += efi.o $(stub-obj)
 arm64-obj-$(CONFIG_PCI)                        += pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI)               += acpi.o
@@ -40,7 +50,7 @@ arm64-obj-$(CONFIG_ACPI)              += acpi.o
 obj-y                                  += $(arm64-obj-y) vdso/
 obj-m                                  += $(arm64-obj-m)
 head-y                                 := head.o
-extra-y                                        := $(head-y) vmlinux.lds
+extra-y                                        += $(head-y) vmlinux.lds
 
 # vDSO - this must be built first to generate the symbol offsets
 $(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
index a85843ddbde8892e456f29636fed7d7a66b03825..3b6d8cc9dfe00ce14b764a3102ad0cd73f546c82 100644 (file)
@@ -51,6 +51,9 @@ EXPORT_SYMBOL(strnlen);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(memcmp);
 
index 8d89cf8dae5556851365e2cee335599b4d8eb359..25de8b244961312c4b55c02ffdc1e91e0e765b6c 100644 (file)
@@ -60,7 +60,7 @@ int main(void)
   DEFINE(S_SYSCALLNO,          offsetof(struct pt_regs, syscallno));
   DEFINE(S_FRAME_SIZE,         sizeof(struct pt_regs));
   BLANK();
-  DEFINE(MM_CONTEXT_ID,                offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID,                offsetof(struct mm_struct, context.id.counter));
   BLANK();
   DEFINE(VMA_VM_MM,            offsetof(struct vm_area_struct, vm_mm));
   DEFINE(VMA_VM_FLAGS,         offsetof(struct vm_area_struct, vm_flags));
index 6ffd914385609d0aab875813e4e9e8b690976421..09eab326ef9378e496295e5c60afdebc790abfa5 100644 (file)
@@ -88,5 +88,5 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 
 void check_local_cpu_errata(void)
 {
-       check_cpu_capabilities(arm64_errata, "enabling workaround for");
+       update_cpu_capabilities(arm64_errata, "enabling workaround for");
 }
index 3c9aed32f70b2113773f671053a0f6ac87632b4a..d0d607452e1d8cbb684c74289ba4f60f76df666d 100644 (file)
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#define pr_fmt(fmt) "alternatives: " fmt
+#define pr_fmt(fmt) "CPU features: " fmt
 
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
+#include <asm/cpu_ops.h>
 #include <asm/processor.h>
+#include <asm/sysreg.h>
+
+unsigned long elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT       \
+                               (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+                                COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+                                COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+                                COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+                                COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
+                                COMPAT_HWCAP_LPAE)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+unsigned int compat_elf_hwcap2 __read_mostly;
+#endif
+
+DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+
+#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+       {                                               \
+               .strict = STRICT,                       \
+               .type = TYPE,                           \
+               .shift = SHIFT,                         \
+               .width = WIDTH,                         \
+               .safe_val = SAFE_VAL,                   \
+       }
+
+#define ARM64_FTR_END                                  \
+       {                                               \
+               .width = 0,                             \
+       }
+
+static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+       /* Linux doesn't care about EL3 */
+       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
+       /* Linux shouldn't care about secure memory */
+       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
+       /*
+        * Differing PARange is fine as long as all peripherals and memory are mapped
+        * within the minimum PARange of all CPUs
+        */
+       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_ctr[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),        /* RAO */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),  /* CWG */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),   /* ERG */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),   /* DminLine */
+       /*
+        * Linux can handle differing I-cache policies. Userspace JITs will
+        * make use of *minLine
+        */
+       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),     /* L1Ip */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),        /* RAZ */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),    /* IminLine */
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_mmfr0[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),        /* InnerShr */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),        /* FCSE */
+       ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),        /* AuxReg */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),        /* TCM */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),        /* ShareLvl */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0), /* OuterShr */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_mvfr2[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),        /* RAZ */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* FPMisc */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* SIMDMisc */
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_dczid[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),        /* RAZ */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),         /* DZP */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),    /* BS */
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_isar5[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),        /* RAZ */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_mmfr4[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),        /* RAZ */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* ac2 */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* RAZ */
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_id_pfr0[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),       /* RAZ */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),        /* State3 */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),         /* State2 */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* State1 */
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* State0 */
+       ARM64_FTR_END,
+};
+
+/*
+ * Common ftr bits for a 32bit register with all hidden, strict
+ * attributes, with 4bit feature fields and a default safe value of
+ * 0. Covers the following 32bit registers:
+ * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
+ */
+static struct arm64_ftr_bits ftr_generic_32bits[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
+       ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_generic[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_generic32[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
+       ARM64_FTR_END,
+};
+
+static struct arm64_ftr_bits ftr_aa64raz[] = {
+       ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
+       ARM64_FTR_END,
+};
+
+#define ARM64_FTR_REG(id, table)               \
+       {                                       \
+               .sys_id = id,                   \
+               .name = #id,                    \
+               .ftr_bits = &((table)[0]),      \
+       }
+
+static struct arm64_ftr_reg arm64_ftr_regs[] = {
+
+       /* Op1 = 0, CRn = 0, CRm = 1 */
+       ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
+       ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
+       ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
+
+       /* Op1 = 0, CRn = 0, CRm = 2 */
+       ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
+       ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+
+       /* Op1 = 0, CRn = 0, CRm = 3 */
+       ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
+       ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
+
+       /* Op1 = 0, CRn = 0, CRm = 4 */
+       ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
+       ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),
+
+       /* Op1 = 0, CRn = 0, CRm = 5 */
+       ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
+       ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),
+
+       /* Op1 = 0, CRn = 0, CRm = 6 */
+       ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
+       ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),
+
+       /* Op1 = 0, CRn = 0, CRm = 7 */
+       ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
+       ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+
+       /* Op1 = 3, CRn = 0, CRm = 0 */
+       ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
+       ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
+
+       /* Op1 = 3, CRn = 14, CRm = 0 */
+       ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
+};
+
+static int search_cmp_ftr_reg(const void *id, const void *regp)
+{
+       return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id;
+}
+
+/*
+ * get_arm64_ftr_reg - Look up a feature register entry using its
+ * sys_reg() encoding. With the array arm64_ftr_regs sorted in
+ * ascending order of sys_id, we use a binary search to find a
+ * matching entry.
+ *
+ * returns - Upon success, the matching ftr_reg entry for id.
+ *         - NULL on failure. It is up to the caller to decide
+ *           the impact of a failure.
+ */
+static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
+{
+       return bsearch((const void *)(unsigned long)sys_id,
+                       arm64_ftr_regs,
+                       ARRAY_SIZE(arm64_ftr_regs),
+                       sizeof(arm64_ftr_regs[0]),
+                       search_cmp_ftr_reg);
+}
+
+static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val)
+{
+       u64 mask = arm64_ftr_mask(ftrp);
+
+       reg &= ~mask;
+       reg |= (ftr_val << ftrp->shift) & mask;
+       return reg;
+}
+
+static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
+{
+       s64 ret = 0;
+
+       switch (ftrp->type) {
+       case FTR_EXACT:
+               ret = ftrp->safe_val;
+               break;
+       case FTR_LOWER_SAFE:
+               ret = new < cur ? new : cur;
+               break;
+       case FTR_HIGHER_SAFE:
+               ret = new > cur ? new : cur;
+               break;
+       default:
+               BUG();
+       }
+
+       return ret;
+}
+
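A worked illustration of the policy (the pointer names and mismatching values are hypothetical; the fields referred to live in ftr_ctr above):

	s64 safe;

	safe = arm64_ftr_safe_value(erg_ftrp, 3, 4);	/* FTR_LOWER_SAFE:  min(3, 4) -> 3 */
	safe = arm64_ftr_safe_value(cwg_ftrp, 5, 4);	/* FTR_HIGHER_SAFE: max(5, 4) -> 5 */
	safe = arm64_ftr_safe_value(l1ip_ftrp, 2, 1);	/* FTR_EXACT: table safe_val -> 0 */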
+static int __init sort_cmp_ftr_regs(const void *a, const void *b)
+{
+       return ((const struct arm64_ftr_reg *)a)->sys_id -
+                ((const struct arm64_ftr_reg *)b)->sys_id;
+}
+
+static void __init swap_ftr_regs(void *a, void *b, int size)
+{
+       struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a;
+       *(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b;
+       *(struct arm64_ftr_reg *)b = tmp;
+}
+
+static void __init sort_ftr_regs(void)
+{
+       /* Keep the array sorted so that we can do the binary search */
+       sort(arm64_ftr_regs,
+               ARRAY_SIZE(arm64_ftr_regs),
+               sizeof(arm64_ftr_regs[0]),
+               sort_cmp_ftr_regs,
+               swap_ftr_regs);
+}
+
+/*
+ * Initialise the CPU feature register from the boot CPU values.
+ * Also initialises the strict_mask for the register.
+ */
+static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+{
+       u64 val = 0;
+       u64 strict_mask = ~0x0ULL;
+       struct arm64_ftr_bits *ftrp;
+       struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
+
+       BUG_ON(!reg);
+
+       for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
+               s64 ftr_new = arm64_ftr_value(ftrp, new);
+
+               val = arm64_ftr_set_value(ftrp, val, ftr_new);
+               if (!ftrp->strict)
+                       strict_mask &= ~arm64_ftr_mask(ftrp);
+       }
+       reg->sys_val = val;
+       reg->strict_mask = strict_mask;
+}
+
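As a concrete case, the CTR_EL0 table above marks only the L1Ip field (bits [15:14]) FTR_NONSTRICT, so the loop leaves strict_mask as all-ones apart from those two bits; the later sanity check therefore tolerates differing I-cache policies and nothing else. Illustration only:

	u64 ctr_strict_mask = ~GENMASK_ULL(15, 14);	/* == ~(0x3ULL << 14) */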
+void __init init_cpu_features(struct cpuinfo_arm64 *info)
+{
+       /* Before we start using the table, make sure it is sorted */
+       sort_ftr_regs();
+
+       init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
+       init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
+       init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
+       init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
+       init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
+       init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
+       init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+       init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
+       init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
+       init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+       init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+       init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+       init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+       init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+       init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+       init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+       init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+       init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+       init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+       init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+       init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+       init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+       init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+       init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+       init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
+
+static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
+{
+       struct arm64_ftr_bits *ftrp;
+
+       for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
+               s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
+               s64 ftr_new = arm64_ftr_value(ftrp, new);
+
+               if (ftr_cur == ftr_new)
+                       continue;
+               /* Find a safe value */
+               ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
+               reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
+       }
+}
+
+static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
+{
+       struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
+
+       BUG_ON(!regp);
+       update_cpu_ftr_reg(regp, val);
+       if ((boot & regp->strict_mask) == (val & regp->strict_mask))
+               return 0;
+       pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
+                       regp->name, boot, cpu, val);
+       return 1;
+}
+
+/*
+ * Update the system wide CPU feature registers with the values from a
+ * non-boot CPU. Also performs SANITY checks to make sure that there
+ * aren't any insane variations from those of the boot CPU.
+ */
+void update_cpu_features(int cpu,
+                        struct cpuinfo_arm64 *info,
+                        struct cpuinfo_arm64 *boot)
+{
+       int taint = 0;
+
+       /*
+        * The kernel can handle differing I-cache policies, but otherwise
+        * caches should look identical. Userspace JITs will make use of
+        * *minLine.
+        */
+       taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
+                                     info->reg_ctr, boot->reg_ctr);
+
+       /*
+        * Userspace may perform DC ZVA instructions. Mismatched block sizes
+        * could result in too much or too little memory being zeroed if a
+        * process is preempted and migrated between CPUs.
+        */
+       taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
+                                     info->reg_dczid, boot->reg_dczid);
+
+       /* If different, timekeeping will be broken (especially with KVM) */
+       taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
+                                     info->reg_cntfrq, boot->reg_cntfrq);
+
+       /*
+        * The kernel uses self-hosted debug features and expects CPUs to
+        * support identical debug features. We presently need CTX_CMPs, WRPs,
+        * and BRPs to be identical.
+        * ID_AA64DFR1 is currently RES0.
+        */
+       taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
+                                     info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
+       taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
+                                     info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
+       /*
+        * Even in big.LITTLE, processors should be identical instruction-set
+        * wise.
+        */
+       taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
+                                     info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
+       taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
+                                     info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
+
+       /*
+        * Differing PARange support is fine as long as all peripherals and
+        * memory are mapped within the minimum PARange of all CPUs.
+        * Linux should not care about secure memory.
+        */
+       taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
+                                     info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
+       taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
+                                     info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
+
+       /*
+        * EL3 is not our concern.
+        * ID_AA64PFR1 is currently RES0.
+        */
+       taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+                                     info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+       taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
+                                     info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
+
+       /*
+        * If we have AArch32, we care about 32-bit features for compat. These
+        * registers should be RES0 otherwise.
+        */
+       taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
+                                       info->reg_id_dfr0, boot->reg_id_dfr0);
+       taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
+                                       info->reg_id_isar0, boot->reg_id_isar0);
+       taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
+                                       info->reg_id_isar1, boot->reg_id_isar1);
+       taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
+                                       info->reg_id_isar2, boot->reg_id_isar2);
+       taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
+                                       info->reg_id_isar3, boot->reg_id_isar3);
+       taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
+                                       info->reg_id_isar4, boot->reg_id_isar4);
+       taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
+                                       info->reg_id_isar5, boot->reg_id_isar5);
+
+       /*
+        * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+        * ACTLR formats could differ across CPUs and therefore would have to
+        * be trapped for virtualization anyway.
+        */
+       taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
+                                       info->reg_id_mmfr0, boot->reg_id_mmfr0);
+       taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
+                                       info->reg_id_mmfr1, boot->reg_id_mmfr1);
+       taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
+                                       info->reg_id_mmfr2, boot->reg_id_mmfr2);
+       taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
+                                       info->reg_id_mmfr3, boot->reg_id_mmfr3);
+       taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
+                                       info->reg_id_pfr0, boot->reg_id_pfr0);
+       taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
+                                       info->reg_id_pfr1, boot->reg_id_pfr1);
+       taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
+                                       info->reg_mvfr0, boot->reg_mvfr0);
+       taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
+                                       info->reg_mvfr1, boot->reg_mvfr1);
+       taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
+                                       info->reg_mvfr2, boot->reg_mvfr2);
+
+       /*
+        * Mismatched CPU features are a recipe for disaster. Don't even
+        * pretend to support them.
+        */
+       WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
+                       "Unsupported CPU feature variation.\n");
+}
+
+u64 read_system_reg(u32 id)
+{
+       struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
+
+       /* We shouldn't get a request for an unsupported register */
+       BUG_ON(!regp);
+       return regp->sys_val;
+}
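Callers treat the result like the raw register. Compare the debug-monitors.c hunk later in this diff, which switches to this sanitised view:

	u8 debug_ver = cpuid_feature_extract_field(
				read_system_reg(SYS_ID_AA64DFR0_EL1),
				ID_AA64DFR0_DEBUGVER_SHIFT);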
 
 static bool
 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
@@ -31,34 +586,31 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
        return val >= entry->min_field_value;
 }
 
-#define __ID_FEAT_CHK(reg)                                             \
-static bool __maybe_unused                                             \
-has_##reg##_feature(const struct arm64_cpu_capabilities *entry)                \
-{                                                                      \
-       u64 val;                                                        \
-                                                                       \
-       val = read_cpuid(reg##_el1);                                    \
-       return feature_matches(val, entry);                             \
-}
+static bool
+has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
+{
+       u64 val;
 
-__ID_FEAT_CHK(id_aa64pfr0);
-__ID_FEAT_CHK(id_aa64mmfr1);
-__ID_FEAT_CHK(id_aa64isar0);
+       val = read_system_reg(entry->sys_reg);
+       return feature_matches(val, entry);
+}
 
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
                .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
-               .matches = has_id_aa64pfr0_feature,
-               .field_pos = 24,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64PFR0_EL1,
+               .field_pos = ID_AA64PFR0_GIC_SHIFT,
                .min_field_value = 1,
        },
 #ifdef CONFIG_ARM64_PAN
        {
                .desc = "Privileged Access Never",
                .capability = ARM64_HAS_PAN,
-               .matches = has_id_aa64mmfr1_feature,
-               .field_pos = 20,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64MMFR1_EL1,
+               .field_pos = ID_AA64MMFR1_PAN_SHIFT,
                .min_field_value = 1,
                .enable = cpu_enable_pan,
        },
@@ -67,15 +619,101 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "LSE atomic instructions",
                .capability = ARM64_HAS_LSE_ATOMICS,
-               .matches = has_id_aa64isar0_feature,
-               .field_pos = 20,
+               .matches = has_cpuid_feature,
+               .sys_reg = SYS_ID_AA64ISAR0_EL1,
+               .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
                .min_field_value = 2,
        },
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
        {},
 };
 
-void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+#define HWCAP_CAP(reg, field, min_value, type, cap)            \
+       {                                                       \
+               .desc = #cap,                                   \
+               .matches = has_cpuid_feature,                   \
+               .sys_reg = reg,                                 \
+               .field_pos = field,                             \
+               .min_field_value = min_value,                   \
+               .hwcap_type = type,                             \
+               .hwcap = cap,                                   \
+       }
+
+static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 2, CAP_HWCAP, HWCAP_PMULL),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 1, CAP_HWCAP, HWCAP_AES),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 1, CAP_HWCAP, HWCAP_SHA1),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 1, CAP_HWCAP, HWCAP_SHA2),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 1, CAP_HWCAP, HWCAP_CRC32),
+       HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 2, CAP_HWCAP, HWCAP_ATOMICS),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 0, CAP_HWCAP, HWCAP_FP),
+       HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 0, CAP_HWCAP, HWCAP_ASIMD),
+#ifdef CONFIG_COMPAT
+       HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+       HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+       HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+       HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
+       HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+#endif
+       {},
+};
+
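Growing the user-visible hwcaps is now a one-line table entry. A hypothetical addition (HWCAP_FOO is invented purely for illustration) would read:

	/* Hypothetical: advertise HWCAP_FOO when the RDM field is >= 1. */
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, 1,
		  CAP_HWCAP, HWCAP_FOO),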
+static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+{
+       switch (cap->hwcap_type) {
+       case CAP_HWCAP:
+               elf_hwcap |= cap->hwcap;
+               break;
+#ifdef CONFIG_COMPAT
+       case CAP_COMPAT_HWCAP:
+               compat_elf_hwcap |= (u32)cap->hwcap;
+               break;
+       case CAP_COMPAT_HWCAP2:
+               compat_elf_hwcap2 |= (u32)cap->hwcap;
+               break;
+#endif
+       default:
+               WARN_ON(1);
+               break;
+       }
+}
+
+/* Check if we have a particular HWCAP enabled */
+static bool cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
+{
+       bool rc;
+
+       switch (cap->hwcap_type) {
+       case CAP_HWCAP:
+               rc = (elf_hwcap & cap->hwcap) != 0;
+               break;
+#ifdef CONFIG_COMPAT
+       case CAP_COMPAT_HWCAP:
+               rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
+               break;
+       case CAP_COMPAT_HWCAP2:
+               rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
+               break;
+#endif
+       default:
+               WARN_ON(1);
+               rc = false;
+       }
+
+       return rc;
+}
+
+static void setup_cpu_hwcaps(void)
+{
+       int i;
+       const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
+
+       for (i = 0; hwcaps[i].desc; i++)
+               if (hwcaps[i].matches(&hwcaps[i]))
+                       cap_set_hwcap(&hwcaps[i]);
+}
+
+void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                            const char *info)
 {
        int i;
@@ -88,15 +726,178 @@ void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                        pr_info("%s %s\n", info, caps[i].desc);
                cpus_set_cap(caps[i].capability);
        }
+}
+
+/*
+ * Run through the enabled capabilities and call enable() on all
+ * active CPUs.
+ */
+void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+{
+       int i;
+
+       for (i = 0; caps[i].desc; i++)
+               if (caps[i].enable && cpus_have_cap(caps[i].capability))
+                       on_each_cpu(caps[i].enable, NULL, true);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Flag to indicate if we have computed the system wide
+ * capabilities based on the boot time active CPUs. This
+ * will be used to determine if a new booting CPU should
+ * go through the verification process to make sure that it
+ * supports the system capabilities, without using a hotplug
+ * notifier.
+ */
+static bool sys_caps_initialised;
+
+static inline void set_sys_caps_initialised(void)
+{
+       sys_caps_initialised = true;
+}
+
+/*
+ * __raw_read_system_reg() - Used by a STARTING CPU before cpuinfo is populated.
+ */
+static u64 __raw_read_system_reg(u32 sys_id)
+{
+       switch (sys_id) {
+       case SYS_ID_PFR0_EL1:           return (u64)read_cpuid(ID_PFR0_EL1);
+       case SYS_ID_PFR1_EL1:           return (u64)read_cpuid(ID_PFR1_EL1);
+       case SYS_ID_DFR0_EL1:           return (u64)read_cpuid(ID_DFR0_EL1);
+       case SYS_ID_MMFR0_EL1:          return (u64)read_cpuid(ID_MMFR0_EL1);
+       case SYS_ID_MMFR1_EL1:          return (u64)read_cpuid(ID_MMFR1_EL1);
+       case SYS_ID_MMFR2_EL1:          return (u64)read_cpuid(ID_MMFR2_EL1);
+       case SYS_ID_MMFR3_EL1:          return (u64)read_cpuid(ID_MMFR3_EL1);
+       case SYS_ID_ISAR0_EL1:          return (u64)read_cpuid(ID_ISAR0_EL1);
+       case SYS_ID_ISAR1_EL1:          return (u64)read_cpuid(ID_ISAR1_EL1);
+       case SYS_ID_ISAR2_EL1:          return (u64)read_cpuid(ID_ISAR2_EL1);
+       case SYS_ID_ISAR3_EL1:          return (u64)read_cpuid(ID_ISAR3_EL1);
+       case SYS_ID_ISAR4_EL1:          return (u64)read_cpuid(ID_ISAR4_EL1);
+       case SYS_ID_ISAR5_EL1:          return (u64)read_cpuid(ID_ISAR5_EL1);
+       case SYS_MVFR0_EL1:             return (u64)read_cpuid(MVFR0_EL1);
+       case SYS_MVFR1_EL1:             return (u64)read_cpuid(MVFR1_EL1);
+       case SYS_MVFR2_EL1:             return (u64)read_cpuid(MVFR2_EL1);
+
+       case SYS_ID_AA64PFR0_EL1:       return (u64)read_cpuid(ID_AA64PFR0_EL1);
+       case SYS_ID_AA64PFR1_EL1:       return (u64)read_cpuid(ID_AA64PFR1_EL1);
+       case SYS_ID_AA64DFR0_EL1:       return (u64)read_cpuid(ID_AA64DFR0_EL1);
+       case SYS_ID_AA64DFR1_EL1:       return (u64)read_cpuid(ID_AA64DFR1_EL1);
+       case SYS_ID_AA64MMFR0_EL1:      return (u64)read_cpuid(ID_AA64MMFR0_EL1);
+       case SYS_ID_AA64MMFR1_EL1:      return (u64)read_cpuid(ID_AA64MMFR1_EL1);
+       case SYS_ID_AA64ISAR0_EL1:      return (u64)read_cpuid(ID_AA64ISAR0_EL1);
+       case SYS_ID_AA64ISAR1_EL1:      return (u64)read_cpuid(ID_AA64ISAR1_EL1);
+
+       case SYS_CNTFRQ_EL0:            return (u64)read_cpuid(CNTFRQ_EL0);
+       case SYS_CTR_EL0:               return (u64)read_cpuid(CTR_EL0);
+       case SYS_DCZID_EL0:             return (u64)read_cpuid(DCZID_EL0);
+       default:
+               BUG();
+               return 0;
+       }
+}
+
+/*
+ * Park the CPU, since it is missing a capability advertised by
+ * the system.
+ */
+static void fail_incapable_cpu(char *cap_type,
+                                const struct arm64_cpu_capabilities *cap)
+{
+       int cpu = smp_processor_id();
+
+       pr_crit("CPU%d: missing %s : %s\n", cpu, cap_type, cap->desc);
+       /* Mark this CPU absent */
+       set_cpu_present(cpu, 0);
+
+       /* Check if we can park ourselves */
+       if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
+               cpu_ops[cpu]->cpu_die(cpu);
+       asm(
+       "1:     wfe\n"
+       "       wfi\n"
+       "       b       1b");
+}
 
-       /* second pass allows enable() to consider interacting capabilities */
+/*
+ * Run through the enabled system capabilities and call enable() on this CPU.
+ * The capabilities were decided based on the CPUs available at boot time.
+ * Any new CPU should match the system wide status of the capability. If the
+ * new CPU doesn't have a capability which the system now has enabled, we
+ * cannot do anything to fix it up and could cause unexpected failures. So
+ * we park the CPU.
+ */
+void verify_local_cpu_capabilities(void)
+{
+       int i;
+       const struct arm64_cpu_capabilities *caps;
+
+       /*
+        * If we haven't computed the system capabilities, there is nothing
+        * to verify.
+        */
+       if (!sys_caps_initialised)
+               return;
+
+       caps = arm64_features;
        for (i = 0; caps[i].desc; i++) {
-               if (cpus_have_cap(caps[i].capability) && caps[i].enable)
-                       caps[i].enable();
+               if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
+                       continue;
+               /*
+                * If the new CPU misses an advertised feature, we cannot
+                * proceed further; park the CPU.
+                */
+               if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
+                       fail_incapable_cpu("arm64_features", &caps[i]);
+               if (caps[i].enable)
+                       caps[i].enable(NULL);
        }
+
+       for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
+               if (!cpus_have_hwcap(&caps[i]))
+                       continue;
+               if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
+                       fail_incapable_cpu("arm64_hwcaps", &caps[i]);
+       }
+}
+
+#else  /* !CONFIG_HOTPLUG_CPU */
+
+static inline void set_sys_caps_initialised(void)
+{
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void setup_feature_capabilities(void)
+{
+       update_cpu_capabilities(arm64_features, "detected feature:");
+       enable_cpu_capabilities(arm64_features);
 }
 
-void check_local_cpu_features(void)
+void __init setup_cpu_features(void)
 {
-       check_cpu_capabilities(arm64_features, "detected feature:");
+       u32 cwg;
+       int cls;
+
+       /* Set the CPU feature capabilities */
+       setup_feature_capabilities();
+       setup_cpu_hwcaps();
+
+       /* Advertise that we have computed the system capabilities */
+       set_sys_caps_initialised();
+
+       /*
+        * Check for sane CTR_EL0.CWG value.
+        */
+       cwg = cache_type_cwg();
+       cls = cache_line_size();
+       if (!cwg)
+               pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
+                       cls);
+       if (L1_CACHE_BYTES < cls)
+               pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
+                       L1_CACHE_BYTES, cls);
 }
index 75d5a867e7fb1420172ef5e7cb281c9d9aa1f206..706679d0a0b4227c4a7267cd85142192576d7ab3 100644 (file)
 #include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/personality.h>
 #include <linux/preempt.h>
 #include <linux/printk.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 
 /*
@@ -35,7 +38,6 @@
  */
 DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
 static struct cpuinfo_arm64 boot_cpu_data;
-static bool mixed_endian_el0 = true;
 
 static char *icache_policy_str[] = {
        [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
@@ -46,157 +48,148 @@ static char *icache_policy_str[] = {
 
 unsigned long __icache_flags;
 
-static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
+static const char *const hwcap_str[] = {
+       "fp",
+       "asimd",
+       "evtstrm",
+       "aes",
+       "pmull",
+       "sha1",
+       "sha2",
+       "crc32",
+       "atomics",
+       NULL
+};
+
+#ifdef CONFIG_COMPAT
+static const char *const compat_hwcap_str[] = {
+       "swp",
+       "half",
+       "thumb",
+       "26bit",
+       "fastmult",
+       "fpa",
+       "vfp",
+       "edsp",
+       "java",
+       "iwmmxt",
+       "crunch",
+       "thumbee",
+       "neon",
+       "vfpv3",
+       "vfpv3d16",
+       "tls",
+       "vfpv4",
+       "idiva",
+       "idivt",
+       "vfpd32",
+       "lpae",
+       "evtstrm"
+};
+
+static const char *const compat_hwcap2_str[] = {
+       "aes",
+       "pmull",
+       "sha1",
+       "sha2",
+       "crc32",
+       NULL
+};
+#endif /* CONFIG_COMPAT */
+
+static int c_show(struct seq_file *m, void *v)
 {
-       unsigned int cpu = smp_processor_id();
-       u32 l1ip = CTR_L1IP(info->reg_ctr);
+       int i, j;
+
+       for_each_online_cpu(i) {
+               struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+               u32 midr = cpuinfo->reg_midr;
 
-       if (l1ip != ICACHE_POLICY_PIPT) {
                /*
-                * VIPT caches are non-aliasing if the VA always equals the PA
-                * in all bit positions that are covered by the index. This is
-                * the case if the size of a way (# of sets * line size) does
-                * not exceed PAGE_SIZE.
+                * glibc reads /proc/cpuinfo to determine the number of
+                * online processors, looking for lines beginning with
+                * "processor".  Give glibc what it expects.
                 */
-               u32 waysize = icache_get_numsets() * icache_get_linesize();
+               seq_printf(m, "processor\t: %d\n", i);
 
-               if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
-                       set_bit(ICACHEF_ALIASING, &__icache_flags);
+               /*
+                * Dump out the common processor features in a single line.
+                * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+                * rather than attempting to parse this, but there's a body of
+                * software which does already (at least for 32-bit).
+                */
+               seq_puts(m, "Features\t:");
+               if (personality(current->personality) == PER_LINUX32) {
+#ifdef CONFIG_COMPAT
+                       for (j = 0; compat_hwcap_str[j]; j++)
+                               if (compat_elf_hwcap & (1 << j))
+                                       seq_printf(m, " %s", compat_hwcap_str[j]);
+
+                       for (j = 0; compat_hwcap2_str[j]; j++)
+                               if (compat_elf_hwcap2 & (1 << j))
+                                       seq_printf(m, " %s", compat_hwcap2_str[j]);
+#endif /* CONFIG_COMPAT */
+               } else {
+                       for (j = 0; hwcap_str[j]; j++)
+                               if (elf_hwcap & (1 << j))
+                                       seq_printf(m, " %s", hwcap_str[j]);
+               }
+               seq_puts(m, "\n");
+
+               seq_printf(m, "CPU implementer\t: 0x%02x\n",
+                          MIDR_IMPLEMENTOR(midr));
+               seq_printf(m, "CPU architecture: 8\n");
+               seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
+               seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
+               seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
        }
-       if (l1ip == ICACHE_POLICY_AIVIVT)
-               set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
-       pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
-}
-
-bool cpu_supports_mixed_endian_el0(void)
-{
-       return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
-}
-
-bool system_supports_mixed_endian_el0(void)
-{
-       return mixed_endian_el0;
+       return 0;
 }
 
-static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
+static void *c_start(struct seq_file *m, loff_t *pos)
 {
-       mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
+       return *pos < 1 ? (void *)1 : NULL;
 }
 
-static void update_cpu_features(struct cpuinfo_arm64 *info)
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
 {
-       update_mixed_endian_el0_support(info);
+       ++*pos;
+       return NULL;
 }
 
-static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
+static void c_stop(struct seq_file *m, void *v)
 {
-       if ((boot & mask) == (cur & mask))
-               return 0;
-
-       pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
-               name, (unsigned long)boot, cpu, (unsigned long)cur);
-
-       return 1;
 }
 
-#define CHECK_MASK(field, mask, boot, cur, cpu) \
-       check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)
-
-#define CHECK(field, boot, cur, cpu) \
-       CHECK_MASK(field, ~0ULL, boot, cur, cpu)
+const struct seq_operations cpuinfo_op = {
+       .start  = c_start,
+       .next   = c_next,
+       .stop   = c_stop,
+       .show   = c_show
+};
 
-/*
- * Verify that CPUs don't have unexpected differences that will cause problems.
- */
-static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
+static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 {
        unsigned int cpu = smp_processor_id();
-       struct cpuinfo_arm64 *boot = &boot_cpu_data;
-       unsigned int diff = 0;
-
-       /*
-        * The kernel can handle differing I-cache policies, but otherwise
-        * caches should look identical. Userspace JITs will make use of
-        * *minLine.
-        */
-       diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);
-
-       /*
-        * Userspace may perform DC ZVA instructions. Mismatched block sizes
-        * could result in too much or too little memory being zeroed if a
-        * process is preempted and migrated between CPUs.
-        */
-       diff |= CHECK(dczid, boot, cur, cpu);
-
-       /* If different, timekeeping will be broken (especially with KVM) */
-       diff |= CHECK(cntfrq, boot, cur, cpu);
-
-       /*
-        * The kernel uses self-hosted debug features and expects CPUs to
-        * support identical debug features. We presently need CTX_CMPs, WRPs,
-        * and BRPs to be identical.
-        * ID_AA64DFR1 is currently RES0.
-        */
-       diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
-       diff |= CHECK(id_aa64dfr1, boot, cur, cpu);
-
-       /*
-        * Even in big.LITTLE, processors should be identical instruction-set
-        * wise.
-        */
-       diff |= CHECK(id_aa64isar0, boot, cur, cpu);
-       diff |= CHECK(id_aa64isar1, boot, cur, cpu);
-
-       /*
-        * Differing PARange support is fine as long as all peripherals and
-        * memory are mapped within the minimum PARange of all CPUs.
-        * Linux should not care about secure memory.
-        * ID_AA64MMFR1 is currently RES0.
-        */
-       diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
-       diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);
-
-       /*
-        * EL3 is not our concern.
-        * ID_AA64PFR1 is currently RES0.
-        */
-       diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
-       diff |= CHECK(id_aa64pfr1, boot, cur, cpu);
+       u32 l1ip = CTR_L1IP(info->reg_ctr);
 
-       /*
-        * If we have AArch32, we care about 32-bit features for compat. These
-        * registers should be RES0 otherwise.
-        */
-       diff |= CHECK(id_dfr0, boot, cur, cpu);
-       diff |= CHECK(id_isar0, boot, cur, cpu);
-       diff |= CHECK(id_isar1, boot, cur, cpu);
-       diff |= CHECK(id_isar2, boot, cur, cpu);
-       diff |= CHECK(id_isar3, boot, cur, cpu);
-       diff |= CHECK(id_isar4, boot, cur, cpu);
-       diff |= CHECK(id_isar5, boot, cur, cpu);
-       /*
-        * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
-        * ACTLR formats could differ across CPUs and therefore would have to
-        * be trapped for virtualization anyway.
-        */
-       diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
-       diff |= CHECK(id_mmfr1, boot, cur, cpu);
-       diff |= CHECK(id_mmfr2, boot, cur, cpu);
-       diff |= CHECK(id_mmfr3, boot, cur, cpu);
-       diff |= CHECK(id_pfr0, boot, cur, cpu);
-       diff |= CHECK(id_pfr1, boot, cur, cpu);
+       if (l1ip != ICACHE_POLICY_PIPT) {
+               /*
+                * VIPT caches are non-aliasing if the VA always equals the PA
+                * in all bit positions that are covered by the index. This is
+                * the case if the size of a way (# of sets * line size) does
+                * not exceed PAGE_SIZE.
+                */
+               u32 waysize = icache_get_numsets() * icache_get_linesize();
 
-       diff |= CHECK(mvfr0, boot, cur, cpu);
-       diff |= CHECK(mvfr1, boot, cur, cpu);
-       diff |= CHECK(mvfr2, boot, cur, cpu);
+               if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
+                       set_bit(ICACHEF_ALIASING, &__icache_flags);
+       }
+       if (l1ip == ICACHE_POLICY_AIVIVT)
+               set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
-       /*
-        * Mismatched CPU features are a recipe for disaster. Don't even
-        * pretend to support them.
-        */
-       WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
-                       "Unsupported CPU feature variation.\n");
+       pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
 }
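
The aliasing rule in the new cpuinfo_detect_icache_policy() boils down to a single comparison: a VIPT I-cache is non-aliasing only if one way fits inside a page. A minimal user-space sketch of that test, assuming a hypothetical 4KB page size and made-up cache geometry:

    #include <stdbool.h>

    #define PAGE_SIZE 4096u  /* assumption for this sketch */

    /* A VIPT I-cache aliases iff a way (# of sets * line size) spans
     * more than one page, i.e. the index uses VA bits above the page
     * offset. */
    static bool vipt_icache_aliases(unsigned int nsets,
                                    unsigned int linesize)
    {
            return (nsets * linesize) > PAGE_SIZE;
    }

    /* e.g. 256 sets of 64-byte lines -> 16KB way -> aliasing */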
 
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
@@ -236,15 +229,13 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
        cpuinfo_detect_icache_policy(info);
 
        check_local_cpu_errata();
-       check_local_cpu_features();
-       update_cpu_features(info);
 }
 
 void cpuinfo_store_cpu(void)
 {
        struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
        __cpuinfo_store_cpu(info);
-       cpuinfo_sanity_check(info);
+       update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
 }
 
 void __init cpuinfo_store_boot_cpu(void)
@@ -253,4 +244,5 @@ void __init cpuinfo_store_boot_cpu(void)
        __cpuinfo_store_cpu(info);
 
        boot_cpu_data = *info;
+       init_cpu_features(&boot_cpu_data);
 }
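
Taken together, the hunks above split the old single sanity check into two phases: the boot CPU seeds a baseline, and each secondary is compared against it. A self-contained sketch of that flow; apart from the init_cpu_features()/update_cpu_features() call shapes visible above, every name here is an illustrative stand-in:

    struct cpuinfo { unsigned long midr; /* ...ID registers... */ };

    static struct cpuinfo boot_cpu_data;

    static void store_cpu(struct cpuinfo *info) { /* read ID regs */ }
    static void init_cpu_features(struct cpuinfo *boot) { /* baseline */ }
    static void update_cpu_features(int cpu, struct cpuinfo *cur,
                                    struct cpuinfo *boot)
    {
            /* compare cur against boot; warn/taint on mismatch */
    }

    static void boot_cpu_up(struct cpuinfo *info)
    {
            store_cpu(info);
            boot_cpu_data = *info;            /* snapshot the baseline */
            init_cpu_features(&boot_cpu_data);
    }

    static void secondary_cpu_up(int cpu, struct cpuinfo *info)
    {
            store_cpu(info);
            update_cpu_features(cpu, info, &boot_cpu_data);
    }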
index cebf78661a553775003bfee8ec89f65e33e3ec55..1429044b143c50cb09eaecf1d7332373b6fc8618 100644 (file)
 #include <linux/stat.h>
 #include <linux/uaccess.h>
 
-#include <asm/debug-monitors.h>
+#include <asm/cpufeature.h>
 #include <asm/cputype.h>
+#include <asm/debug-monitors.h>
 #include <asm/system_misc.h>
 
 /* Determine debug architecture. */
 u8 debug_monitors_arch(void)
 {
-       return read_cpuid(ID_AA64DFR0_EL1) & 0xf;
+       return cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+                                               ID_AA64DFR0_DEBUGVER_SHIFT);
 }
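
cpuid_feature_extract_field() reads a 4-bit ID register field with sign extension, so a field of 0xf comes back as -1 ("not implemented") rather than 15. A sketch of that extraction; the shift arithmetic is an assumption based on the 4-bit field layout, not copied from the kernel helper:

    #include <stdint.h>

    /* Move the 4-bit field at 'shift' to the top of the register,
     * then arithmetic-shift back down to sign-extend it. */
    static int feature_extract_field(uint64_t reg, unsigned int shift)
    {
            return (int)((int64_t)(reg << (64 - 4 - shift)) >> 60);
    }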
 
 /*
@@ -201,7 +203,7 @@ void unregister_step_hook(struct step_hook *hook)
 }
 
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no syndrome info to determine the right handler, so we
  * call all the registered handlers until one of them handles the
  * step, i.e. returns zero.
index 8ce9b0577442395df912ca934f0fca338948b4a8..a773db92908b03d325c26dba0dc5ea9c287ef49d 100644 (file)
@@ -29,7 +29,7 @@
         * we want to be. The kernel image wants to be placed at TEXT_OFFSET
         * from start of RAM.
         */
-ENTRY(efi_stub_entry)
+ENTRY(entry)
        /*
         * Create a stack frame to save FP/LR with extra space
         * for image_addr variable passed to efi_entry().
@@ -86,8 +86,8 @@ ENTRY(efi_stub_entry)
         * entries for the VA range of the current image, so no maintenance is
         * necessary.
         */
-       adr     x0, efi_stub_entry
-       adr     x1, efi_stub_entry_end
+       adr     x0, entry
+       adr     x1, entry_end
        sub     x1, x1, x0
        bl      __flush_dcache_area
 
@@ -120,5 +120,5 @@ efi_load_fail:
        ldp     x29, x30, [sp], #32
        ret
 
-efi_stub_entry_end:
-ENDPROC(efi_stub_entry)
+entry_end:
+ENDPROC(entry)
index e8ca6eaedd0252e2056530d71d519f423931d323..a48d1f477b2e8c4e3852cd9677c766fcf243b7c2 100644 (file)
@@ -48,7 +48,6 @@ static struct mm_struct efi_mm = {
        .mmap_sem               = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
        .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
        .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
-       INIT_MM_CONTEXT(efi_mm)
 };
 
 static int uefi_debug __initdata;
@@ -258,7 +257,8 @@ static bool __init efi_virtmap_init(void)
                 */
                if (!is_normal_ram(md))
                        prot = __pgprot(PROT_DEVICE_nGnRE);
-               else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+               else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+                        !PAGE_ALIGNED(md->phys_addr))
                        prot = PAGE_KERNEL_EXEC;
                else
                        prot = PAGE_KERNEL;
@@ -343,9 +343,9 @@ static void efi_set_pgd(struct mm_struct *mm)
        else
                cpu_switch_mm(mm->pgd, mm);
 
-       flush_tlb_all();
+       local_flush_tlb_all();
        if (icache_is_aivivt())
-               __flush_icache_all();
+               __local_flush_icache_all();
 }
 
 void efi_virtmap_load(void)
index 08cafc518b9a57ad724530b9dbb144d50683c13f..0f03a8fe23144e777b3ead0a6ea18e038b5d1066 100644 (file)
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
 ENDPROC(ftrace_stub)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* save return value regs */
+       .macro save_return_regs
+       sub sp, sp, #64
+       stp x0, x1, [sp]
+       stp x2, x3, [sp, #16]
+       stp x4, x5, [sp, #32]
+       stp x6, x7, [sp, #48]
+       .endm
+
+       /* restore return value regs */
+       .macro restore_return_regs
+       ldp x0, x1, [sp]
+       ldp x2, x3, [sp, #16]
+       ldp x4, x5, [sp, #32]
+       ldp x6, x7, [sp, #48]
+       add sp, sp, #64
+       .endm
+
 /*
  * void ftrace_graph_caller(void)
  *
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
  * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
-       str     x0, [sp, #-16]!
+       save_return_regs
        mov     x0, x29                 //     parent's fp
        bl      ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
        mov     x30, x0                 // restore the original return address
-       ldr     x0, [sp], #16
+       restore_return_regs
        ret
 END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
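
The new save_return_regs/restore_return_regs macros exist because saving x0 alone is not enough: under AAPCS64, composite return values up to 16 bytes come back in x0 and x1, so the call to ftrace_return_to_handler() could clobber half of a traced function's result. A hypothetical C example of a return value that straddles two registers:

    /* A 16-byte composite is returned in x0/x1 under AAPCS64; with
     * only x0 preserved, intercepting the return would corrupt 'b'. */
    struct pair { long a, b; };

    struct pair make_pair(void)
    {
            return (struct pair){ 1, 2 };   /* x0 = 1, x1 = 2 */
    }

Saving the whole x0-x7 bank is a conservative superset of what AAPCS64 can use for integer results.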
index 4306c937b1ffc4fb224f88d176d30e8fb65a3c70..7ed3d75f630418b56a1add8c91b308b48cd69774 100644 (file)
@@ -430,6 +430,8 @@ el0_sync_compat:
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC32       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
+       cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
+       b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_32        // CP15 MRC/MCR trap
index c56956a16d3f0c9875e0bef4ff09e05e3fe2506d..4c46c54a3ad7ad817b8ba410565b8eff47cd3c08 100644 (file)
@@ -332,21 +332,15 @@ static inline void fpsimd_hotplug_init(void) { }
  */
 static int __init fpsimd_init(void)
 {
-       u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
-
-       if (pfr & (0xf << 16)) {
+       if (elf_hwcap & HWCAP_FP) {
+               fpsimd_pm_init();
+               fpsimd_hotplug_init();
+       } else {
                pr_notice("Floating-point is not implemented\n");
-               return 0;
        }
-       elf_hwcap |= HWCAP_FP;
 
-       if (pfr & (0xf << 20))
+       if (!(elf_hwcap & HWCAP_ASIMD))
                pr_notice("Advanced SIMD is not implemented\n");
-       else
-               elf_hwcap |= HWCAP_ASIMD;
-
-       fpsimd_pm_init();
-       fpsimd_hotplug_init();
 
        return 0;
 }
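
With the HWCAP bits now populated by the cpufeature code rather than here, user space already sees the same view through the auxiliary vector. A small standalone program using the standard getauxval() interface (the program itself is hypothetical; the constants come from the kernel UAPI headers):

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>          /* HWCAP_FP, HWCAP_ASIMD */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("fp: %s, asimd: %s\n",
                   (hwcap & HWCAP_FP)    ? "yes" : "no",
                   (hwcap & HWCAP_ASIMD) ? "yes" : "no");
            return 0;
    }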
index 90d09eddd5b27368e358efd44ab552db8330d39c..514c1cc9fdc5bc5da0a3b5a1cf30ae8523bb3f24 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 #include <asm/cputype.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
-#include <asm/thread_info.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/sysreg.h>
+#include <asm/thread_info.h>
 #include <asm/virt.h>
 
 #define __PHYS_OFFSET  (KERNEL_START - TEXT_OFFSET)
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
-#ifdef CONFIG_ARM64_64K_PAGES
-#define BLOCK_SHIFT    PAGE_SHIFT
-#define BLOCK_SIZE     PAGE_SIZE
-#define TABLE_SHIFT    PMD_SHIFT
-#else
-#define BLOCK_SHIFT    SECTION_SHIFT
-#define BLOCK_SIZE     SECTION_SIZE
-#define TABLE_SHIFT    PUD_SHIFT
-#endif
-
 #define KERNEL_START   _text
 #define KERNEL_END     _end
 
-/*
- * Initial memory map attributes.
- */
-#define PTE_FLAGS      PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
-#define PMD_FLAGS      PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
-
-#ifdef CONFIG_ARM64_64K_PAGES
-#define MM_MMUFLAGS    PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
-#else
-#define MM_MMUFLAGS    PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
-#endif
-
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -120,8 +100,8 @@ efi_head:
 #endif
 
 #ifdef CONFIG_EFI
-       .globl  stext_offset
-       .set    stext_offset, stext - efi_head
+       .globl  __efistub_stext_offset
+       .set    __efistub_stext_offset, stext - efi_head
        .align 3
 pe_header:
        .ascii  "PE"
@@ -144,8 +124,8 @@ optional_header:
        .long   _end - stext                    // SizeOfCode
        .long   0                               // SizeOfInitializedData
        .long   0                               // SizeOfUninitializedData
-       .long   efi_stub_entry - efi_head       // AddressOfEntryPoint
-       .long   stext_offset                    // BaseOfCode
+       .long   __efistub_entry - efi_head      // AddressOfEntryPoint
+       .long   __efistub_stext_offset          // BaseOfCode
 
 extra_header_fields:
        .quad   0                               // ImageBase
@@ -162,7 +142,7 @@ extra_header_fields:
        .long   _end - efi_head                 // SizeOfImage
 
        // Everything before the kernel image is considered part of the header
-       .long   stext_offset                    // SizeOfHeaders
+       .long   __efistub_stext_offset          // SizeOfHeaders
        .long   0                               // CheckSum
        .short  0xa                             // Subsystem (EFI application)
        .short  0                               // DllCharacteristics
@@ -207,9 +187,9 @@ section_table:
        .byte   0
        .byte   0                       // end of 0 padding of section name
        .long   _end - stext            // VirtualSize
-       .long   stext_offset            // VirtualAddress
+       .long   __efistub_stext_offset  // VirtualAddress
        .long   _edata - stext          // SizeOfRawData
-       .long   stext_offset            // PointerToRawData
+       .long   __efistub_stext_offset  // PointerToRawData
 
        .long   0               // PointerToRelocations (0 for executables)
        .long   0               // PointerToLineNumbers (0 for executables)
@@ -292,8 +272,11 @@ ENDPROC(preserve_boot_args)
  */
        .macro  create_pgd_entry, tbl, virt, tmp1, tmp2
        create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
-#if SWAPPER_PGTABLE_LEVELS == 3
-       create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+#if SWAPPER_PGTABLE_LEVELS > 3
+       create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
+#endif
+#if SWAPPER_PGTABLE_LEVELS > 2
+       create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
 #endif
        .endm
 
@@ -305,15 +288,15 @@ ENDPROC(preserve_boot_args)
  * Corrupts:   phys, start, end, pstate
  */
        .macro  create_block_map, tbl, flags, phys, start, end
-       lsr     \phys, \phys, #BLOCK_SHIFT
-       lsr     \start, \start, #BLOCK_SHIFT
+       lsr     \phys, \phys, #SWAPPER_BLOCK_SHIFT
+       lsr     \start, \start, #SWAPPER_BLOCK_SHIFT
        and     \start, \start, #PTRS_PER_PTE - 1       // table index
-       orr     \phys, \flags, \phys, lsl #BLOCK_SHIFT  // table entry
-       lsr     \end, \end, #BLOCK_SHIFT
+       orr     \phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT  // table entry
+       lsr     \end, \end, #SWAPPER_BLOCK_SHIFT
        and     \end, \end, #PTRS_PER_PTE - 1           // table end index
 9999:  str     \phys, [\tbl, \start, lsl #3]           // store the entry
        add     \start, \start, #1                      // next entry
-       add     \phys, \phys, #BLOCK_SIZE               // next block
+       add     \phys, \phys, #SWAPPER_BLOCK_SIZE               // next block
        cmp     \start, \end
        b.ls    9999b
        .endm
@@ -350,7 +333,7 @@ __create_page_tables:
        cmp     x0, x6
        b.lo    1b
 
-       ldr     x7, =MM_MMUFLAGS
+       ldr     x7, =SWAPPER_MM_MMUFLAGS
 
        /*
         * Create the identity mapping.
@@ -444,6 +427,9 @@ __mmap_switched:
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
        str_l   x24, memstart_addr, x6          // Save PHYS_OFFSET
        mov     x29, #0
+#ifdef CONFIG_KASAN
+       bl      kasan_early_init
+#endif
        b       start_kernel
 ENDPROC(__mmap_switched)
 
@@ -628,10 +614,17 @@ ENDPROC(__secondary_switched)
  *  x0  = SCTLR_EL1 value for turning on the MMU.
  *  x27 = *virtual* address to jump to upon completion
  *
- * other registers depend on the function called upon completion
+ * Other registers depend on the function called upon completion.
+ *
+ * Checks if the selected granule size is supported by the CPU.
+ * If it isn't, park the CPU.
  */
        .section        ".idmap.text", "ax"
 __enable_mmu:
+       mrs     x1, ID_AA64MMFR0_EL1
+       ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+       cmp     x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
+       b.ne    __no_granule_support
        ldr     x5, =vectors
        msr     vbar_el1, x5
        msr     ttbr0_el1, x25                  // load TTBR0
@@ -649,3 +642,8 @@ __enable_mmu:
        isb
        br      x27
 ENDPROC(__enable_mmu)
+
+__no_granule_support:
+       wfe
+       b       __no_granule_support
+ENDPROC(__no_granule_support)
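
In C terms, the new __enable_mmu check extracts the granule field from ID_AA64MMFR0_EL1 and parks the CPU unless it reads back as supported. A sketch using illustrative values for the 4KB granule (field at bits [31:28], 0x0 meaning supported); the real shift and expected value depend on the configured page size:

    #include <stdbool.h>
    #include <stdint.h>

    #define TGRAN_SHIFT     28      /* 4KB granule field (assumed) */
    #define TGRAN_SUPPORTED 0x0

    static bool granule_supported(uint64_t mmfr0)
    {
            return ((mmfr0 >> TGRAN_SHIFT) & 0xf) == TGRAN_SUPPORTED;
    }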
index bba85c8f80373937ef9fe746e1a2ed4fc39f58ee..b45c95d34b8323e74992e0a4a56e6da0e1257c60 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/ptrace.h>
 #include <linux/smp.h>
 
+#include <asm/compat.h>
 #include <asm/current.h>
 #include <asm/debug-monitors.h>
 #include <asm/hw_breakpoint.h>
@@ -163,6 +164,20 @@ enum hw_breakpoint_ops {
        HW_BREAKPOINT_RESTORE
 };
 
+static int is_compat_bp(struct perf_event *bp)
+{
+       struct task_struct *tsk = bp->hw.target;
+
+       /*
+        * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
+        * In this case, use the native interface, since we don't have
+        * the notion of a "compat CPU" and could end up relying on
+        * deprecated behaviour if we use unaligned watchpoints in
+        * AArch64 state.
+        */
+       return tsk && is_compat_thread(task_thread_info(tsk));
+}
+
 /**
  * hw_breakpoint_slot_setup - Find and setup a perf slot according to
  *                           operations
@@ -420,7 +435,7 @@ static int arch_build_bp_info(struct perf_event *bp)
         * Watchpoints can be of length 1, 2, 4 or 8 bytes.
         */
        if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
-               if (is_compat_task()) {
+               if (is_compat_bp(bp)) {
                        if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
                            info->ctrl.len != ARM_BREAKPOINT_LEN_4)
                                return -EINVAL;
@@ -477,7 +492,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
         * AArch32 tasks expect some simple alignment fixups, so emulate
         * that here.
         */
-       if (is_compat_task()) {
+       if (is_compat_bp(bp)) {
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
                        alignment_mask = 0x7;
                else
index 8fae0756e1759bad77406b0e3c76cad64b998ae0..bc2abb8b1599576ae2dec02bce0c46c48fc707dd 100644 (file)
 #define __HEAD_FLAG_BE 0
 #endif
 
-#define __HEAD_FLAGS   (__HEAD_FLAG_BE << 0)
+#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
+
+#define __HEAD_FLAGS   ((__HEAD_FLAG_BE << 0) |        \
+                        (__HEAD_FLAG_PAGE_SIZE << 1))
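
Worked through for the three granules, the new page-size flag is a two-bit field at bit 1 of the header flags (the encoding matches the arm64 booting documentation):

    /* (PAGE_SHIFT - 10) / 2, shifted into bits [2:1]:
     *   4KB  pages: PAGE_SHIFT = 12 -> (12 - 10) / 2 = 1
     *   16KB pages: PAGE_SHIFT = 14 -> (14 - 10) / 2 = 2
     *   64KB pages: PAGE_SHIFT = 16 -> (16 - 10) / 2 = 3
     * leaving 0 to mean "page size unspecified" in older images. */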
 
 /*
 * These will be output as part of the Image header, which should be little-endian
        _kernel_offset_le       = DATA_LE64(TEXT_OFFSET);       \
        _kernel_flags_le        = DATA_LE64(__HEAD_FLAGS);
 
+#ifdef CONFIG_EFI
+
+/*
+ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+ * isolate it from the kernel proper. The following symbols are legally
+ * accessed by the stub, so provide some aliases to make them accessible.
+ * Only include data symbols here, or text symbols of functions that are
+ * guaranteed to be safe when executed at another offset than they were
+ * linked at. The routines below are all implemented in assembler in a
+ * position-independent manner.
+ */
+__efistub_memcmp               = __pi_memcmp;
+__efistub_memchr               = __pi_memchr;
+__efistub_memcpy               = __pi_memcpy;
+__efistub_memmove              = __pi_memmove;
+__efistub_memset               = __pi_memset;
+__efistub_strlen               = __pi_strlen;
+__efistub_strcmp               = __pi_strcmp;
+__efistub_strncmp              = __pi_strncmp;
+__efistub___flush_dcache_area  = __pi___flush_dcache_area;
+
+#ifdef CONFIG_KASAN
+__efistub___memcpy             = __pi_memcpy;
+__efistub___memmove            = __pi_memmove;
+__efistub___memset             = __pi_memset;
+#endif
+
+__efistub__text                        = _text;
+__efistub__end                 = _end;
+__efistub__edata               = _edata;
+
+#endif
+
 #endif /* __ASM_IMAGE_H */
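
These assignments are plain linker-level aliases. The stub objects presumably have their symbols prefixed into the __efistub_ namespace (via objcopy in the stub Makefile), so a reference such as memcpy becomes __efistub_memcpy and binds to the position-independent implementation; a hypothetical illustration:

    /* In a stub translation unit, after symbol prefixing, this call
     * resolves through the alias above to __pi_memcpy: */
    extern void *__efistub_memcpy(void *dst, const void *src,
                                  unsigned long n);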
index 11dc3fd478537236387ff0dd795235d45c92e300..9f17ec071ee0e8a8b1380133cf319a9ad1c80de5 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 
 unsigned long irq_err_count;
 
@@ -54,64 +53,3 @@ void __init init_IRQ(void)
        if (!handle_arch_irq)
                panic("No interrupt controller found.");
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-       struct irq_data *d = irq_desc_get_irq_data(desc);
-       const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-       struct irq_chip *c;
-       bool ret = false;
-
-       /*
-        * If this is a per-CPU interrupt, or the affinity does not
-        * include this CPU, then we have nothing to do.
-        */
-       if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-               return false;
-
-       if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-               affinity = cpu_online_mask;
-               ret = true;
-       }
-
-       c = irq_data_get_irq_chip(d);
-       if (!c->irq_set_affinity)
-               pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-               cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-       return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-       unsigned int i;
-       struct irq_desc *desc;
-       unsigned long flags;
-
-       local_irq_save(flags);
-
-       for_each_irq_desc(i, desc) {
-               bool affinity_broken;
-
-               raw_spin_lock(&desc->lock);
-               affinity_broken = migrate_one_irq(desc);
-               raw_spin_unlock(&desc->lock);
-
-               if (affinity_broken)
-                       pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-                                           i, smp_processor_id());
-       }
-
-       local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
index 876eb8df50bf3355ac8432a2ddf2a5c46810a5de..f4bc779e62e887547b7a17b7672487f0851e1479 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/bitops.h>
 #include <linux/elf.h>
 #include <linux/gfp.h>
+#include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
 
 void *module_alloc(unsigned long size)
 {
-       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                                   GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
-                                   NUMA_NO_NODE, __builtin_return_address(0));
+       void *p;
+
+       p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
+                               GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+                               NUMA_NO_NODE, __builtin_return_address(0));
+
+       if (p && (kasan_module_alloc(p, size) < 0)) {
+               vfree(p);
+               return NULL;
+       }
+
+       return p;
 }
 
 enum aarch64_reloc_op {
index f9a74d4fff3b5ee8f37f1cdfedda00dfb8b5156d..5b1897e8ca2476b20336658c65e3e19665bf0cdd 100644 (file)
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-#define pr_fmt(fmt) "hw perfevents: " fmt
-
-#include <linux/bitmap.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/of_device.h>
-#include <linux/perf_event.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/uaccess.h>
 
-#include <asm/cputype.h>
-#include <asm/irq.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
-
-/*
- * ARMv8 supports a maximum of 32 events.
- * The cycle counter is included in this total.
- */
-#define ARMPMU_MAX_HWEVENTS            32
-
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
-
-int
-armpmu_get_max_events(void)
-{
-       int max_events = 0;
-
-       if (cpu_pmu != NULL)
-               max_events = cpu_pmu->num_events;
-
-       return max_events;
-}
-EXPORT_SYMBOL_GPL(armpmu_get_max_events);
-
-int perf_num_counters(void)
-{
-       return armpmu_get_max_events();
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
-#define HW_OP_UNSUPPORTED              0xFFFF
-
-#define C(_x) \
-       PERF_COUNT_HW_CACHE_##_x
-
-#define CACHE_OP_UNSUPPORTED           0xFFFF
-
-#define PERF_MAP_ALL_UNSUPPORTED                                       \
-       [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
-
-#define PERF_CACHE_MAP_ALL_UNSUPPORTED                                 \
-[0 ... C(MAX) - 1] = {                                                 \
-       [0 ... C(OP_MAX) - 1] = {                                       \
-               [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,       \
-       },                                                              \
-}
-
-static int
-armpmu_map_cache_event(const unsigned (*cache_map)
-                                     [PERF_COUNT_HW_CACHE_MAX]
-                                     [PERF_COUNT_HW_CACHE_OP_MAX]
-                                     [PERF_COUNT_HW_CACHE_RESULT_MAX],
-                      u64 config)
-{
-       unsigned int cache_type, cache_op, cache_result, ret;
-
-       cache_type = (config >>  0) & 0xff;
-       if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
-               return -EINVAL;
-
-       cache_op = (config >>  8) & 0xff;
-       if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
-               return -EINVAL;
-
-       cache_result = (config >> 16) & 0xff;
-       if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
-               return -EINVAL;
-
-       ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
-
-       if (ret == CACHE_OP_UNSUPPORTED)
-               return -ENOENT;
-
-       return ret;
-}
-
-static int
-armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
-{
-       int mapping;
-
-       if (config >= PERF_COUNT_HW_MAX)
-               return -EINVAL;
-
-       mapping = (*event_map)[config];
-       return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
-}
-
-static int
-armpmu_map_raw_event(u32 raw_event_mask, u64 config)
-{
-       return (int)(config & raw_event_mask);
-}
-
-static int map_cpu_event(struct perf_event *event,
-                        const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-                        const unsigned (*cache_map)
-                                       [PERF_COUNT_HW_CACHE_MAX]
-                                       [PERF_COUNT_HW_CACHE_OP_MAX]
-                                       [PERF_COUNT_HW_CACHE_RESULT_MAX],
-                        u32 raw_event_mask)
-{
-       u64 config = event->attr.config;
-
-       switch (event->attr.type) {
-       case PERF_TYPE_HARDWARE:
-               return armpmu_map_event(event_map, config);
-       case PERF_TYPE_HW_CACHE:
-               return armpmu_map_cache_event(cache_map, config);
-       case PERF_TYPE_RAW:
-               return armpmu_map_raw_event(raw_event_mask, config);
-       }
-
-       return -ENOENT;
-}
-
-int
-armpmu_event_set_period(struct perf_event *event,
-                       struct hw_perf_event *hwc,
-                       int idx)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       s64 left = local64_read(&hwc->period_left);
-       s64 period = hwc->sample_period;
-       int ret = 0;
-
-       if (unlikely(left <= -period)) {
-               left = period;
-               local64_set(&hwc->period_left, left);
-               hwc->last_period = period;
-               ret = 1;
-       }
-
-       if (unlikely(left <= 0)) {
-               left += period;
-               local64_set(&hwc->period_left, left);
-               hwc->last_period = period;
-               ret = 1;
-       }
-
-       /*
-        * Limit the maximum period to prevent the counter value
-        * from overtaking the one we are about to program. In
-        * effect we are reducing max_period to account for
-        * interrupt latency (and we are being very conservative).
-        */
-       if (left > (armpmu->max_period >> 1))
-               left = armpmu->max_period >> 1;
-
-       local64_set(&hwc->prev_count, (u64)-left);
-
-       armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
-
-       perf_event_update_userpage(event);
-
-       return ret;
-}
-
-u64
-armpmu_event_update(struct perf_event *event,
-                   struct hw_perf_event *hwc,
-                   int idx)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       u64 delta, prev_raw_count, new_raw_count;
-
-again:
-       prev_raw_count = local64_read(&hwc->prev_count);
-       new_raw_count = armpmu->read_counter(idx);
-
-       if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-                            new_raw_count) != prev_raw_count)
-               goto again;
-
-       delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
-
-       local64_add(delta, &event->count);
-       local64_sub(delta, &hwc->period_left);
-
-       return new_raw_count;
-}
-
-static void
-armpmu_read(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-
-       /* Don't read disabled counters! */
-       if (hwc->idx < 0)
-               return;
-
-       armpmu_event_update(event, hwc, hwc->idx);
-}
-
-static void
-armpmu_stop(struct perf_event *event, int flags)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct hw_perf_event *hwc = &event->hw;
-
-       /*
-        * ARM pmu always has to update the counter, so ignore
-        * PERF_EF_UPDATE, see comments in armpmu_start().
-        */
-       if (!(hwc->state & PERF_HES_STOPPED)) {
-               armpmu->disable(hwc, hwc->idx);
-               barrier(); /* why? */
-               armpmu_event_update(event, hwc, hwc->idx);
-               hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
-       }
-}
-
-static void
-armpmu_start(struct perf_event *event, int flags)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct hw_perf_event *hwc = &event->hw;
-
-       /*
-        * ARM pmu always has to reprogram the period, so ignore
-        * PERF_EF_RELOAD, see the comment below.
-        */
-       if (flags & PERF_EF_RELOAD)
-               WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
-       hwc->state = 0;
-       /*
-        * Set the period again. Some counters can't be stopped, so when we
-        * were stopped we simply disabled the IRQ source and the counter
-        * may have been left counting. If we don't do this step then we may
-        * get an interrupt too soon or *way* too late if the overflow has
-        * happened since disabling.
-        */
-       armpmu_event_set_period(event, hwc, hwc->idx);
-       armpmu->enable(hwc, hwc->idx);
-}
-
-static void
-armpmu_del(struct perf_event *event, int flags)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct pmu_hw_events *hw_events = armpmu->get_hw_events();
-       struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-
-       WARN_ON(idx < 0);
-
-       armpmu_stop(event, PERF_EF_UPDATE);
-       hw_events->events[idx] = NULL;
-       clear_bit(idx, hw_events->used_mask);
-
-       perf_event_update_userpage(event);
-}
-
-static int
-armpmu_add(struct perf_event *event, int flags)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct pmu_hw_events *hw_events = armpmu->get_hw_events();
-       struct hw_perf_event *hwc = &event->hw;
-       int idx;
-       int err = 0;
-
-       perf_pmu_disable(event->pmu);
-
-       /* If we don't have a space for the counter then finish early. */
-       idx = armpmu->get_event_idx(hw_events, hwc);
-       if (idx < 0) {
-               err = idx;
-               goto out;
-       }
-
-       /*
-        * If there is an event in the counter we are going to use then make
-        * sure it is disabled.
-        */
-       event->hw.idx = idx;
-       armpmu->disable(hwc, idx);
-       hw_events->events[idx] = event;
-
-       hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
-       if (flags & PERF_EF_START)
-               armpmu_start(event, PERF_EF_RELOAD);
-
-       /* Propagate our changes to the userspace mapping. */
-       perf_event_update_userpage(event);
-
-out:
-       perf_pmu_enable(event->pmu);
-       return err;
-}
-
-static int
-validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
-                               struct perf_event *event)
-{
-       struct arm_pmu *armpmu;
-       struct hw_perf_event fake_event = event->hw;
-       struct pmu *leader_pmu = event->group_leader->pmu;
-
-       if (is_software_event(event))
-               return 1;
-
-       /*
-        * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
-        * core perf code won't check that the pmu->ctx == leader->ctx
-        * until after pmu->event_init(event).
-        */
-       if (event->pmu != pmu)
-               return 0;
-
-       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
-               return 1;
-
-       if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
-               return 1;
-
-       armpmu = to_arm_pmu(event->pmu);
-       return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
-}
-
-static int
-validate_group(struct perf_event *event)
-{
-       struct perf_event *sibling, *leader = event->group_leader;
-       struct pmu_hw_events fake_pmu;
-       DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
-
-       /*
-        * Initialise the fake PMU. We only need to populate the
-        * used_mask for the purposes of validation.
-        */
-       memset(fake_used_mask, 0, sizeof(fake_used_mask));
-       fake_pmu.used_mask = fake_used_mask;
-
-       if (!validate_event(event->pmu, &fake_pmu, leader))
-               return -EINVAL;
-
-       list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-               if (!validate_event(event->pmu, &fake_pmu, sibling))
-                       return -EINVAL;
-       }
-
-       if (!validate_event(event->pmu, &fake_pmu, event))
-               return -EINVAL;
-
-       return 0;
-}
-
-static void
-armpmu_disable_percpu_irq(void *data)
-{
-       unsigned int irq = *(unsigned int *)data;
-       disable_percpu_irq(irq);
-}
-
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
-       int irq;
-       unsigned int i, irqs;
-       struct platform_device *pmu_device = armpmu->plat_device;
-
-       irqs = min(pmu_device->num_resources, num_possible_cpus());
-       if (!irqs)
-               return;
-
-       irq = platform_get_irq(pmu_device, 0);
-       if (irq <= 0)
-               return;
-
-       if (irq_is_percpu(irq)) {
-               on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
-               free_percpu_irq(irq, &cpu_hw_events);
-       } else {
-               for (i = 0; i < irqs; ++i) {
-                       int cpu = i;
-
-                       if (armpmu->irq_affinity)
-                               cpu = armpmu->irq_affinity[i];
-
-                       if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
-                               continue;
-                       irq = platform_get_irq(pmu_device, i);
-                       if (irq > 0)
-                               free_irq(irq, armpmu);
-               }
-       }
-}
-
-static void
-armpmu_enable_percpu_irq(void *data)
-{
-       unsigned int irq = *(unsigned int *)data;
-       enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
-       int err, irq;
-       unsigned int i, irqs;
-       struct platform_device *pmu_device = armpmu->plat_device;
-
-       if (!pmu_device)
-               return -ENODEV;
-
-       irqs = min(pmu_device->num_resources, num_possible_cpus());
-       if (!irqs) {
-               pr_err("no irqs for PMUs defined\n");
-               return -ENODEV;
-       }
-
-       irq = platform_get_irq(pmu_device, 0);
-       if (irq <= 0) {
-               pr_err("failed to get valid irq for PMU device\n");
-               return -ENODEV;
-       }
-
-       if (irq_is_percpu(irq)) {
-               err = request_percpu_irq(irq, armpmu->handle_irq,
-                               "arm-pmu", &cpu_hw_events);
-
-               if (err) {
-                       pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
-                                       irq);
-                       armpmu_release_hardware(armpmu);
-                       return err;
-               }
-
-               on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
-       } else {
-               for (i = 0; i < irqs; ++i) {
-                       int cpu = i;
-
-                       err = 0;
-                       irq = platform_get_irq(pmu_device, i);
-                       if (irq <= 0)
-                               continue;
-
-                       if (armpmu->irq_affinity)
-                               cpu = armpmu->irq_affinity[i];
-
-                       /*
-                        * If we have a single PMU interrupt that we can't shift,
-                        * assume that we're running on a uniprocessor machine and
-                        * continue. Otherwise, continue without this interrupt.
-                        */
-                       if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
-                               pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-                                               irq, cpu);
-                               continue;
-                       }
-
-                       err = request_irq(irq, armpmu->handle_irq,
-                                       IRQF_NOBALANCING | IRQF_NO_THREAD,
-                                       "arm-pmu", armpmu);
-                       if (err) {
-                               pr_err("unable to request IRQ%d for ARM PMU counters\n",
-                                               irq);
-                               armpmu_release_hardware(armpmu);
-                               return err;
-                       }
-
-                       cpumask_set_cpu(cpu, &armpmu->active_irqs);
-               }
-       }
-
-       return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       atomic_t *active_events  = &armpmu->active_events;
-       struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
-       if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
-               armpmu_release_hardware(armpmu);
-               mutex_unlock(pmu_reserve_mutex);
-       }
-}
-
-static int
-event_requires_mode_exclusion(struct perf_event_attr *attr)
-{
-       return attr->exclude_idle || attr->exclude_user ||
-              attr->exclude_kernel || attr->exclude_hv;
-}
-
-static int
-__hw_perf_event_init(struct perf_event *event)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct hw_perf_event *hwc = &event->hw;
-       int mapping, err;
-
-       mapping = armpmu->map_event(event);
-
-       if (mapping < 0) {
-               pr_debug("event %x:%llx not supported\n", event->attr.type,
-                        event->attr.config);
-               return mapping;
-       }
-
-       /*
-        * We don't assign an index until we actually place the event onto
-        * hardware. Use -1 to signify that we haven't decided where to put it
-        * yet. For SMP systems, each core has it's own PMU so we can't do any
-        * clever allocation or constraints checking at this point.
-        */
-       hwc->idx                = -1;
-       hwc->config_base        = 0;
-       hwc->config             = 0;
-       hwc->event_base         = 0;
-
-       /*
-        * Check whether we need to exclude the counter from certain modes.
-        */
-       if ((!armpmu->set_event_filter ||
-            armpmu->set_event_filter(hwc, &event->attr)) &&
-            event_requires_mode_exclusion(&event->attr)) {
-               pr_debug("ARM performance counters do not support mode exclusion\n");
-               return -EPERM;
-       }
-
-       /*
-        * Store the event encoding into the config_base field.
-        */
-       hwc->config_base            |= (unsigned long)mapping;
-
-       if (!hwc->sample_period) {
-               /*
-                * For non-sampling runs, limit the sample_period to half
-                * of the counter width. That way, the new counter value
-                * is far less likely to overtake the previous one unless
-                * you have some serious IRQ latency issues.
-                */
-               hwc->sample_period  = armpmu->max_period >> 1;
-               hwc->last_period    = hwc->sample_period;
-               local64_set(&hwc->period_left, hwc->sample_period);
-       }
-
-       err = 0;
-       if (event->group_leader != event) {
-               err = validate_group(event);
-               if (err)
-                       return -EINVAL;
-       }
-
-       return err;
-}
-
-static int armpmu_event_init(struct perf_event *event)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       int err = 0;
-       atomic_t *active_events = &armpmu->active_events;
-
-       if (armpmu->map_event(event) == -ENOENT)
-               return -ENOENT;
-
-       event->destroy = hw_perf_event_destroy;
-
-       if (!atomic_inc_not_zero(active_events)) {
-               mutex_lock(&armpmu->reserve_mutex);
-               if (atomic_read(active_events) == 0)
-                       err = armpmu_reserve_hardware(armpmu);
-
-               if (!err)
-                       atomic_inc(active_events);
-               mutex_unlock(&armpmu->reserve_mutex);
-       }
 
-       if (err)
-               return err;
-
-       err = __hw_perf_event_init(event);
-       if (err)
-               hw_perf_event_destroy(event);
-
-       return err;
-}
-
-static void armpmu_enable(struct pmu *pmu)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(pmu);
-       struct pmu_hw_events *hw_events = armpmu->get_hw_events();
-       int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
-
-       if (enabled)
-               armpmu->start();
-}
-
-static void armpmu_disable(struct pmu *pmu)
-{
-       struct arm_pmu *armpmu = to_arm_pmu(pmu);
-       armpmu->stop();
-}
-
-static void __init armpmu_init(struct arm_pmu *armpmu)
-{
-       atomic_set(&armpmu->active_events, 0);
-       mutex_init(&armpmu->reserve_mutex);
-
-       armpmu->pmu = (struct pmu) {
-               .pmu_enable     = armpmu_enable,
-               .pmu_disable    = armpmu_disable,
-               .event_init     = armpmu_event_init,
-               .add            = armpmu_add,
-               .del            = armpmu_del,
-               .start          = armpmu_start,
-               .stop           = armpmu_stop,
-               .read           = armpmu_read,
-       };
-}
-
-int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
-{
-       armpmu_init(armpmu);
-       return perf_pmu_register(&armpmu->pmu, name, type);
-}
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
 
 /*
  * ARMv8 PMUv3 Performance Events handling code.
@@ -708,6 +69,21 @@ enum armv8_pmuv3_perf_types {
        ARMV8_PMUV3_PERFCTR_BUS_CYCLES                          = 0x1D,
 };
 
+/* ARMv8 Cortex-A53 specific event types. */
+enum armv8_a53_pmu_perf_types {
+       ARMV8_A53_PERFCTR_PREFETCH_LINEFILL                     = 0xC2,
+};
+
+/* ARMv8 Cortex-A57 specific event types. */
+enum armv8_a57_perf_types {
+       ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD                   = 0x40,
+       ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST                   = 0x41,
+       ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD                   = 0x42,
+       ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST                   = 0x43,
+       ARMV8_A57_PERFCTR_DTLB_REFILL_LD                        = 0x4c,
+       ARMV8_A57_PERFCTR_DTLB_REFILL_ST                        = 0x4d,
+};
+
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
@@ -718,6 +94,28 @@ static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
+/* ARM Cortex-A53 HW events mapping. */
+static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
+       PERF_MAP_ALL_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+       [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV8_PMUV3_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]              = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
+       PERF_MAP_ALL_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+       [PERF_COUNT_HW_CACHE_MISSES]            = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]              = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+};
+
 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -734,12 +132,60 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
+static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                             [PERF_COUNT_HW_CACHE_OP_MAX]
+                                             [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+       [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+       [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+       [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+       [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+       [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,
+
+       [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
+       [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+
+       [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+
+       [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+       [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+       [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+       [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                             [PERF_COUNT_HW_CACHE_OP_MAX]
+                                             [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+       [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
+       [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
+       [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
+       [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,
+
+       [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
+       [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+
+       [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
+       [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV8_A57_PERFCTR_DTLB_REFILL_ST,
+
+       [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+
+       [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+       [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+       [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+       [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
 /*
  * Perf Events' indices
  */
 #define        ARMV8_IDX_CYCLE_COUNTER 0
 #define        ARMV8_IDX_COUNTER0      1
-#define        ARMV8_IDX_COUNTER_LAST  (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+#define        ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
+       (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
 
 #define        ARMV8_MAX_COUNTERS      32
 #define        ARMV8_COUNTER_MASK      (ARMV8_MAX_COUNTERS - 1)
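
With ARMV8_IDX_COUNTER_LAST() now taking the PMU instance, the index arithmetic is easiest to see with a worked (hypothetical) example:

    /* A PMU with six event counters plus the cycle counter:
     *   cpu_pmu->num_events = 7
     *   ARMV8_IDX_CYCLE_COUNTER         = 0
     *   ARMV8_IDX_COUNTER0              = 1
     *   ARMV8_IDX_COUNTER_LAST(cpu_pmu) = 0 + 7 - 1 = 6
     * and ARMV8_IDX_TO_COUNTER() maps perf indices 1..6 onto
     * hardware counters 0..5. */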
@@ -805,49 +251,34 @@ static inline int armv8pmu_has_overflowed(u32 pmovsr)
        return pmovsr & ARMV8_OVERFLOWED_MASK;
 }
 
-static inline int armv8pmu_counter_valid(int idx)
+static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
 {
-       return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST;
+       return idx >= ARMV8_IDX_CYCLE_COUNTER &&
+               idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
 }
 
 static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 {
-       int ret = 0;
-       u32 counter;
-
-       if (!armv8pmu_counter_valid(idx)) {
-               pr_err("CPU%u checking wrong counter %d overflow status\n",
-                       smp_processor_id(), idx);
-       } else {
-               counter = ARMV8_IDX_TO_COUNTER(idx);
-               ret = pmnc & BIT(counter);
-       }
-
-       return ret;
+       return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }
 
 static inline int armv8pmu_select_counter(int idx)
 {
-       u32 counter;
-
-       if (!armv8pmu_counter_valid(idx)) {
-               pr_err("CPU%u selecting wrong PMNC counter %d\n",
-                       smp_processor_id(), idx);
-               return -EINVAL;
-       }
-
-       counter = ARMV8_IDX_TO_COUNTER(idx);
+       u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmselr_el0, %0" :: "r" (counter));
        isb();
 
        return idx;
 }
 
-static inline u32 armv8pmu_read_counter(int idx)
+static inline u32 armv8pmu_read_counter(struct perf_event *event)
 {
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
        u32 value = 0;
 
-       if (!armv8pmu_counter_valid(idx))
+       if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
@@ -858,9 +289,13 @@ static inline u32 armv8pmu_read_counter(int idx)
        return value;
 }
 
-static inline void armv8pmu_write_counter(int idx, u32 value)
+static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 {
-       if (!armv8pmu_counter_valid(idx))
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       int idx = hwc->idx;
+
+       if (!armv8pmu_counter_valid(cpu_pmu, idx))
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        else if (idx == ARMV8_IDX_CYCLE_COUNTER)
@@ -879,65 +314,34 @@ static inline void armv8pmu_write_evtype(int idx, u32 val)
 
 static inline int armv8pmu_enable_counter(int idx)
 {
-       u32 counter;
-
-       if (!armv8pmu_counter_valid(idx)) {
-               pr_err("CPU%u enabling wrong PMNC counter %d\n",
-                       smp_processor_id(), idx);
-               return -EINVAL;
-       }
-
-       counter = ARMV8_IDX_TO_COUNTER(idx);
+       u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
        return idx;
 }
 
 static inline int armv8pmu_disable_counter(int idx)
 {
-       u32 counter;
-
-       if (!armv8pmu_counter_valid(idx)) {
-               pr_err("CPU%u disabling wrong PMNC counter %d\n",
-                       smp_processor_id(), idx);
-               return -EINVAL;
-       }
-
-       counter = ARMV8_IDX_TO_COUNTER(idx);
+       u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
        return idx;
 }
 
 static inline int armv8pmu_enable_intens(int idx)
 {
-       u32 counter;
-
-       if (!armv8pmu_counter_valid(idx)) {
-               pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
-                       smp_processor_id(), idx);
-               return -EINVAL;
-       }
-
-       counter = ARMV8_IDX_TO_COUNTER(idx);
+       u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
        return idx;
 }
 
 static inline int armv8pmu_disable_intens(int idx)
 {
-       u32 counter;
-
-       if (!armv8pmu_counter_valid(idx)) {
-               pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
-                       smp_processor_id(), idx);
-               return -EINVAL;
-       }
-
-       counter = ARMV8_IDX_TO_COUNTER(idx);
+       u32 counter = ARMV8_IDX_TO_COUNTER(idx);
        asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
        isb();
+
        return idx;
 }
 
@@ -955,10 +359,13 @@ static inline u32 armv8pmu_getreset_flags(void)
        return value;
 }
 
-static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void armv8pmu_enable_event(struct perf_event *event)
 {
        unsigned long flags;
-       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       struct hw_perf_event *hwc = &event->hw;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+       int idx = hwc->idx;
 
        /*
         * Enable counter and interrupt, and set the counter to count
@@ -989,10 +396,13 @@ static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx)
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void armv8pmu_disable_event(struct perf_event *event)
 {
        unsigned long flags;
-       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       struct hw_perf_event *hwc = &event->hw;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+       int idx = hwc->idx;
 
        /*
         * Disable counter and interrupt
@@ -1016,7 +426,8 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
 {
        u32 pmovsr;
        struct perf_sample_data data;
-       struct pmu_hw_events *cpuc;
+       struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+       struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;
 
@@ -1036,7 +447,6 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
         */
        regs = get_irq_regs();
 
-       cpuc = this_cpu_ptr(&cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
@@ -1053,13 +463,13 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx);
+               armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
-               if (!armpmu_event_set_period(event, hwc, idx))
+               if (!armpmu_event_set_period(event))
                        continue;
 
                if (perf_event_overflow(event, &data, regs))
-                       cpu_pmu->disable(hwc, idx);
+                       cpu_pmu->disable(event);
        }
 
        /*
@@ -1074,10 +484,10 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
        return IRQ_HANDLED;
 }
 
-static void armv8pmu_start(void)
+static void armv8pmu_start(struct arm_pmu *cpu_pmu)
 {
        unsigned long flags;
-       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Enable all counters */
@@ -1085,10 +495,10 @@ static void armv8pmu_start(void)
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void armv8pmu_stop(void)
+static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
 {
        unsigned long flags;
-       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
 
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        /* Disable all counters */
@@ -1097,10 +507,12 @@ static void armv8pmu_stop(void)
 }
 
 static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
-                                 struct hw_perf_event *event)
+                                 struct perf_event *event)
 {
        int idx;
-       unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
 
        /* Always place a cycle counter into the cycle counter. */
        if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
@@ -1151,11 +563,14 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 
 static void armv8pmu_reset(void *info)
 {
+       struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events;
 
        /* The counter and interrupt enable registers are unknown at reset. */
-       for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
-               armv8pmu_disable_event(NULL, idx);
+       for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+               armv8pmu_disable_counter(idx);
+               armv8pmu_disable_intens(idx);
+       }
 
        /* Initialize & Reset PMNC: C and P bits. */
        armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
@@ -1166,169 +581,104 @@ static void armv8pmu_reset(void *info)
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
 {
-       return map_cpu_event(event, &armv8_pmuv3_perf_map,
+       return armpmu_map_event(event, &armv8_pmuv3_perf_map,
                                &armv8_pmuv3_perf_cache_map,
                                ARMV8_EVTYPE_EVENT);
 }
 
-static struct arm_pmu armv8pmu = {
-       .handle_irq             = armv8pmu_handle_irq,
-       .enable                 = armv8pmu_enable_event,
-       .disable                = armv8pmu_disable_event,
-       .read_counter           = armv8pmu_read_counter,
-       .write_counter          = armv8pmu_write_counter,
-       .get_event_idx          = armv8pmu_get_event_idx,
-       .start                  = armv8pmu_start,
-       .stop                   = armv8pmu_stop,
-       .reset                  = armv8pmu_reset,
-       .max_period             = (1LLU << 32) - 1,
-};
+static int armv8_a53_map_event(struct perf_event *event)
+{
+       return armpmu_map_event(event, &armv8_a53_perf_map,
+                               &armv8_a53_perf_cache_map,
+                               ARMV8_EVTYPE_EVENT);
+}
 
-static u32 __init armv8pmu_read_num_pmnc_events(void)
+static int armv8_a57_map_event(struct perf_event *event)
 {
-       u32 nb_cnt;
+       return armpmu_map_event(event, &armv8_a57_perf_map,
+                               &armv8_a57_perf_cache_map,
+                               ARMV8_EVTYPE_EVENT);
+}
+
+static void armv8pmu_read_num_pmnc_events(void *info)
+{
+       int *nb_cnt = info;
 
        /* Read the nb of CNTx counters supported from PMNC */
-       nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+       *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
 
-       /* Add the CPU cycles counter and return */
-       return nb_cnt + 1;
+       /* Add the CPU cycles counter */
+       *nb_cnt += 1;
 }
 
-static struct arm_pmu *__init armv8_pmuv3_pmu_init(void)
+static int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
 {
-       armv8pmu.name                   = "arm/armv8-pmuv3";
-       armv8pmu.map_event              = armv8_pmuv3_map_event;
-       armv8pmu.num_events             = armv8pmu_read_num_pmnc_events();
-       armv8pmu.set_event_filter       = armv8pmu_set_event_filter;
-       return &armv8pmu;
+       return smp_call_function_any(&arm_pmu->supported_cpus,
+                                   armv8pmu_read_num_pmnc_events,
+                                   &arm_pmu->num_events, 1);
 }
 
-/*
- * Ensure the PMU has sane values out of reset.
- * This requires SMP to be available, so exists as a separate initcall.
- */
-static int __init
-cpu_pmu_reset(void)
+static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
 {
-       if (cpu_pmu && cpu_pmu->reset)
-               return on_each_cpu(cpu_pmu->reset, NULL, 1);
-       return 0;
+       cpu_pmu->handle_irq             = armv8pmu_handle_irq;
+       cpu_pmu->enable                 = armv8pmu_enable_event;
+       cpu_pmu->disable                = armv8pmu_disable_event;
+       cpu_pmu->read_counter           = armv8pmu_read_counter;
+       cpu_pmu->write_counter          = armv8pmu_write_counter;
+       cpu_pmu->get_event_idx          = armv8pmu_get_event_idx;
+       cpu_pmu->start                  = armv8pmu_start;
+       cpu_pmu->stop                   = armv8pmu_stop;
+       cpu_pmu->reset                  = armv8pmu_reset;
+       cpu_pmu->max_period             = (1LLU << 32) - 1;
+       cpu_pmu->set_event_filter       = armv8pmu_set_event_filter;
 }
-arch_initcall(cpu_pmu_reset);
-
-/*
- * PMU platform driver and devicetree bindings.
- */
-static const struct of_device_id armpmu_of_device_ids[] = {
-       {.compatible = "arm,armv8-pmuv3"},
-       {},
-};
 
-static int armpmu_device_probe(struct platform_device *pdev)
+static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
 {
-       int i, irq, *irqs;
-
-       if (!cpu_pmu)
-               return -ENODEV;
-
-       /* Don't bother with PPIs; they're already affine */
-       irq = platform_get_irq(pdev, 0);
-       if (irq >= 0 && irq_is_percpu(irq))
-               goto out;
-
-       irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-       if (!irqs)
-               return -ENOMEM;
-
-       for (i = 0; i < pdev->num_resources; ++i) {
-               struct device_node *dn;
-               int cpu;
-
-               dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
-                                     i);
-               if (!dn) {
-                       pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
-                               of_node_full_name(pdev->dev.of_node), i);
-                       break;
-               }
-
-               for_each_possible_cpu(cpu)
-                       if (dn == of_cpu_device_node_get(cpu))
-                               break;
-
-               if (cpu >= nr_cpu_ids) {
-                       pr_warn("Failed to find logical CPU for %s\n",
-                               dn->name);
-                       of_node_put(dn);
-                       break;
-               }
-               of_node_put(dn);
-
-               irqs[i] = cpu;
-       }
-
-       if (i == pdev->num_resources)
-               cpu_pmu->irq_affinity = irqs;
-       else
-               kfree(irqs);
-
-out:
-       cpu_pmu->plat_device = pdev;
-       return 0;
+       armv8_pmu_init(cpu_pmu);
+       cpu_pmu->name                   = "armv8_pmuv3";
+       cpu_pmu->map_event              = armv8_pmuv3_map_event;
+       return armv8pmu_probe_num_events(cpu_pmu);
 }
 
-static struct platform_driver armpmu_driver = {
-       .driver         = {
-               .name   = "arm-pmu",
-               .of_match_table = armpmu_of_device_ids,
-       },
-       .probe          = armpmu_device_probe,
-};
-
-static int __init register_pmu_driver(void)
+static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return platform_driver_register(&armpmu_driver);
+       armv8_pmu_init(cpu_pmu);
+       cpu_pmu->name                   = "armv8_cortex_a53";
+       cpu_pmu->map_event              = armv8_a53_map_event;
+       return armv8pmu_probe_num_events(cpu_pmu);
 }
-device_initcall(register_pmu_driver);
 
-static struct pmu_hw_events *armpmu_get_cpu_events(void)
+static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return this_cpu_ptr(&cpu_hw_events);
+       armv8_pmu_init(cpu_pmu);
+       cpu_pmu->name                   = "armv8_cortex_a57";
+       cpu_pmu->map_event              = armv8_a57_map_event;
+       return armv8pmu_probe_num_events(cpu_pmu);
 }
 
-static void __init cpu_pmu_init(struct arm_pmu *armpmu)
-{
-       int cpu;
-       for_each_possible_cpu(cpu) {
-               struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
-               events->events = per_cpu(hw_events, cpu);
-               events->used_mask = per_cpu(used_mask, cpu);
-               raw_spin_lock_init(&events->pmu_lock);
-       }
-       armpmu->get_hw_events = armpmu_get_cpu_events;
-}
+static const struct of_device_id armv8_pmu_of_device_ids[] = {
+       {.compatible = "arm,armv8-pmuv3",       .data = armv8_pmuv3_init},
+       {.compatible = "arm,cortex-a53-pmu",    .data = armv8_a53_pmu_init},
+       {.compatible = "arm,cortex-a57-pmu",    .data = armv8_a57_pmu_init},
+       {},
+};
 
-static int __init init_hw_perf_events(void)
+static int armv8_pmu_device_probe(struct platform_device *pdev)
 {
-       u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
-
-       switch ((dfr >> 8) & 0xf) {
-       case 0x1:       /* PMUv3 */
-               cpu_pmu = armv8_pmuv3_pmu_init();
-               break;
-       }
+       return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
+}
 
-       if (cpu_pmu) {
-               pr_info("enabled with %s PMU driver, %d counters available\n",
-                       cpu_pmu->name, cpu_pmu->num_events);
-               cpu_pmu_init(cpu_pmu);
-               armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
-       } else {
-               pr_info("no hardware support available\n");
-       }
+static struct platform_driver armv8_pmu_driver = {
+       .driver         = {
+               .name   = "armv8-pmu",
+               .of_match_table = armv8_pmu_of_device_ids,
+       },
+       .probe          = armv8_pmu_device_probe,
+};
 
-       return 0;
+static int __init register_armv8_pmu_driver(void)
+{
+       return platform_driver_register(&armv8_pmu_driver);
 }
-early_initcall(init_hw_perf_events);
-
+device_initcall(register_armv8_pmu_driver);
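
The refactoring above removes the driver's global state: the static armv8pmu descriptor and the open-coded platform probe give way to per-variant init functions selected through the .data pointer of the of_device_id table. A rough userspace sketch of that probe/dispatch pattern follows; every name and the pretend devicetree string are invented for illustration, none of this is kernel API:

    #include <stdio.h>
    #include <string.h>

    struct pmu_ops {
            const char *name;
            void (*start)(void);
    };

    static void common_start(void)
    {
            puts("starting PMU");
    }

    /* Shared defaults, playing the role of armv8_pmu_init() above. */
    static int pmuv3_init(struct pmu_ops *ops)
    {
            ops->name  = "armv8_pmuv3";
            ops->start = common_start;
            return 0;
    }

    /* Variant init: take the defaults, then override only what differs. */
    static int a53_init(struct pmu_ops *ops)
    {
            pmuv3_init(ops);
            ops->name = "armv8_cortex_a53";
            return 0;
    }

    /* Mirrors the of_device_id table: compatible string -> init function. */
    struct match {
            const char *compatible;
            int (*init)(struct pmu_ops *);
    };

    static const struct match matches[] = {
            { "arm,armv8-pmuv3",    pmuv3_init },
            { "arm,cortex-a53-pmu", a53_init   },
            { NULL, NULL },
    };

    int main(void)
    {
            const char *dt_compat = "arm,cortex-a53-pmu"; /* pretend DT property */
            struct pmu_ops ops;
            const struct match *m;

            for (m = matches; m->compatible; m++) {
                    if (strcmp(m->compatible, dt_compat))
                            continue;
                    m->init(&ops);
                    printf("probed %s\n", ops.name);
                    ops.start();
            }
            return 0;
    }

Keeping the shared defaults in one helper and letting each variant override only its name and event map is what lets a single driver cover PMUv3, Cortex-A53 and Cortex-A57.
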
index 223b093c9440933f46fb2aa4cbc83729be1e6f57..f75b540bc3b4b0daae4a8773cd0eddf1a86a90aa 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/personality.h>
 #include <linux/notifier.h>
+#include <trace/events/power.h>
 
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
@@ -75,8 +76,10 @@ void arch_cpu_idle(void)
         * This should do all the clock switching and wait for interrupt
         * tricks
         */
+       trace_cpu_idle_rcuidle(1, smp_processor_id());
        cpu_do_idle();
        local_irq_enable();
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
index 6bab21f84a9ff38402e70345016ed50ae8e95e30..556301fbeeefba1f1aedb7fd3f29c07bd80d3153 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/console.h>
 #include <linux/cache.h>
 #include <linux/bootmem.h>
-#include <linux/seq_file.h>
 #include <linux/screen_info.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
@@ -44,7 +43,6 @@
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/efi.h>
-#include <linux/personality.h>
 #include <linux/psci.h>
 
 #include <asm/acpi.h>
@@ -54,6 +52,7 @@
 #include <asm/elf.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
+#include <asm/kasan.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
 
-unsigned long elf_hwcap __read_mostly;
-EXPORT_SYMBOL_GPL(elf_hwcap);
-
-#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_HWCAP_DEFAULT       \
-                               (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
-                                COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
-                                COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
-                                COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
-                                COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
-                                COMPAT_HWCAP_LPAE)
-unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
-unsigned int compat_elf_hwcap2 __read_mostly;
-#endif
-
-DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
-
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -195,104 +177,6 @@ static void __init smp_build_mpidr_hash(void)
        __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
 
-static void __init setup_processor(void)
-{
-       u64 features;
-       s64 block;
-       u32 cwg;
-       int cls;
-
-       printk("CPU: AArch64 Processor [%08x] revision %d\n",
-              read_cpuid_id(), read_cpuid_id() & 15);
-
-       sprintf(init_utsname()->machine, ELF_PLATFORM);
-       elf_hwcap = 0;
-
-       cpuinfo_store_boot_cpu();
-
-       /*
-        * Check for sane CTR_EL0.CWG value.
-        */
-       cwg = cache_type_cwg();
-       cls = cache_line_size();
-       if (!cwg)
-               pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
-                       cls);
-       if (L1_CACHE_BYTES < cls)
-               pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
-                       L1_CACHE_BYTES, cls);
-
-       /*
-        * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
-        * The blocks we test below represent incremental functionality
-        * for non-negative values. Negative values are reserved.
-        */
-       features = read_cpuid(ID_AA64ISAR0_EL1);
-       block = cpuid_feature_extract_field(features, 4);
-       if (block > 0) {
-               switch (block) {
-               default:
-               case 2:
-                       elf_hwcap |= HWCAP_PMULL;
-               case 1:
-                       elf_hwcap |= HWCAP_AES;
-               case 0:
-                       break;
-               }
-       }
-
-       if (cpuid_feature_extract_field(features, 8) > 0)
-               elf_hwcap |= HWCAP_SHA1;
-
-       if (cpuid_feature_extract_field(features, 12) > 0)
-               elf_hwcap |= HWCAP_SHA2;
-
-       if (cpuid_feature_extract_field(features, 16) > 0)
-               elf_hwcap |= HWCAP_CRC32;
-
-       block = cpuid_feature_extract_field(features, 20);
-       if (block > 0) {
-               switch (block) {
-               default:
-               case 2:
-                       elf_hwcap |= HWCAP_ATOMICS;
-               case 1:
-                       /* RESERVED */
-               case 0:
-                       break;
-               }
-       }
-
-#ifdef CONFIG_COMPAT
-       /*
-        * ID_ISAR5_EL1 carries similar information as above, but pertaining to
-        * the AArch32 32-bit execution state.
-        */
-       features = read_cpuid(ID_ISAR5_EL1);
-       block = cpuid_feature_extract_field(features, 4);
-       if (block > 0) {
-               switch (block) {
-               default:
-               case 2:
-                       compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
-               case 1:
-                       compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
-               case 0:
-                       break;
-               }
-       }
-
-       if (cpuid_feature_extract_field(features, 8) > 0)
-               compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
-
-       if (cpuid_feature_extract_field(features, 12) > 0)
-               compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
-
-       if (cpuid_feature_extract_field(features, 16) > 0)
-               compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
-#endif
-}
-
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
        void *dt_virt = fixmap_remap_fdt(dt_phys);
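
The deleted setup_processor() keys hwcaps off 4-bit signed ID-register fields, where negative values are reserved. A standalone sketch of that extraction; the shift-up/arithmetic-shift-down trick mirrors cpuid_feature_extract_field(), and the register value below is made up:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Signed 4-bit field extraction: shift the field to the top of the
     * register, then arithmetic-shift back down so reserved (negative)
     * encodings come out negative. Relies on arithmetic right shift of
     * signed values, as GCC/Clang implement it.
     */
    static int feature_field(uint64_t reg, int shift)
    {
            return (int64_t)(reg << (64 - 4 - shift)) >> 60;
    }

    int main(void)
    {
            uint64_t isar0 = 0x0000000000000210ULL; /* invented: AES block = 1, SHA1 = 2 */

            if (feature_field(isar0, 4) >= 1)
                    puts("HWCAP_AES");
            if (feature_field(isar0, 4) >= 2)
                    puts("HWCAP_PMULL");    /* block value 2 implies both */
            if (feature_field(isar0, 8) > 0)
                    puts("HWCAP_SHA1");
            return 0;
    }
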
@@ -404,8 +288,9 @@ u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 void __init setup_arch(char **cmdline_p)
 {
-       setup_processor();
+       pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
 
+       sprintf(init_utsname()->machine, ELF_PLATFORM);
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
@@ -434,6 +319,9 @@ void __init setup_arch(char **cmdline_p)
 
        paging_init();
        relocate_initrd();
+
+       kasan_init();
+
        request_standard_resources();
 
        early_ioremap_reset();
@@ -491,124 +379,3 @@ static int __init topology_init(void)
        return 0;
 }
 subsys_initcall(topology_init);
-
-static const char *hwcap_str[] = {
-       "fp",
-       "asimd",
-       "evtstrm",
-       "aes",
-       "pmull",
-       "sha1",
-       "sha2",
-       "crc32",
-       "atomics",
-       NULL
-};
-
-#ifdef CONFIG_COMPAT
-static const char *compat_hwcap_str[] = {
-       "swp",
-       "half",
-       "thumb",
-       "26bit",
-       "fastmult",
-       "fpa",
-       "vfp",
-       "edsp",
-       "java",
-       "iwmmxt",
-       "crunch",
-       "thumbee",
-       "neon",
-       "vfpv3",
-       "vfpv3d16",
-       "tls",
-       "vfpv4",
-       "idiva",
-       "idivt",
-       "vfpd32",
-       "lpae",
-       "evtstrm"
-};
-
-static const char *compat_hwcap2_str[] = {
-       "aes",
-       "pmull",
-       "sha1",
-       "sha2",
-       "crc32",
-       NULL
-};
-#endif /* CONFIG_COMPAT */
-
-static int c_show(struct seq_file *m, void *v)
-{
-       int i, j;
-
-       for_each_online_cpu(i) {
-               struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
-               u32 midr = cpuinfo->reg_midr;
-
-               /*
-                * glibc reads /proc/cpuinfo to determine the number of
-                * online processors, looking for lines beginning with
-                * "processor".  Give glibc what it expects.
-                */
-               seq_printf(m, "processor\t: %d\n", i);
-
-               /*
-                * Dump out the common processor features in a single line.
-                * Userspace should read the hwcaps with getauxval(AT_HWCAP)
-                * rather than attempting to parse this, but there's a body of
-                * software which does already (at least for 32-bit).
-                */
-               seq_puts(m, "Features\t:");
-               if (personality(current->personality) == PER_LINUX32) {
-#ifdef CONFIG_COMPAT
-                       for (j = 0; compat_hwcap_str[j]; j++)
-                               if (compat_elf_hwcap & (1 << j))
-                                       seq_printf(m, " %s", compat_hwcap_str[j]);
-
-                       for (j = 0; compat_hwcap2_str[j]; j++)
-                               if (compat_elf_hwcap2 & (1 << j))
-                                       seq_printf(m, " %s", compat_hwcap2_str[j]);
-#endif /* CONFIG_COMPAT */
-               } else {
-                       for (j = 0; hwcap_str[j]; j++)
-                               if (elf_hwcap & (1 << j))
-                                       seq_printf(m, " %s", hwcap_str[j]);
-               }
-               seq_puts(m, "\n");
-
-               seq_printf(m, "CPU implementer\t: 0x%02x\n",
-                          MIDR_IMPLEMENTOR(midr));
-               seq_printf(m, "CPU architecture: 8\n");
-               seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
-               seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
-               seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
-       }
-
-       return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-       return *pos < 1 ? (void *)1 : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       ++*pos;
-       return NULL;
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
-       .start  = c_start,
-       .next   = c_next,
-       .stop   = c_stop,
-       .show   = c_show
-};
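
The removed c_show() renders the hwcap mask as feature strings for /proc/cpuinfo; as its comment notes, userspace should really read the mask with getauxval(AT_HWCAP). A minimal decoder in the same spirit, using an invented sample mask in place of the real one:

    #include <stdio.h>

    /* Same table shape as the removed hwcap_str[]: bit N <-> string N. */
    static const char *hwcap_str[] = {
            "fp", "asimd", "evtstrm", "aes", "pmull",
            "sha1", "sha2", "crc32", "atomics", NULL
    };

    int main(void)
    {
            /* Sample mask; on arm64, getauxval(AT_HWCAP) returns the real one. */
            unsigned long hwcap = (1UL << 0) | (1UL << 3) | (1UL << 7);
            int j;

            printf("Features\t:");
            for (j = 0; hwcap_str[j]; j++)
                    if (hwcap & (1UL << j))
                            printf(" %s", hwcap_str[j]);
            printf("\n");
            return 0;
    }
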
index dbdaacddd9a562bc709193284dbe7fdaf41aed24..2bbdc0e4fd140581706d17b54b8a9089b77e7630 100644 (file)
@@ -142,22 +142,27 @@ asmlinkage void secondary_start_kernel(void)
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
-       cpumask_set_cpu(cpu, mm_cpumask(mm));
 
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-       printk("CPU%u: Booted secondary processor\n", cpu);
 
        /*
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
-       flush_tlb_all();
+       local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 
        preempt_disable();
        trace_hardirqs_off();
 
+       /*
+        * If the system has established the capabilities, make sure
+        * this CPU ticks all of those. If it doesn't, the CPU will
+        * fail to come online.
+        */
+       verify_local_cpu_capabilities();
+
        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();
 
@@ -178,6 +183,8 @@ asmlinkage void secondary_start_kernel(void)
         * the CPU migration code to notice that the CPU is online
         * before we continue.
         */
+       pr_info("CPU%u: Booted secondary processor [%08x]\n",
+                                        cpu, read_cpuid_id());
        set_cpu_online(cpu, true);
        complete(&cpu_running);
 
@@ -232,12 +239,7 @@ int __cpu_disable(void)
        /*
         * OK - migrate IRQs away from this CPU
         */
-       migrate_irqs();
-
-       /*
-        * Remove this CPU from the vm mask set of all processes.
-        */
-       clear_tasks_mm_cpumask(cpu);
+       irq_migrate_all_off_this_cpu();
 
        return 0;
 }
@@ -325,12 +327,14 @@ static void __init hyp_mode_check(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+       setup_cpu_features();
        hyp_mode_check();
        apply_alternatives_all();
 }
 
 void __init smp_prepare_boot_cpu(void)
 {
+       cpuinfo_store_boot_cpu();
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 }
 
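verify_local_cpu_capabilities() makes a late-booting CPU prove it supports every capability the system has already committed to, refusing to bring it online otherwise. A toy model of that check; the capability list, the callbacks and the deliberately deficient CPU 2 are all fabricated:

    #include <stdio.h>

    struct cpu_capability {
            const char *desc;
            int (*matches)(unsigned int cpu);
    };

    static int has_atomics(unsigned int cpu) { (void)cpu; return 1; }
    static int has_pan(unsigned int cpu)     { return cpu != 2; } /* CPU 2 lacks it */

    static const struct cpu_capability caps[] = {
            { "LSE atomics", has_atomics },
            { "PAN",         has_pan },
    };

    /* Established once SMP is up, as setup_cpu_features() does above. */
    static const int system_has[2] = { 1, 1 };

    static int verify_local_cpu_capabilities(unsigned int cpu)
    {
            unsigned int i;

            for (i = 0; i < 2; i++) {
                    if (system_has[i] && !caps[i].matches(cpu)) {
                            printf("CPU%u: missing %s, refusing to bring online\n",
                                   cpu, caps[i].desc);
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            unsigned int cpu;

            for (cpu = 0; cpu < 4; cpu++)
                    if (verify_local_cpu_capabilities(cpu) == 0)
                            printf("CPU%u: ok\n", cpu);
            return 0;
    }
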
index 8297d502217e13010bc891e7d68b5f7279ea62b9..3c5e4e6dcf687bc57f61c3212bbfd06aad637eb5 100644 (file)
@@ -90,7 +90,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
                else
                        cpu_switch_mm(mm->pgd, mm);
 
-               flush_tlb_all();
+               local_flush_tlb_all();
 
                /*
                 * Restore per-cpu offset before any kernel
index f93aae5e43075ccd4e6db45c2198965e566330a5..e9b9b53643936a121e8c73db99373d7e7cab9b48 100644 (file)
@@ -103,12 +103,12 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
        set_fs(fs);
 }
 
-static void dump_backtrace_entry(unsigned long where, unsigned long stack)
+static void dump_backtrace_entry(unsigned long where)
 {
+       /*
+        * Note that 'where' may hold a physical address; that case is not handled.
+        */
        print_ip_sym(where);
-       if (in_exception_text(where))
-               dump_mem("", "Exception stack", stack,
-                        stack + sizeof(struct pt_regs), false);
 }
 
 static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -172,12 +172,17 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        pr_emerg("Call trace:\n");
        while (1) {
                unsigned long where = frame.pc;
+               unsigned long stack;
                int ret;
 
+               dump_backtrace_entry(where);
                ret = unwind_frame(&frame);
                if (ret < 0)
                        break;
-               dump_backtrace_entry(where, frame.sp);
+               stack = frame.sp;
+               if (in_exception_text(where))
+                       dump_mem("", "Exception stack", stack,
+                                stack + sizeof(struct pt_regs), false);
        }
 }
 
index 98073332e2d05b62c12be5c9f4172ddcc3363736..8a5d97b461c0834522b2b2ab272be9bb685006f3 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
index 5c7e920e486132c5257255ff843d81a6616670b1..6a7d5cd772e6b73929fc33099d2c9193b1c0cb60 100644 (file)
@@ -19,6 +19,7 @@ if VIRTUALIZATION
 config KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        depends on OF
+       depends on !ARM64_16K_PAGES
        select MMU_NOTIFIER
        select PREEMPT_NOTIFIERS
        select ANON_INODES
@@ -33,6 +34,8 @@ config KVM
        select HAVE_KVM_IRQFD
        ---help---
          Support hosting virtualized guest machines.
+         We don't support KVM with 16K page tables yet, due to the multiple
+         levels of fake page tables.
 
          If unsure, say N.
 
index 91cf5350b3283232cd6d88aca9af669ce972c697..f34745cb3d236fe0a4731f8d02031f8ff764d69c 100644 (file)
@@ -53,7 +53,7 @@ static bool cpu_has_32bit_el1(void)
 {
        u64 pfr0;
 
-       pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+       pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
        return !!(pfr0 & 0x20);
 }
 
index d03d3af17e7eef784d528479e2f4fed305ee5f75..87a64e8db04c4dac07a5d289ad0c2dd22860f0e2 100644 (file)
@@ -693,13 +693,13 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
-               u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
-               u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
-               u32 el3 = !!((pfr >> 12) & 0xf);
+               u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
+               u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
+               u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
 
-               *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
-                                         (((dfr >> 12) & 0xf) << 24) |
-                                         (((dfr >> 28) & 0xf) << 20) |
+               *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+                                         (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+                                         (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
                                          (6 << 16) | (el3 << 14) | (el3 << 12));
                return true;
        }
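
The trap_dbgidr() hunk replaces magic shift constants with named ID_AA64DFR0 field shifts when synthesizing an AArch32 DBGIDR for the guest. A standalone sketch of that packing; the shift values are copied from the hunk, the register value is invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Field shifts as named in the hunk above. */
    #define ID_AA64DFR0_BRPS_SHIFT          12
    #define ID_AA64DFR0_WRPS_SHIFT          20
    #define ID_AA64DFR0_CTX_CMPS_SHIFT      28

    static uint32_t make_dbgidr(uint64_t dfr, uint32_t el3)
    {
            uint32_t wrps = (dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf;
            uint32_t brps = (dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf;
            uint32_t ctx  = (dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf;

            return (wrps << 28) | (brps << 24) | (ctx << 20) |
                   (6 << 16) |                  /* debug architecture: ARMv8 */
                   (el3 << 14) | (el3 << 12);   /* set when EL3 is implemented */
    }

    int main(void)
    {
            uint64_t dfr = 0x0000000010305000ULL; /* invented ID_AA64DFR0_EL1 value */

            printf("DBGIDR = %#010x\n", make_dbgidr(dfr, 1));
            return 0;
    }
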
index 1be9ef27be9704b2ae99bade58d36356a894e78b..4699cd74f87e4af7bf69da8ce48a88a7f4f69b74 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
 
  * Returns:
  *     x0 - bytes not copied
  */
+
+       .macro ldrb1 ptr, regB, val
+       USER(9998f, ldrb  \ptr, [\regB], \val)
+       .endm
+
+       .macro strb1 ptr, regB, val
+       strb \ptr, [\regB], \val
+       .endm
+
+       .macro ldrh1 ptr, regB, val
+       USER(9998f, ldrh  \ptr, [\regB], \val)
+       .endm
+
+       .macro strh1 ptr, regB, val
+       strh \ptr, [\regB], \val
+       .endm
+
+       .macro ldr1 ptr, regB, val
+       USER(9998f, ldr \ptr, [\regB], \val)
+       .endm
+
+       .macro str1 ptr, regB, val
+       str \ptr, [\regB], \val
+       .endm
+
+       .macro ldp1 ptr, regB, regC, val
+       USER(9998f, ldp \ptr, \regB, [\regC], \val)
+       .endm
+
+       .macro stp1 ptr, regB, regC, val
+       stp \ptr, \regB, [\regC], \val
+       .endm
+
+end    .req    x5
 ENTRY(__copy_from_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
            CONFIG_ARM64_PAN)
-       add     x5, x1, x2                      // upper user buffer boundary
-       subs    x2, x2, #16
-       b.mi    1f
-0:
-USER(9f, ldp   x3, x4, [x1], #16)
-       subs    x2, x2, #16
-       stp     x3, x4, [x0], #16
-       b.pl    0b
-1:     adds    x2, x2, #8
-       b.mi    2f
-USER(9f, ldr   x3, [x1], #8    )
-       sub     x2, x2, #8
-       str     x3, [x0], #8
-2:     adds    x2, x2, #4
-       b.mi    3f
-USER(9f, ldr   w3, [x1], #4    )
-       sub     x2, x2, #4
-       str     w3, [x0], #4
-3:     adds    x2, x2, #2
-       b.mi    4f
-USER(9f, ldrh  w3, [x1], #2    )
-       sub     x2, x2, #2
-       strh    w3, [x0], #2
-4:     adds    x2, x2, #1
-       b.mi    5f
-USER(9f, ldrb  w3, [x1]        )
-       strb    w3, [x0]
-5:     mov     x0, #0
+       add     end, x0, x2
+#include "copy_template.S"
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
            CONFIG_ARM64_PAN)
+       mov     x0, #0                          // Nothing to copy
        ret
 ENDPROC(__copy_from_user)
 
        .section .fixup,"ax"
        .align  2
-9:     sub     x2, x5, x1
-       mov     x3, x2
-10:    strb    wzr, [x0], #1                   // zero remaining buffer space
-       subs    x3, x3, #1
-       b.ne    10b
-       mov     x0, x2                          // bytes not copied
+9998:
+       sub     x0, end, dst
+9999:
+       strb    wzr, [dst], #1                  // zero remaining buffer space
+       cmp     dst, end
+       b.lo    9999b
        ret
        .previous
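
The new 9998 fixup implements the __copy_from_user contract: on a fault, zero the destination from the failure point up to end and return the number of bytes not copied. A userspace model of that contract, with the fault simulated by a readable-byte limit (all names invented):

    #include <stdio.h>
    #include <string.h>

    /*
     * Model of the __copy_from_user contract: copy up to n bytes; on a
     * "fault" (here: running past 'readable'), zero the remaining
     * destination bytes and return how many bytes were NOT copied.
     */
    static size_t copy_from_user_model(char *dst, const char *src,
                                       size_t n, size_t readable)
    {
            size_t copied = n < readable ? n : readable;

            memcpy(dst, src, copied);
            memset(dst + copied, 0, n - copied);  /* zero remaining buffer space */
            return n - copied;                    /* bytes not copied */
    }

    int main(void)
    {
            char dst[8];
            size_t left = copy_from_user_model(dst, "abcdefgh", 8, 5);

            printf("left=%zu dst=%.8s\n", left, dst);  /* left=3 dst=abcde */
            return 0;
    }
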
index 1b94661e22b3f4dc3cf131f1afb6487333daee0b..81c8fc93c100b7be7da17ebf96b1edeeb806671f 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
 
  * Returns:
  *     x0 - bytes not copied
  */
+       .macro ldrb1 ptr, regB, val
+       USER(9998f, ldrb  \ptr, [\regB], \val)
+       .endm
+
+       .macro strb1 ptr, regB, val
+       USER(9998f, strb \ptr, [\regB], \val)
+       .endm
+
+       .macro ldrh1 ptr, regB, val
+       USER(9998f, ldrh  \ptr, [\regB], \val)
+       .endm
+
+       .macro strh1 ptr, regB, val
+       USER(9998f, strh \ptr, [\regB], \val)
+       .endm
+
+       .macro ldr1 ptr, regB, val
+       USER(9998f, ldr \ptr, [\regB], \val)
+       .endm
+
+       .macro str1 ptr, regB, val
+       USER(9998f, str \ptr, [\regB], \val)
+       .endm
+
+       .macro ldp1 ptr, regB, regC, val
+       USER(9998f, ldp \ptr, \regB, [\regC], \val)
+       .endm
+
+       .macro stp1 ptr, regB, regC, val
+       USER(9998f, stp \ptr, \regB, [\regC], \val)
+       .endm
+
+end    .req    x5
 ENTRY(__copy_in_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
            CONFIG_ARM64_PAN)
-       add     x5, x0, x2                      // upper user buffer boundary
-       subs    x2, x2, #16
-       b.mi    1f
-0:
-USER(9f, ldp   x3, x4, [x1], #16)
-       subs    x2, x2, #16
-USER(9f, stp   x3, x4, [x0], #16)
-       b.pl    0b
-1:     adds    x2, x2, #8
-       b.mi    2f
-USER(9f, ldr   x3, [x1], #8    )
-       sub     x2, x2, #8
-USER(9f, str   x3, [x0], #8    )
-2:     adds    x2, x2, #4
-       b.mi    3f
-USER(9f, ldr   w3, [x1], #4    )
-       sub     x2, x2, #4
-USER(9f, str   w3, [x0], #4    )
-3:     adds    x2, x2, #2
-       b.mi    4f
-USER(9f, ldrh  w3, [x1], #2    )
-       sub     x2, x2, #2
-USER(9f, strh  w3, [x0], #2    )
-4:     adds    x2, x2, #1
-       b.mi    5f
-USER(9f, ldrb  w3, [x1]        )
-USER(9f, strb  w3, [x0]        )
-5:     mov     x0, #0
+       add     end, x0, x2
+#include "copy_template.S"
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
            CONFIG_ARM64_PAN)
+       mov     x0, #0
        ret
 ENDPROC(__copy_in_user)
 
        .section .fixup,"ax"
        .align  2
-9:     sub     x0, x5, x0                      // bytes not copied
+9998:  sub     x0, end, dst                    // bytes not copied
        ret
        .previous
diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
new file mode 100644 (file)
index 0000000..410fbdb
--- /dev/null
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ *     x0 - dest
+ *     x1 - src
+ *     x2 - n
+ * Returns:
+ *     x0 - dest
+ */
+dstin  .req    x0
+src    .req    x1
+count  .req    x2
+tmp1   .req    x3
+tmp1w  .req    w3
+tmp2   .req    x4
+tmp2w  .req    w4
+dst    .req    x6
+
+A_l    .req    x7
+A_h    .req    x8
+B_l    .req    x9
+B_h    .req    x10
+C_l    .req    x11
+C_h    .req    x12
+D_l    .req    x13
+D_h    .req    x14
+
+       mov     dst, dstin
+       cmp     count, #16
+       /* When the copy length is less than 16, the accesses are not aligned. */
+       b.lo    .Ltiny15
+
+       neg     tmp2, src
+       ands    tmp2, tmp2, #15         /* Bytes to reach alignment. */
+       b.eq    .LSrcAligned
+       sub     count, count, tmp2
+       /*
+       * Copy the leading memory data from src to dst in increasing
+       * address order. This way, the risk of overwriting the source
+       * data is eliminated when the distance between src and dst is
+       * less than 16. The memory accesses here are aligned.
+       */
+       tbz     tmp2, #0, 1f
+       ldrb1   tmp1w, src, #1
+       strb1   tmp1w, dst, #1
+1:
+       tbz     tmp2, #1, 2f
+       ldrh1   tmp1w, src, #2
+       strh1   tmp1w, dst, #2
+2:
+       tbz     tmp2, #2, 3f
+       ldr1    tmp1w, src, #4
+       str1    tmp1w, dst, #4
+3:
+       tbz     tmp2, #3, .LSrcAligned
+       ldr1    tmp1, src, #8
+       str1    tmp1, dst, #8
+
+.LSrcAligned:
+       cmp     count, #64
+       b.ge    .Lcpy_over64
+       /*
+       * Deal with small copies quickly by dropping straight into the
+       * exit block.
+       */
+.Ltail63:
+       /*
+       * Copy up to 48 bytes of data. At this point we only need the
+       * bottom 6 bits of count to be accurate.
+       */
+       ands    tmp1, count, #0x30
+       b.eq    .Ltiny15
+       cmp     tmp1w, #0x20
+       b.eq    1f
+       b.lt    2f
+       ldp1    A_l, A_h, src, #16
+       stp1    A_l, A_h, dst, #16
+1:
+       ldp1    A_l, A_h, src, #16
+       stp1    A_l, A_h, dst, #16
+2:
+       ldp1    A_l, A_h, src, #16
+       stp1    A_l, A_h, dst, #16
+.Ltiny15:
+       /*
+       * Prefer to break one ldp/stp into several loads/stores that access
+       * memory in increasing address order, rather than loading/storing 16
+       * bytes from (src-16) to (dst-16) and winding src back to an aligned
+       * address, as the original cortex-strings memcpy does. Had that
+       * scheme been kept, memmove would have to guarantee that the src
+       * address is at least 16 bytes above the dst address, otherwise some
+       * source data would be overwritten when memmove calls memcpy
+       * directly. To keep memmove simple and decouple it from memcpy's
+       * internals, the original scheme was dropped.
+       */
+       tbz     count, #3, 1f
+       ldr1    tmp1, src, #8
+       str1    tmp1, dst, #8
+1:
+       tbz     count, #2, 2f
+       ldr1    tmp1w, src, #4
+       str1    tmp1w, dst, #4
+2:
+       tbz     count, #1, 3f
+       ldrh1   tmp1w, src, #2
+       strh1   tmp1w, dst, #2
+3:
+       tbz     count, #0, .Lexitfunc
+       ldrb1   tmp1w, src, #1
+       strb1   tmp1w, dst, #1
+
+       b       .Lexitfunc
+
+.Lcpy_over64:
+       subs    count, count, #128
+       b.ge    .Lcpy_body_large
+       /*
+       * Less than 128 bytes to copy, so handle 64 here and then jump
+       * to the tail.
+       */
+       ldp1    A_l, A_h, src, #16
+       stp1    A_l, A_h, dst, #16
+       ldp1    B_l, B_h, src, #16
+       ldp1    C_l, C_h, src, #16
+       stp1    B_l, B_h, dst, #16
+       stp1    C_l, C_h, dst, #16
+       ldp1    D_l, D_h, src, #16
+       stp1    D_l, D_h, dst, #16
+
+       tst     count, #0x3f
+       b.ne    .Ltail63
+       b       .Lexitfunc
+
+       /*
+       * Critical loop.  Start at a new cache line boundary.  Assuming
+       * 64 bytes per line this ensures the entire loop is in one line.
+       */
+       .p2align        L1_CACHE_SHIFT
+.Lcpy_body_large:
+       /* Pre-load the first 64 bytes of data. */
+       ldp1    A_l, A_h, src, #16
+       ldp1    B_l, B_h, src, #16
+       ldp1    C_l, C_h, src, #16
+       ldp1    D_l, D_h, src, #16
+1:
+       /*
+       * Interleave the load of the next 64-byte data block with the
+       * store of the previously loaded 64 bytes.
+       */
+       stp1    A_l, A_h, dst, #16
+       ldp1    A_l, A_h, src, #16
+       stp1    B_l, B_h, dst, #16
+       ldp1    B_l, B_h, src, #16
+       stp1    C_l, C_h, dst, #16
+       ldp1    C_l, C_h, src, #16
+       stp1    D_l, D_h, dst, #16
+       ldp1    D_l, D_h, src, #16
+       subs    count, count, #64
+       b.ge    1b
+       stp1    A_l, A_h, dst, #16
+       stp1    B_l, B_h, dst, #16
+       stp1    C_l, C_h, dst, #16
+       stp1    D_l, D_h, dst, #16
+
+       tst     count, #0x3f
+       b.ne    .Ltail63
+.Lexitfunc:
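
The template aligns src with 1/2/4/8-byte leading copies (testing the misalignment bits, as the tbz sequence above does) before moving data in 16-byte chunks. A much-simplified C rendering of that shape, prologue plus bulk loop plus byte tail, without the 64- and 128-byte unrolled paths; purely illustrative, not a drop-in memcpy:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void *memcpy_sketch(void *dstin, const void *srcin, size_t count)
    {
            unsigned char *dst = dstin;
            const unsigned char *src = srcin;
            size_t head = (-(uintptr_t)src) & 15;   /* bytes to reach alignment */

            if (count >= 16) {
                    count -= head;
                    /* Leading copies in increasing address order, as above:
                     * test each bit of the misalignment, copy 1/2/4/8 bytes. */
                    if (head & 1) { *dst++ = *src++; }
                    if (head & 2) { memcpy(dst, src, 2); dst += 2; src += 2; }
                    if (head & 4) { memcpy(dst, src, 4); dst += 4; src += 4; }
                    if (head & 8) { memcpy(dst, src, 8); dst += 8; src += 8; }

                    while (count >= 16) {           /* aligned bulk chunks */
                            memcpy(dst, src, 16);
                            dst += 16;
                            src += 16;
                            count -= 16;
                    }
            }
            while (count--)                         /* byte tail */
                    *dst++ = *src++;
            return dstin;
    }

    int main(void)
    {
            char out[64];
            const char *msg = "alignment prologue, then 16-byte bulk loop";

            memcpy_sketch(out, msg, strlen(msg) + 1);
            puts(out);
            return 0;
    }
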
index a257b47e2dc4934f0d37b3586e6a2ef5b29f3453..7512bbbc07ac39dbe8c963745281f25c2d60efa4 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
 
  * Returns:
  *     x0 - bytes not copied
  */
+       .macro ldrb1 ptr, regB, val
+       ldrb  \ptr, [\regB], \val
+       .endm
+
+       .macro strb1 ptr, regB, val
+       USER(9998f, strb \ptr, [\regB], \val)
+       .endm
+
+       .macro ldrh1 ptr, regB, val
+       ldrh  \ptr, [\regB], \val
+       .endm
+
+       .macro strh1 ptr, regB, val
+       USER(9998f, strh \ptr, [\regB], \val)
+       .endm
+
+       .macro ldr1 ptr, regB, val
+       ldr \ptr, [\regB], \val
+       .endm
+
+       .macro str1 ptr, regB, val
+       USER(9998f, str \ptr, [\regB], \val)
+       .endm
+
+       .macro ldp1 ptr, regB, regC, val
+       ldp \ptr, \regB, [\regC], \val
+       .endm
+
+       .macro stp1 ptr, regB, regC, val
+       USER(9998f, stp \ptr, \regB, [\regC], \val)
+       .endm
+
+end    .req    x5
 ENTRY(__copy_to_user)
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
            CONFIG_ARM64_PAN)
-       add     x5, x0, x2                      // upper user buffer boundary
-       subs    x2, x2, #16
-       b.mi    1f
-0:
-       ldp     x3, x4, [x1], #16
-       subs    x2, x2, #16
-USER(9f, stp   x3, x4, [x0], #16)
-       b.pl    0b
-1:     adds    x2, x2, #8
-       b.mi    2f
-       ldr     x3, [x1], #8
-       sub     x2, x2, #8
-USER(9f, str   x3, [x0], #8    )
-2:     adds    x2, x2, #4
-       b.mi    3f
-       ldr     w3, [x1], #4
-       sub     x2, x2, #4
-USER(9f, str   w3, [x0], #4    )
-3:     adds    x2, x2, #2
-       b.mi    4f
-       ldrh    w3, [x1], #2
-       sub     x2, x2, #2
-USER(9f, strh  w3, [x0], #2    )
-4:     adds    x2, x2, #1
-       b.mi    5f
-       ldrb    w3, [x1]
-USER(9f, strb  w3, [x0]        )
-5:     mov     x0, #0
+       add     end, x0, x2
+#include "copy_template.S"
 ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
            CONFIG_ARM64_PAN)
+       mov     x0, #0
        ret
 ENDPROC(__copy_to_user)
 
        .section .fixup,"ax"
        .align  2
-9:     sub     x0, x5, x0                      // bytes not copied
+9998:  sub     x0, end, dst                    // bytes not copied
        ret
        .previous
index 8636b7549163a20078734f26ad984e54a804d1c3..4444c1d25f4bb7217f540715e8cde1b27d28913d 100644 (file)
@@ -41,4 +41,4 @@ ENTRY(memchr)
        ret
 2:     mov     x0, #0
        ret
-ENDPROC(memchr)
+ENDPIPROC(memchr)
index 6ea0776ba6de1014c049c740fc041e30f33a5b2c..ffbdec00327d0463ca0bd608b7612e471a08347d 100644 (file)
@@ -255,4 +255,4 @@ CPU_LE( rev data2, data2 )
 .Lret0:
        mov     result, #0
        ret
-ENDPROC(memcmp)
+ENDPIPROC(memcmp)
index 8a9a96d3ddae04331828c9744b4d94368ef70620..67613937711f10d209b3be73ce6a322f674581b5 100644 (file)
  * Returns:
  *     x0 - dest
  */
-dstin  .req    x0
-src    .req    x1
-count  .req    x2
-tmp1   .req    x3
-tmp1w  .req    w3
-tmp2   .req    x4
-tmp2w  .req    w4
-tmp3   .req    x5
-tmp3w  .req    w5
-dst    .req    x6
+       .macro ldrb1 ptr, regB, val
+       ldrb  \ptr, [\regB], \val
+       .endm
 
-A_l    .req    x7
-A_h    .req    x8
-B_l    .req    x9
-B_h    .req    x10
-C_l    .req    x11
-C_h    .req    x12
-D_l    .req    x13
-D_h    .req    x14
+       .macro strb1 ptr, regB, val
+       strb \ptr, [\regB], \val
+       .endm
 
-ENTRY(memcpy)
-       mov     dst, dstin
-       cmp     count, #16
-       /*When memory length is less than 16, the accessed are not aligned.*/
-       b.lo    .Ltiny15
+       .macro ldrh1 ptr, regB, val
+       ldrh  \ptr, [\regB], \val
+       .endm
 
-       neg     tmp2, src
-       ands    tmp2, tmp2, #15/* Bytes to reach alignment. */
-       b.eq    .LSrcAligned
-       sub     count, count, tmp2
-       /*
-       * Copy the leading memory data from src to dst in an increasing
-       * address order.By this way,the risk of overwritting the source
-       * memory data is eliminated when the distance between src and
-       * dst is less than 16. The memory accesses here are alignment.
-       */
-       tbz     tmp2, #0, 1f
-       ldrb    tmp1w, [src], #1
-       strb    tmp1w, [dst], #1
-1:
-       tbz     tmp2, #1, 2f
-       ldrh    tmp1w, [src], #2
-       strh    tmp1w, [dst], #2
-2:
-       tbz     tmp2, #2, 3f
-       ldr     tmp1w, [src], #4
-       str     tmp1w, [dst], #4
-3:
-       tbz     tmp2, #3, .LSrcAligned
-       ldr     tmp1, [src],#8
-       str     tmp1, [dst],#8
+       .macro strh1 ptr, regB, val
+       strh \ptr, [\regB], \val
+       .endm
 
-.LSrcAligned:
-       cmp     count, #64
-       b.ge    .Lcpy_over64
-       /*
-       * Deal with small copies quickly by dropping straight into the
-       * exit block.
-       */
-.Ltail63:
-       /*
-       * Copy up to 48 bytes of data. At this point we only need the
-       * bottom 6 bits of count to be accurate.
-       */
-       ands    tmp1, count, #0x30
-       b.eq    .Ltiny15
-       cmp     tmp1w, #0x20
-       b.eq    1f
-       b.lt    2f
-       ldp     A_l, A_h, [src], #16
-       stp     A_l, A_h, [dst], #16
-1:
-       ldp     A_l, A_h, [src], #16
-       stp     A_l, A_h, [dst], #16
-2:
-       ldp     A_l, A_h, [src], #16
-       stp     A_l, A_h, [dst], #16
-.Ltiny15:
-       /*
-       * Prefer to break one ldp/stp into several load/store to access
-       * memory in an increasing address order,rather than to load/store 16
-       * bytes from (src-16) to (dst-16) and to backward the src to aligned
-       * address,which way is used in original cortex memcpy. If keeping
-       * the original memcpy process here, memmove need to satisfy the
-       * precondition that src address is at least 16 bytes bigger than dst
-       * address,otherwise some source data will be overwritten when memove
-       * call memcpy directly. To make memmove simpler and decouple the
-       * memcpy's dependency on memmove, withdrew the original process.
-       */
-       tbz     count, #3, 1f
-       ldr     tmp1, [src], #8
-       str     tmp1, [dst], #8
-1:
-       tbz     count, #2, 2f
-       ldr     tmp1w, [src], #4
-       str     tmp1w, [dst], #4
-2:
-       tbz     count, #1, 3f
-       ldrh    tmp1w, [src], #2
-       strh    tmp1w, [dst], #2
-3:
-       tbz     count, #0, .Lexitfunc
-       ldrb    tmp1w, [src]
-       strb    tmp1w, [dst]
+       .macro ldr1 ptr, regB, val
+       ldr \ptr, [\regB], \val
+       .endm
 
-.Lexitfunc:
-       ret
+       .macro str1 ptr, regB, val
+       str \ptr, [\regB], \val
+       .endm
 
-.Lcpy_over64:
-       subs    count, count, #128
-       b.ge    .Lcpy_body_large
-       /*
-       * Less than 128 bytes to copy, so handle 64 here and then jump
-       * to the tail.
-       */
-       ldp     A_l, A_h, [src],#16
-       stp     A_l, A_h, [dst],#16
-       ldp     B_l, B_h, [src],#16
-       ldp     C_l, C_h, [src],#16
-       stp     B_l, B_h, [dst],#16
-       stp     C_l, C_h, [dst],#16
-       ldp     D_l, D_h, [src],#16
-       stp     D_l, D_h, [dst],#16
+       .macro ldp1 ptr, regB, regC, val
+       ldp \ptr, \regB, [\regC], \val
+       .endm
 
-       tst     count, #0x3f
-       b.ne    .Ltail63
-       ret
+       .macro stp1 ptr, regB, regC, val
+       stp \ptr, \regB, [\regC], \val
+       .endm
 
-       /*
-       * Critical loop.  Start at a new cache line boundary.  Assuming
-       * 64 bytes per line this ensures the entire loop is in one line.
-       */
-       .p2align        L1_CACHE_SHIFT
-.Lcpy_body_large:
-       /* pre-get 64 bytes data. */
-       ldp     A_l, A_h, [src],#16
-       ldp     B_l, B_h, [src],#16
-       ldp     C_l, C_h, [src],#16
-       ldp     D_l, D_h, [src],#16
-1:
-       /*
-       * interlace the load of next 64 bytes data block with store of the last
-       * loaded 64 bytes data.
-       */
-       stp     A_l, A_h, [dst],#16
-       ldp     A_l, A_h, [src],#16
-       stp     B_l, B_h, [dst],#16
-       ldp     B_l, B_h, [src],#16
-       stp     C_l, C_h, [dst],#16
-       ldp     C_l, C_h, [src],#16
-       stp     D_l, D_h, [dst],#16
-       ldp     D_l, D_h, [src],#16
-       subs    count, count, #64
-       b.ge    1b
-       stp     A_l, A_h, [dst],#16
-       stp     B_l, B_h, [dst],#16
-       stp     C_l, C_h, [dst],#16
-       stp     D_l, D_h, [dst],#16
-
-       tst     count, #0x3f
-       b.ne    .Ltail63
+       .weak memcpy
+ENTRY(__memcpy)
+ENTRY(memcpy)
+#include "copy_template.S"
        ret
-ENDPROC(memcpy)
+ENDPIPROC(memcpy)
+ENDPROC(__memcpy)
index 57b19ea2dad467d885f09991b902d0a52bd6f747..a5a4459013b1a59d5a54b2a70fd1ad8e0f256fd9 100644 (file)
@@ -57,12 +57,14 @@ C_h .req    x12
 D_l    .req    x13
 D_h    .req    x14
 
+       .weak memmove
+ENTRY(__memmove)
 ENTRY(memmove)
        cmp     dstin, src
-       b.lo    memcpy
+       b.lo    __memcpy
        add     tmp1, src, count
        cmp     dstin, tmp1
-       b.hs    memcpy          /* No overlap.  */
+       b.hs    __memcpy                /* No overlap.  */
 
        add     dst, dstin, count
        add     src, src, count
@@ -194,4 +196,5 @@ ENTRY(memmove)
        tst     count, #0x3f
        b.ne    .Ltail63
        ret
-ENDPROC(memmove)
+ENDPIPROC(memmove)
+ENDPROC(__memmove)
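
memmove now branches to __memcpy whenever a forward copy is safe, that is, unless dst starts inside [src, src + count); only then does it fall through to the backward copy. The same test in plain C, as a sketch rather than the kernel routine:

    #include <stdio.h>
    #include <string.h>

    static void *memmove_sketch(void *dstin, const void *srcin, size_t count)
    {
            unsigned char *dst = dstin;
            const unsigned char *src = srcin;

            /* Same test as the assembly: a forward copy is safe unless
             * dst starts inside the source region [src, src + count). */
            if (dst < src || dst >= src + count) {
                    while (count--)
                            *dst++ = *src++;        /* the b.lo/b.hs __memcpy cases */
            } else {
                    dst += count;
                    src += count;
                    while (count--)
                            *--dst = *--src;        /* backward copy */
            }
            return dstin;
    }

    int main(void)
    {
            char buf[16] = "0123456789";

            memmove_sketch(buf + 2, buf, 8);        /* overlapping shift right */
            puts(buf);                              /* prints 0101234567 */
            return 0;
    }
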
index 7c72dfd36b6396a921b7d2b7d66e5880f8314d72..f2670a9f218c919ff68a1ffcfdbce468e693ae60 100644 (file)
@@ -54,6 +54,8 @@ dst           .req    x8
 tmp3w          .req    w9
 tmp3           .req    x9
 
+       .weak memset
+ENTRY(__memset)
 ENTRY(memset)
        mov     dst, dstin      /* Preserve return value.  */
        and     A_lw, val, #255
@@ -213,4 +215,5 @@ ENTRY(memset)
        ands    count, count, zva_bits_x
        b.ne    .Ltail_maybe_long
        ret
-ENDPROC(memset)
+ENDPIPROC(memset)
+ENDPROC(__memset)
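
The .weak memset / ENTRY(__memset) pairing exports the routine under both names, so an instrumented build (KASAN interposes on the mem* functions) can override the weak public symbol while the double-underscore name stays as the uninstrumented implementation. The C-level equivalent uses the GCC/Clang weak-alias extension; the names here are invented:

    #include <stdio.h>
    #include <stddef.h>

    /* The real implementation lives under one name... */
    void *memset_impl(void *s, int c, size_t n)
    {
            unsigned char *p = s;

            while (n--)
                    *p++ = (unsigned char)c;
            return s;
    }

    /*
     * ...and the public name is only a weak alias for it, so another
     * object file (an instrumented one, say) may override memset_sketch
     * while memset_impl remains reachable for callers that must not be
     * intercepted.
     */
    void *memset_sketch(void *s, int c, size_t n)
            __attribute__((weak, alias("memset_impl")));

    int main(void)
    {
            char buf[8];

            memset_sketch(buf, 'x', 7);
            buf[7] = '\0';
            puts(buf);
            return 0;
    }
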
index 42f828b06c59a4daab35562e03aa70516a5d864a..471fe61760ef661213007542df37c7c8fdcb1a18 100644 (file)
@@ -231,4 +231,4 @@ CPU_BE(     orr     syndrome, diff, has_nul )
        lsr     data1, data1, #56
        sub     result, data1, data2, lsr #56
        ret
-ENDPROC(strcmp)
+ENDPIPROC(strcmp)
index 987b68b9ce4474bb9cfd44e77ef61d3a871d40f8..55ccc8e24c08440399034d41bf8aa04699e09812 100644 (file)
@@ -123,4 +123,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
        csinv   data1, data1, xzr, le
        csel    data2, data2, data2a, le
        b       .Lrealigned
-ENDPROC(strlen)
+ENDPIPROC(strlen)
index 0224cf5a55334a297c1cc079f08644fc09707156..e267044761c6f2c1b4cadcba729e8d0dbe79f766 100644 (file)
@@ -307,4 +307,4 @@ CPU_BE( orr syndrome, diff, has_nul )
 .Lret0:
        mov     result, #0
        ret
-ENDPROC(strncmp)
+ENDPIPROC(strncmp)
index 773d37a14039d9bb456f0896e022c4a3e1415773..57f57fde5722a99075f257012c059aff9f836078 100644 (file)
@@ -4,3 +4,6 @@ obj-y                           := dma-mapping.o extable.o fault.o init.o \
                                   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 obj-$(CONFIG_ARM64_PTDUMP)     += dump.o
+
+obj-$(CONFIG_KASAN)            += kasan_init.o
+KASAN_SANITIZE_kasan_init.o    := n
index eb48d5df4a0f7252462bd34b6209f27960d95e93..cfa44a6adc0ad5ec29f78228196b7e834b65df40 100644 (file)
@@ -98,7 +98,7 @@ ENTRY(__flush_dcache_area)
        b.lo    1b
        dsb     sy
        ret
-ENDPROC(__flush_dcache_area)
+ENDPIPROC(__flush_dcache_area)
 
 /*
  *     __inval_cache_range(start, end)
@@ -131,7 +131,7 @@ __dma_inv_range:
        b.lo    2b
        dsb     sy
        ret
-ENDPROC(__inval_cache_range)
+ENDPIPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
@@ -171,7 +171,7 @@ ENTRY(__dma_flush_range)
        b.lo    1b
        dsb     sy
        ret
-ENDPROC(__dma_flush_range)
+ENDPIPROC(__dma_flush_range)
 
 /*
  *     __dma_map_area(start, size, dir)
@@ -184,7 +184,7 @@ ENTRY(__dma_map_area)
        cmp     w2, #DMA_FROM_DEVICE
        b.eq    __dma_inv_range
        b       __dma_clean_range
-ENDPROC(__dma_map_area)
+ENDPIPROC(__dma_map_area)
 
 /*
  *     __dma_unmap_area(start, size, dir)
@@ -197,4 +197,4 @@ ENTRY(__dma_unmap_area)
        cmp     w2, #DMA_TO_DEVICE
        b.ne    __dma_inv_range
        ret
-ENDPROC(__dma_unmap_area)
+ENDPIPROC(__dma_unmap_area)
index d70ff14dbdbdd33ef4ed65b75caa4fa575c6497b..f636a2639f031dd03d0b5058ccf378721d67dbf4 100644 (file)
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/init.h>
+#include <linux/bitops.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
 
+#include <asm/cpufeature.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
-#include <asm/cachetype.h>
 
-#define asid_bits(reg) \
-       (((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)
+static u32 asid_bits;
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 
-#define ASID_FIRST_VERSION     (1 << MAX_ASID_BITS)
+static atomic64_t asid_generation;
+static unsigned long *asid_map;
 
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+static DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(u64, reserved_asids);
+static cpumask_t tlb_flush_pending;
 
-/*
- * We fork()ed a process, and we need a new context for the child to run in.
- */
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-       mm->context.id = 0;
-       raw_spin_lock_init(&mm->context.id_lock);
-}
+#define ASID_MASK              (~GENMASK(asid_bits - 1, 0))
+#define ASID_FIRST_VERSION     (1UL << asid_bits)
+#define NUM_USER_ASIDS         ASID_FIRST_VERSION
 
-static void flush_context(void)
+static void flush_context(unsigned int cpu)
 {
-       /* set the reserved TTBR0 before flushing the TLB */
-       cpu_set_reserved_ttbr0();
-       flush_tlb_all();
-       if (icache_is_aivivt())
-               __flush_icache_all();
-}
+       int i;
+       u64 asid;
 
-static void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-       unsigned long flags;
+       /* Update the list of reserved ASIDs and the ASID bitmap. */
+       bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
 
        /*
-        * Locking needed for multi-threaded applications where the same
-        * mm->context.id could be set from different CPUs during the
-        * broadcast. This function is also called via IPI so the
-        * mm->context.id_lock has to be IRQ-safe.
+        * Ensure the generation bump is observed before we xchg the
+        * active_asids.
         */
-       raw_spin_lock_irqsave(&mm->context.id_lock, flags);
-       if (likely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
+       smp_wmb();
+
+       for_each_possible_cpu(i) {
+               asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
-                * Old version of ASID found. Set the new one and reset
-                * mm_cpumask(mm).
+                * If this CPU has already been through a
+                * rollover, but hasn't run another task in
+                * the meantime, we must preserve its reserved
+                * ASID, as this is the only trace we have of
+                * the process it is still running.
                 */
-               mm->context.id = asid;
-               cpumask_clear(mm_cpumask(mm));
+               if (asid == 0)
+                       asid = per_cpu(reserved_asids, i);
+               __set_bit(asid & ~ASID_MASK, asid_map);
+               per_cpu(reserved_asids, i) = asid;
        }
-       raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
-       /*
-        * Set the mm_cpumask(mm) bit for the current CPU.
-        */
-       cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+       /* Queue a TLB invalidate and flush the I-cache if necessary. */
+       cpumask_setall(&tlb_flush_pending);
+
+       if (icache_is_aivivt())
+               __flush_icache_all();
 }
 
-/*
- * Reset the ASID on the current CPU. This function call is broadcast from the
- * CPU handling the ASID rollover and holding cpu_asid_lock.
- */
-static void reset_context(void *info)
+static int is_reserved_asid(u64 asid)
+{
+       int cpu;
+       for_each_possible_cpu(cpu)
+               if (per_cpu(reserved_asids, cpu) == asid)
+                       return 1;
+       return 0;
+}
+
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-       unsigned int asid;
-       unsigned int cpu = smp_processor_id();
-       struct mm_struct *mm = current->active_mm;
+       static u32 cur_idx = 1;
+       u64 asid = atomic64_read(&mm->context.id);
+       u64 generation = atomic64_read(&asid_generation);
+
+       if (asid != 0) {
+               /*
+                * If our current ASID was active during a rollover, we
+                * can continue to use it and this was just a false alarm.
+                */
+               if (is_reserved_asid(asid))
+                       return generation | (asid & ~ASID_MASK);
+
+               /*
+                * We had a valid ASID in a previous life, so try to re-use
+                * it if possible.
+                */
+               asid &= ~ASID_MASK;
+               if (!__test_and_set_bit(asid, asid_map))
+                       goto bump_gen;
+       }
 
        /*
-        * current->active_mm could be init_mm for the idle thread immediately
-        * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
-        * the reserved value, so no need to reset any context.
+        * Allocate a free ASID. If we can't find one, take a note of the
+        * currently active ASIDs and mark the TLBs as requiring flushes.
+        * We always count from ASID #1, as we use ASID #0 when setting a
+        * reserved TTBR0 for the init_mm.
         */
-       if (mm == &init_mm)
-               return;
+       asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+       if (asid != NUM_USER_ASIDS)
+               goto set_asid;
 
-       smp_rmb();
-       asid = cpu_last_asid + cpu;
+       /* We're out of ASIDs, so increment the global generation count */
+       generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
+                                                &asid_generation);
+       flush_context(cpu);
 
-       flush_context();
-       set_mm_context(mm, asid);
+       /* We have at least 1 ASID per CPU, so this will always succeed */
+       asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 
-       /* set the new ASID */
-       cpu_switch_mm(mm->pgd, mm);
+set_asid:
+       __set_bit(asid, asid_map);
+       cur_idx = asid;
+
+bump_gen:
+       asid |= generation;
+       return asid;
 }
 
-void __new_context(struct mm_struct *mm)
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 {
-       unsigned int asid;
-       unsigned int bits = asid_bits();
+       unsigned long flags;
+       u64 asid;
+
+       asid = atomic64_read(&mm->context.id);
 
-       raw_spin_lock(&cpu_asid_lock);
        /*
-        * Check the ASID again, in case the change was broadcast from another
-        * CPU before we acquired the lock.
+        * The memory ordering here is subtle. We rely on the control
+        * dependency between the generation read and the update of
+        * active_asids to ensure that we are synchronised with a
+        * parallel rollover (i.e. this pairs with the smp_wmb() in
+        * flush_context).
         */
-       if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
-               cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-               raw_spin_unlock(&cpu_asid_lock);
-               return;
+       if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
+           && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
+               goto switch_mm_fastpath;
+
+       raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+       /* Check that our ASID belongs to the current generation. */
+       asid = atomic64_read(&mm->context.id);
+       if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
+               asid = new_context(mm, cpu);
+               atomic64_set(&mm->context.id, asid);
        }
-       /*
-        * At this point, it is guaranteed that the current mm (with an old
-        * ASID) isn't active on any other CPU since the ASIDs are changed
-        * simultaneously via IPI.
-        */
-       asid = ++cpu_last_asid;
 
-       /*
-        * If we've used up all our ASIDs, we need to start a new version and
-        * flush the TLB.
-        */
-       if (unlikely((asid & ((1 << bits) - 1)) == 0)) {
-               /* increment the ASID version */
-               cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
-               if (cpu_last_asid == 0)
-                       cpu_last_asid = ASID_FIRST_VERSION;
-               asid = cpu_last_asid + smp_processor_id();
-               flush_context();
-               smp_wmb();
-               smp_call_function(reset_context, NULL, 1);
-               cpu_last_asid += NR_CPUS - 1;
+       if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+               local_flush_tlb_all();
+
+       atomic64_set(&per_cpu(active_asids, cpu), asid);
+       raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+
+switch_mm_fastpath:
+       cpu_switch_mm(mm->pgd, mm);
+}
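
The fastpath/slowpath split above can be summarised with C11 atomics (an illustrative approximation, not the kernel code; the relaxed exchange stands in for atomic64_xchg_relaxed):

#include <stdatomic.h>

static _Bool ex_try_fastpath(atomic_ullong *active_slot,
			     unsigned long long ctx_id,
			     unsigned long long generation,
			     unsigned int asid_bits)
{
	if ((ctx_id ^ generation) >> asid_bits)
		return 0;			/* stale generation */
	/* Publish our ASID; 0 here means a concurrent rollover
	 * (flush_context()) zeroed the slot, forcing the slow path. */
	return atomic_exchange_explicit(active_slot, ctx_id,
					memory_order_relaxed) != 0;
}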
+
+static int asids_init(void)
+{
+       int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
+
+       switch (fld) {
+       default:
+               pr_warn("Unknown ASID size (%d); assuming 8-bit\n", fld);
+               /* Fallthrough */
+       case 0:
+               asid_bits = 8;
+               break;
+       case 2:
+               asid_bits = 16;
        }
 
-       set_mm_context(mm, asid);
-       raw_spin_unlock(&cpu_asid_lock);
+       /* If we end up with more CPUs than ASIDs, expect things to crash */
+       WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+       atomic64_set(&asid_generation, ASID_FIRST_VERSION);
+       asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
+                          GFP_KERNEL);
+       if (!asid_map)
+               panic("Failed to allocate bitmap for %lu ASIDs\n",
+                     NUM_USER_ASIDS);
+
+       pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
+       return 0;
 }
+early_initcall(asids_init);
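
A worked example of the sizing done in asids_init(), assuming NUM_USER_ASIDS is derived as 1UL << asid_bits (as in the arm64 headers) and 64-bit longs:

#define EX_BITS_TO_LONGS(n)	(((n) + 63) / 64)

unsigned int  ex_asid_bits = 16;		/* 16-bit ASIDs */
unsigned long ex_num_asids = 1UL << 16;		/* 65536 usable ASIDs */
unsigned long ex_map_bytes =
	EX_BITS_TO_LONGS(1UL << 16) * sizeof(long);	/* 1024 * 8 = 8KB */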
index f3d6221cd5bdd4c7bf59fd99c71415d3922e2572..5a22a119a74c87b4b5b54e114701b3c6eed233e6 100644 (file)
@@ -67,6 +67,12 @@ static struct addr_marker address_markers[] = {
        { -1,                   NULL },
 };
 
+/*
+ * The page dumper groups page table entries of the same type into a single
+ * description. It uses pg_state to track the range information while
+ * iterating over the pte entries. When the continuity is broken, it
+ * dumps out a description of the range.
+ */
 struct pg_state {
        struct seq_file *seq;
        const struct addr_marker *marker;
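
The grouping described in the comment above amounts to a run-length pass over the entries; a sketch of the idea (not the dumper's actual code):

/* Emit one line per run of identical protection bits, then start a new run. */
static void ex_note_page(struct seq_file *seq, unsigned long *run_start,
			 unsigned long addr, u64 *run_prot, u64 prot)
{
	if (prot != *run_prot) {
		seq_printf(seq, "0x%016lx-0x%016lx prot %llx\n", *run_start,
			   addr, (unsigned long long)*run_prot);
		*run_start = addr;
		*run_prot = prot;
	}
}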
@@ -113,6 +119,16 @@ static const struct prot_bits pte_bits[] = {
                .val    = PTE_NG,
                .set    = "NG",
                .clear  = "  ",
+       }, {
+               .mask   = PTE_CONT,
+               .val    = PTE_CONT,
+               .set    = "CON",
+               .clear  = "   ",
+       }, {
+               .mask   = PTE_TABLE_BIT,
+               .val    = PTE_TABLE_BIT,
+               .set    = "   ",
+               .clear  = "BLK",
        }, {
                .mask   = PTE_UXN,
                .val    = PTE_UXN,
@@ -198,7 +214,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
                unsigned long delta;
 
                if (st->current_prot) {
-                       seq_printf(st->seq, "0x%16lx-0x%16lx   ",
+                       seq_printf(st->seq, "0x%016lx-0x%016lx   ",
                                   st->start_address, addr);
 
                        delta = (addr - st->start_address) >> 10;
index aba9ead1384c036a0d6a441c92ced63cfd7ed4ae..066d5aea16c6242f2b6ebba56df555a717241514 100644 (file)
@@ -555,7 +555,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 }
 
 #ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void)
+void cpu_enable_pan(void *__unused)
 {
        config_sctlr_el1(SCTLR_EL1_SPAN, 0);
 }
index f5c0680d17d9efd701f7c261fd3a8b0210ad9d7a..7a1f9a05dc8269f891a1a7240cd5d030cdff3e9c 100644 (file)
@@ -298,6 +298,9 @@ void __init mem_init(void)
 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
        pr_notice("Virtual kernel memory layout:\n"
+#ifdef CONFIG_KASAN
+                 "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
+#endif
                  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
@@ -310,6 +313,9 @@ void __init mem_init(void)
                  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
                  "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
                  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+#ifdef CONFIG_KASAN
+                 MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
+#endif
                  MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  MLG((unsigned long)vmemmap,
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
new file mode 100644 (file)
index 0000000..cf038c7
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * This file contains kasan initialization code for ARM64.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/start_kernel.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
+
+static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
+                                       unsigned long end)
+{
+       pte_t *pte;
+       unsigned long next;
+
+       if (pmd_none(*pmd))
+               pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+
+       pte = pte_offset_kernel(pmd, addr);
+       do {
+               next = addr + PAGE_SIZE;
+               set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
+                                       PAGE_KERNEL));
+       } while (pte++, addr = next, addr != end && pte_none(*pte));
+}
+
+static void __init kasan_early_pmd_populate(pud_t *pud,
+                                       unsigned long addr,
+                                       unsigned long end)
+{
+       pmd_t *pmd;
+       unsigned long next;
+
+       if (pud_none(*pud))
+               pud_populate(&init_mm, pud, kasan_zero_pmd);
+
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               kasan_early_pte_populate(pmd, addr, next);
+       } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
+}
+
+static void __init kasan_early_pud_populate(pgd_t *pgd,
+                                       unsigned long addr,
+                                       unsigned long end)
+{
+       pud_t *pud;
+       unsigned long next;
+
+       if (pgd_none(*pgd))
+               pgd_populate(&init_mm, pgd, kasan_zero_pud);
+
+       pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+               kasan_early_pmd_populate(pud, addr, next);
+       } while (pud++, addr = next, addr != end && pud_none(*pud));
+}
+
+static void __init kasan_map_early_shadow(void)
+{
+       unsigned long addr = KASAN_SHADOW_START;
+       unsigned long end = KASAN_SHADOW_END;
+       unsigned long next;
+       pgd_t *pgd;
+
+       pgd = pgd_offset_k(addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               kasan_early_pud_populate(pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+}
+
+asmlinkage void __init kasan_early_init(void)
+{
+       BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
+       BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+       BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+       kasan_map_early_shadow();
+}
+
+static void __init clear_pgds(unsigned long start,
+                       unsigned long end)
+{
+       /*
+        * Remove references to kasan page tables from
+        * swapper_pg_dir. pgd_clear() can't be used
+        * here because it's a no-op on 2- and 3-level pagetable setups.
+        */
+       for (; start < end; start += PGDIR_SIZE)
+               set_pgd(pgd_offset_k(start), __pgd(0));
+}
+
+static void __init cpu_set_ttbr1(unsigned long ttbr1)
+{
+       asm(
+       "       msr     ttbr1_el1, %0\n"
+       "       isb"
+       :
+       : "r" (ttbr1));
+}
+
+void __init kasan_init(void)
+{
+       struct memblock_region *reg;
+
+       /*
+        * We are going to perform proper setup of shadow memory.
+        * First we unmap the early shadow (the clear_pgds() call below).
+        * However, instrumented code can't execute without shadow memory.
+        * tmp_pg_dir is used to keep the early shadow mapped until the
+        * full shadow setup is finished.
+        */
+       memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
+       cpu_set_ttbr1(__pa(tmp_pg_dir));
+       flush_tlb_all();
+
+       clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+       kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+                       kasan_mem_to_shadow((void *)MODULES_VADDR));
+
+       for_each_memblock(memory, reg) {
+               void *start = (void *)__phys_to_virt(reg->base);
+               void *end = (void *)__phys_to_virt(reg->base + reg->size);
+
+               if (start >= end)
+                       break;
+
+               /*
+                * end + 1 here is intentional. We check several shadow bytes
+                * in advance to slightly speed up the fast path. In some rare
+                * cases we could cross the boundary of the mapped shadow, so
+                * we just map some more here.
+                */
+               vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
+                               (unsigned long)kasan_mem_to_shadow(end) + 1,
+                               pfn_to_nid(virt_to_pfn(start)));
+       }
+
+       memset(kasan_zero_page, 0, PAGE_SIZE);
+       cpu_set_ttbr1(__pa(swapper_pg_dir));
+       flush_tlb_all();
+
+       /* At this point kasan is fully initialized. Enable error messages */
+       init_task.kasan_depth = 0;
+       pr_info("KernelAddressSanitizer initialized\n");
+}
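
For reference, the translation this shadow region serves is the generic KASAN one: one shadow byte covers eight bytes of memory, so shadow = (addr >> 3) + KASAN_SHADOW_OFFSET. The BUILD_BUG_ON in kasan_early_init() encodes the same relation, since the shadow of the full 2^64 address space spans 1UL << 61 bytes ending at KASAN_SHADOW_END. A standalone sketch (the real helper is kasan_mem_to_shadow()):

static inline void *ex_mem_to_shadow(const void *addr,
				     unsigned long shadow_offset)
{
	/* One shadow byte per 8 bytes of kernel memory. */
	return (void *)(((unsigned long)addr >> 3) + shadow_offset);
}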
index 9211b8527f2580aeb561b8c7cc3cdc75f728571f..c2fa6b56613c23ba111c94310df522454e2ea732 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -80,19 +81,55 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
        do {
                /*
                 * Need to have the least restrictive permissions available
-                * permissions will be fixed up later
+                * here; permissions will be fixed up later. Default the new
+                * page range to contiguous PTEs.
                 */
-               set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+               set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT));
                pfn++;
        } while (pte++, i++, i < PTRS_PER_PTE);
 }
 
+/*
+ * Given a PTE with the CONT bit set, determine where the CONT range
+ * starts, and clear the entire range of PTE CONT bits.
+ */
+static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
+{
+       int i;
+
+       pte -= CONT_RANGE_OFFSET(addr);
+       for (i = 0; i < CONT_PTES; i++) {
+               set_pte(pte, pte_mknoncont(*pte));
+               pte++;
+       }
+       flush_tlb_all();
+}
+
+/*
+ * Given a range of PTEs set the pfn and provided page protection flags
+ */
+static void __populate_init_pte(pte_t *pte, unsigned long addr,
+                               unsigned long end, phys_addr_t phys,
+                               pgprot_t prot)
+{
+       unsigned long pfn = __phys_to_pfn(phys);
+
+       do {
+               /* clear all the bits except the pfn, then apply the prot */
+               set_pte(pte, pfn_pte(pfn, prot));
+               pte++;
+               pfn++;
+               addr += PAGE_SIZE;
+       } while (addr != end);
+}
+
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
-                                 unsigned long end, unsigned long pfn,
+                                 unsigned long end, phys_addr_t phys,
                                  pgprot_t prot,
                                  void *(*alloc)(unsigned long size))
 {
        pte_t *pte;
+       unsigned long next;
 
        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
                pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -105,9 +142,27 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               set_pte(pte, pfn_pte(pfn, prot));
-               pfn++;
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+               next = min(end, (addr + CONT_SIZE) & CONT_MASK);
+               if (((addr | next | phys) & ~CONT_MASK) == 0) {
+                       /* a block of CONT_PTES  */
+                       __populate_init_pte(pte, addr, next, phys,
+                                           prot | __pgprot(PTE_CONT));
+               } else {
+                       /*
+                        * If the range being split is already inside a
+                        * contiguous range but this PTE isn't going to be
+                        * contiguous, then we want to unmark the adjacent
+                        * ranges, then update the portion of the range we
+                        * are interested in.
+                        */
+                       clear_cont_pte_range(pte, addr);
+                       __populate_init_pte(pte, addr, next, phys, prot);
+               }
+
+               pte += (next - addr) >> PAGE_SHIFT;
+               phys += next - addr;
+               addr = next;
+       } while (addr != end);
 }
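
The eligibility test above requires addr, next and phys to all sit on a CONT_SIZE boundary. A worked sketch assuming 4KB pages, where CONT_PTES is 16 and hence CONT_SIZE is 64KB:

#define EX_CONT_SIZE	(16UL * 4096)		/* 64KB, assuming 4KB pages */
#define EX_CONT_MASK	(~(EX_CONT_SIZE - 1))

static int ex_can_use_cont(unsigned long addr, unsigned long next,
			   unsigned long phys)
{
	/* All three must be 64KB-aligned for a full contiguous block. */
	return ((addr | next | phys) & ~EX_CONT_MASK) == 0;
}

/* ex_can_use_cont(0x40000000, 0x40010000, 0x80000000) -> 1 (hint usable),
 * ex_can_use_cont(0x40001000, 0x40010000, 0x80000000) -> 0 (falls back). */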
 
 void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -168,8 +223,7 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                                }
                        }
                } else {
-                       alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-                                      prot, alloc);
+                       alloc_init_pte(pmd, addr, next, phys, prot, alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
@@ -353,14 +407,11 @@ static void __init map_mem(void)
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir, gives
-        * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
-        * PHYS_OFFSET (which must be aligned to 2MB as per
-        * Documentation/arm64/booting.txt).
+        * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
+        * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
+        * per Documentation/arm64/booting.txt).
         */
-       if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
-               limit = PHYS_OFFSET + PMD_SIZE;
-       else
-               limit = PHYS_OFFSET + PUD_SIZE;
+       limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
        memblock_set_current_limit(limit);
 
        /* map all the memory banks */
@@ -371,21 +422,24 @@ static void __init map_mem(void)
                if (start >= end)
                        break;
 
-#ifndef CONFIG_ARM64_64K_PAGES
-               /*
-                * For the first memory bank align the start address and
-                * current memblock limit to prevent create_mapping() from
-                * allocating pte page tables from unmapped memory.
-                * When 64K pages are enabled, the pte page table for the
-                * first PGDIR_SIZE is already present in swapper_pg_dir.
-                */
-               if (start < limit)
-                       start = ALIGN(start, PMD_SIZE);
-               if (end < limit) {
-                       limit = end & PMD_MASK;
-                       memblock_set_current_limit(limit);
+               if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+                       /*
+                        * For the first memory bank align the start address and
+                        * current memblock limit to prevent create_mapping() from
+                        * allocating pte page tables from unmapped memory. With
+                        * the section maps, if the first block doesn't end on a
+                        * section-size boundary, create_mapping() will try to
+                        * allocate a pte page, which may be returned from a
+                        * still-unmapped area.
+                        * When section maps are not used, the pte page table for the
+                        * current limit is already present in swapper_pg_dir.
+                        */
+                       if (start < limit)
+                               start = ALIGN(start, SECTION_SIZE);
+                       if (end < limit) {
+                               limit = end & SECTION_MASK;
+                               memblock_set_current_limit(limit);
+                       }
                }
-#endif
                __map_memblock(start, end);
        }
 
@@ -456,7 +510,7 @@ void __init paging_init(void)
         * point to zero page to avoid speculatively fetching new entries.
         */
        cpu_set_reserved_ttbr0();
-       flush_tlb_all();
+       local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();
 }
 
@@ -498,12 +552,12 @@ int kern_addr_valid(unsigned long addr)
        return pfn_valid(pte_pfn(*pte));
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-#ifdef CONFIG_ARM64_64K_PAGES
+#if !ARM64_SWAPPER_USES_SECTION_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
        return vmemmap_populate_basepages(start, end, node);
 }
-#else  /* !CONFIG_ARM64_64K_PAGES */
+#else  /* !ARM64_SWAPPER_USES_SECTION_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
        unsigned long addr = start;
@@ -638,7 +692,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 {
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
-       int granularity, size, offset;
+       int size, offset;
        void *dt_virt;
 
        /*
@@ -664,24 +718,15 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);
 
-       if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
-               BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
-                            __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);
-
-               granularity = PAGE_SIZE;
-       } else {
-               BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
-                            __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);
-
-               granularity = PMD_SIZE;
-       }
+       BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
+                    __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
 
-       offset = dt_phys % granularity;
+       offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;
 
        /* map the first chunk so we can read the size from the header */
-       create_mapping(round_down(dt_phys, granularity), dt_virt_base,
-                      granularity, prot);
+       create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+                      SWAPPER_BLOCK_SIZE, prot);
 
        if (fdt_check_header(dt_virt) != 0)
                return NULL;
@@ -690,9 +735,9 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
        if (size > MAX_FDT_SIZE)
                return NULL;
 
-       if (offset + size > granularity)
-               create_mapping(round_down(dt_phys, granularity), dt_virt_base,
-                              round_up(offset + size, granularity), prot);
+       if (offset + size > SWAPPER_BLOCK_SIZE)
+               create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+                              round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
 
        memblock_reserve(dt_phys, size);
 
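A worked example of the arithmetic above, assuming SWAPPER_BLOCK_SIZE is 2MB (4KB pages with section maps) and an illustrative DTB address:

unsigned long ex_block   = 2UL << 20;			/* 2MB, assumed */
unsigned long ex_dt_phys = 0x4A201000UL;		/* illustrative */
unsigned long ex_offset  = ex_dt_phys % ex_block;	/* 0x1000 */
unsigned long ex_base    = ex_dt_phys - ex_offset;	/* 0x4A200000 */
/* dt_virt = dt_virt_base + 0x1000; the second create_mapping() call
 * fires only if offset + size crosses the 2MB block boundary. */
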
index 71ca104f97bde3a7b9f11e2aa4e443357e2e29b9..cb3ba1b812e74dcd1acbc167756d60da331d105f 100644 (file)
@@ -28,8 +28,6 @@
 
 #include "mm.h"
 
-#define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
-
 static struct kmem_cache *pgd_cache;
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
index e4ee7bd8830aed60b9b0b493c4c4dc93863d419c..3a4b8b19978bb304c9913e51da9db9f072ff49ff 100644 (file)
@@ -30,7 +30,9 @@
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define TCR_TG_FLAGS   TCR_TG0_64K | TCR_TG1_64K
-#else
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define TCR_TG_FLAGS   TCR_TG0_16K | TCR_TG1_16K
+#else /* CONFIG_ARM64_4K_PAGES */
 #define TCR_TG_FLAGS   TCR_TG0_4K | TCR_TG1_4K
 #endif
 
@@ -130,7 +132,7 @@ ENDPROC(cpu_do_resume)
  *     - pgd_phys - physical address of new TTB
  */
 ENTRY(cpu_do_switch_mm)
-       mmid    w1, x1                          // get mm->context.id
+       mmid    x1, x1                          // get mm->context.id
        bfi     x0, x1, #48, #16                // set the ASID
        msr     ttbr0_el1, x0                   // set TTBR0
        isb
@@ -146,8 +148,8 @@ ENDPROC(cpu_do_switch_mm)
  *     value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-       tlbi    vmalle1is                       // invalidate I + D TLBs
-       dsb     ish
+       tlbi    vmalle1                         // Invalidate local TLB
+       dsb     nsh
 
        mov     x0, #3 << 20
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
index f61f2dd67464746c728474a7b5503dd7dfbdcb67..241b9b9729d821510fb2addde4b276e73e6185a7 100644 (file)
@@ -20,4 +20,5 @@ generic-y += sections.h
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 61cd1e786a142c440caa231a665349ed3d8f8e01..91d49c0a31185041055e627689ec09b2ab708bd1 100644 (file)
@@ -46,4 +46,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index f17c4dc6050c7d23ade635f749aa75eed8f0ac1d..945544ec603ee12408928c0abd4db19cfbc784d9 100644 (file)
@@ -59,4 +59,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index b7f68192d15b52cb4e6c34c78eac88a02fa971d5..1778805f63809d378a5cafb6c920b517ea753c7b 100644 (file)
@@ -43,4 +43,5 @@ generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 8e47b832cc7684af7a10d2a0680651cf5bf8cc0d..1fa084cf1a4398934889658b8b21f66154bf0ab6 100644 (file)
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index daee37bd09991a3edf732451587e59a8db001d54..db8ddabc6bd2819ba579c4c407a569e2232daa51 100644 (file)
@@ -58,4 +58,5 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 9de3ba12f6b97c0f8722e2f929c70fbae429fd34..502a91d8dbbd80df039d9a111de04c1caadf0e3c 100644 (file)
@@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
index e0eb704ca1fa93755d678e82c2336c8e188a34f0..fd104bd221ced1dff2c9485bdcb1be520171df7f 100644 (file)
@@ -9,3 +9,4 @@ generic-y += module.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 0b6b40d37b95b5acd2f0561a82f12b3859b35880..5b4ec541ba7c99936d8f5072dc5716a778751235 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index eeb3a8991fc411e952cd6a20b3e44602b80f38e0..6e5198e2c124f89b86d6b5267280f5596448cc0a 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -344,6 +349,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 3a7006654ce97c9df5bd521d66af5c29c61171fb..f75600b0ca23f78ec028948babb81b5b2c02b3ed 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -355,6 +360,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 0586b323a673791fff0661eb5804347bc7ad16fb..a42d91c389a6fbba1c2c8f27069acb53332dd56f 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index ad1dbce07aa4b532fdb49fe515db2f2a19c37aa7..77f4a11083e9964050022f4ff22625b69b8672dd 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -345,6 +350,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index b44acacaecf41f43bae326e63318c3c8c7e7d0f8..5a329f77329b155ed5a4dc0f06ca6973c82aae94 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -364,6 +369,7 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 8afca3753db1f8f52245f2b4265c35f74268ae5e..83c80d2030ec96aa4740615101422e1fca1884e5 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index ef00875994d9ac34191f2bd6a572d9c44118ef4d..6cb42c3bf5a280d1a537d515396a463049d4a250 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 387c2bd90ff1490a8a1c1fcb08a11753c8b2b6b1..c7508c30330c43ee32c0f75d7c7c8112f16cdc35 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 35355c1bc714000d1e3d44b5a429b97fabeac84a..64b71664a3036aa2827984f34005fc2cd044de44 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -354,6 +359,7 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PLIP=m
@@ -510,6 +516,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 8442d267b877202e5293c0c5178eb340113e69f8..9a4cab78a2ea82ce3043be9c8a5582328774f21b 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
index 0e1b542e155582a3685340bf0ce8651fcc5d8948..1a2eaac13dbdd540f223aea5c67c660723e3cf54 100644 (file)
@@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
@@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m
 # CONFIG_INET_LRO is not set
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
+CONFIG_IPV6=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_ILA=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_GRE=m
 CONFIG_NETFILTER=y
@@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
 CONFIG_NF_TABLES_ARP=m
 CONFIG_NF_LOG_ARP=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
@@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
 CONFIG_NFT_REDIR_IPV6=m
@@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
 # CONFIG_WIRELESS is not set
 # CONFIG_UEVENT_HELPER is not set
 CONFIG_DEVTMPFS=y
@@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
@@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_CRYPTO_RSA=m
index 5a822bb790f72135862dcc0b7a97296954d73bd9..066e74f666ae95e4c9f58c42010bc857ca6454ca 100644 (file)
@@ -4,4 +4,34 @@
 #define __ALIGN .align 4
 #define __ALIGN_STR ".align 4"
 
+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+       __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+       __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+       __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+       __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+       __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+                             "m" (arg4), "m" (arg5), "m" (arg6))
+
 #endif
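
A hypothetical usage sketch (example_sys_op and do_example_op are invented for illustration; in the kernel these macros are typically reached via the __PROTECT step of the SYSCALL_DEFINEx wrappers). Naming ret and the argument slots in an empty asm keeps them live, so GCC neither spills into them nor converts the call into tailcall recursion:

asmlinkage long example_sys_op(unsigned int fd, unsigned long count)
{
	long ret = do_example_op(fd, count);	/* hypothetical helper */

	/* Keep the stack slots of 'fd' and 'count' alive to the end. */
	asmlinkage_protect(2, ret, fd, count);
	return ret;
}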
index 244e0dbe45dbeda359e233cde23b4652f0ce13dc..0793a7f174176e6d590ca4d9567a9e3523c42c50 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            356
+#define NR_syscalls            375
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 61fb6cb9d2ae3c66a1c0c6dec1ac95adb83dd810..5e6fae6c275f9b110464cb80bbb47187c2251dcd 100644 (file)
 #define __NR_memfd_create      353
 #define __NR_bpf               354
 #define __NR_execveat          355
+#define __NR_socket            356
+#define __NR_socketpair                357
+#define __NR_bind              358
+#define __NR_connect           359
+#define __NR_listen            360
+#define __NR_accept4           361
+#define __NR_getsockopt                362
+#define __NR_setsockopt                363
+#define __NR_getsockname       364
+#define __NR_getpeername       365
+#define __NR_sendto            366
+#define __NR_sendmsg           367
+#define __NR_recvfrom          368
+#define __NR_recvmsg           369
+#define __NR_shutdown          370
+#define __NR_recvmmsg          371
+#define __NR_sendmmsg          372
+#define __NR_userfaultfd       373
+#define __NR_membarrier                374
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index a0ec4303f2c8e57a04fb353178d43b0be6a461fe..5dd0e80042f51107e63e0fcd832f4c46c85b826c 100644 (file)
@@ -376,4 +376,22 @@ ENTRY(sys_call_table)
        .long sys_memfd_create
        .long sys_bpf
        .long sys_execveat              /* 355 */
-
+       .long sys_socket
+       .long sys_socketpair
+       .long sys_bind
+       .long sys_connect
+       .long sys_listen                /* 360 */
+       .long sys_accept4
+       .long sys_getsockopt
+       .long sys_setsockopt
+       .long sys_getsockname
+       .long sys_getpeername           /* 365 */
+       .long sys_sendto
+       .long sys_sendmsg
+       .long sys_recvfrom
+       .long sys_recvmsg
+       .long sys_shutdown              /* 370 */
+       .long sys_recvmmsg
+       .long sys_sendmmsg
+       .long sys_userfaultfd
+       .long sys_membarrier
index df31353fd2001dc0e357feafcf975aa6e5537622..29acb89daaaa55f2b1640c3d751233ed034b43b7 100644 (file)
@@ -54,4 +54,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 2f222f355c4bbc69842ccd62f3419b0cbd1732a4..b0ae88c9fed922a4ba95be0498f421d3927eb40e 100644 (file)
@@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 15ecb4831e125838477a4805810381e1f1bb6725..eeb3953ed8ac8d8051ccbe3df9af8270915329b3 100644 (file)
@@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init(
 
        return 0;
 }
-IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc",
-               ath79_misc_intc_of_init);
+
+static int __init ar7100_misc_intc_of_init(
+       struct device_node *node, struct device_node *parent)
+{
+       ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
+       return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc",
+               ar7100_misc_intc_of_init);
+
+static int __init ar7240_misc_intc_of_init(
+       struct device_node *node, struct device_node *parent)
+{
+       ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
+       return ath79_misc_intc_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc",
+               ar7240_misc_intc_of_init);
 
 static int __init ar79_cpu_intc_of_init(
        struct device_node *node, struct device_node *parent)
index 89a628455bc253b01c2cc7b9f3cc2a6d9a426493..bd634259eab9d6b0f7d1b8e746d0b11c01f0950d 100644 (file)
@@ -933,7 +933,7 @@ void __init plat_mem_setup(void)
        while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
                && (total < MAX_MEMORY)) {
                memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
-                                               __pa_symbol(&__init_end), -1,
+                                               __pa_symbol(&_end), -1,
                                                0x100000,
                                                CVMX_BOOTMEM_FLAG_NO_LOCKING);
                if (memory >= 0) {
index 40ec4ca3f946a9238afa6e0edd25c0440f86a36b..c7fe4d01e79c61bbaee58aa25b9b50bf5b9a380e 100644 (file)
@@ -17,4 +17,5 @@ generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += user.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 9801ac9826554ca0d8ab2e27e21ad6305f18da67..fe67f12ac2393b23705b4a094bbf8c3b7cc77166 100644 (file)
@@ -20,6 +20,9 @@
 #ifndef cpu_has_tlb
 #define cpu_has_tlb            (cpu_data[0].options & MIPS_CPU_TLB)
 #endif
+#ifndef cpu_has_ftlb
+#define cpu_has_ftlb           (cpu_data[0].options & MIPS_CPU_FTLB)
+#endif
 #ifndef cpu_has_tlbinv
 #define cpu_has_tlbinv         (cpu_data[0].options & MIPS_CPU_TLBINV)
 #endif
index cd89e9855775276ea7c3a185d9e61b3702f06b6f..82ad15f11049284c2f347fc66b68ea5091fb810d 100644 (file)
@@ -385,6 +385,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_CDMM          0x4000000000ull /* CPU has Common Device Memory Map */
 #define MIPS_CPU_BP_GHIST      0x8000000000ull /* R12K+ Branch Prediction Global History */
 #define MIPS_CPU_SP            0x10000000000ull /* Small (1KB) page support */
+#define MIPS_CPU_FTLB          0x20000000000ull /* CPU has Fixed-page-size TLB */
 
 /*
  * CPU ASE encodings
index b02891f9caaf1d8ffecf14b0e0fa7be5b7fb33c8..21d9607c80d7deaa1ef2174c0dde62bda9985bde 100644 (file)
@@ -65,6 +65,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
        back_to_back_c0_hazard();
 }
 
+/**
+ * maar_init() - initialise MAARs
+ *
+ * Performs initialisation of MAARs for the current CPU, making use of the
+ * platform's implementation of platform_maar_init where necessary and
+ * duplicating the setup it provides on secondary CPUs.
+ */
+extern void maar_init(void);
+
 /**
  * struct maar_config - MAAR configuration data
  * @lower:     The lowest address that the MAAR pair will affect. Must be
index d75b75e78ebb4749355f95a1709202b9f2d069af..1f1927ab42690b284257faa8991f4cff31a3264d 100644 (file)
@@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask,              MIPS_CM_GCB_OFS + 0xc8)
 BUILD_CM_R_(gic_status,                MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,                MIPS_CM_GCB_OFS + 0xf0)
 BUILD_CM_RW(l2_config,         MIPS_CM_GCB_OFS + 0x130)
+BUILD_CM_RW(sys_config2,       MIPS_CM_GCB_OFS + 0x150)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,  0x00)
@@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority,    0x80)
 #define CM_GCR_L2_CONFIG_ASSOC_SHF             0
 #define CM_GCR_L2_CONFIG_ASSOC_MSK             (_ULCAST_(0xff) << 0)
 
+/* GCR_SYS_CONFIG2 register fields */
+#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF          0
+#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK          (_ULCAST_(0xf) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF    0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK    (_ULCAST_(0xff) << 0)
@@ -405,4 +410,38 @@ static inline int mips_cm_revision(void)
        return read_gcr_rev();
 }
 
+/**
+ * mips_cm_max_vp_width() - return the width in bits of VP indices
+ *
+ * Return: the width, in bits, of VP indices in fields that combine core & VP
+ * indices.
+ */
+static inline unsigned int mips_cm_max_vp_width(void)
+{
+       extern int smp_num_siblings;
+
+       if (mips_cm_revision() >= CM_REV_CM3)
+               return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
+
+       return smp_num_siblings;
+}
+
+/**
+ * mips_cm_vp_id() - calculate the hardware VP ID for a CPU
+ * @cpu: the CPU whose VP ID to calculate
+ *
+ * Hardware such as the GIC uses identifiers for VPs which may not match the
+ * CPU numbers used by Linux. This function calculates the hardware VP
+ * identifier corresponding to a given CPU.
+ *
+ * Return: the VP ID for the CPU.
+ */
+static inline unsigned int mips_cm_vp_id(unsigned int cpu)
+{
+       unsigned int core = cpu_data[cpu].core;
+       unsigned int vp = cpu_vpe_id(&cpu_data[cpu]);
+
+       return (core * mips_cm_max_vp_width()) + vp;
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
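
As a worked example of mips_cm_vp_id() (values assumed purely for illustration): on a CM3 system where GCR_SYS_CONFIG2.MAXVPW reads 4, the CPU sitting on core 2, VP 1 maps to the hardware VP ID the GIC expects as follows:

    /* illustrative: core = 2, vp = 1, mips_cm_max_vp_width() == 4 */
    unsigned int vp_id = (2 * 4) + 1;   /* == 9 */
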
index d3cd8eac81e3a76baf455dd95e881cfd9c72eed9..c64781cf649f86b4ca4eec0fee38ba4c4da523e7 100644 (file)
 
 /* Bits specific to the MIPS32/64 PRA. */
 #define MIPS_CONF_MT           (_ULCAST_(7) <<  7)
+#define MIPS_CONF_MT_TLB       (_ULCAST_(1) <<  7)
+#define MIPS_CONF_MT_FTLB      (_ULCAST_(4) <<  7)
 #define MIPS_CONF_AR           (_ULCAST_(7) << 10)
 #define MIPS_CONF_AT           (_ULCAST_(3) << 13)
 #define MIPS_CONF_M            (_ULCAST_(1) << 31)
index c03088f9f514e7c21f7ae0e185f8be0456af372b..cfabadb135d9fe94912080ea41ca761e6f1eca08 100644 (file)
 #define __NR_memfd_create              (__NR_Linux + 354)
 #define __NR_bpf                       (__NR_Linux + 355)
 #define __NR_execveat                  (__NR_Linux + 356)
+#define __NR_userfaultfd               (__NR_Linux + 357)
+#define __NR_membarrier                        (__NR_Linux + 358)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            356
+#define __NR_Linux_syscalls            358
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                356
+#define __NR_O32_Linux_syscalls                358
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_memfd_create              (__NR_Linux + 314)
 #define __NR_bpf                       (__NR_Linux + 315)
 #define __NR_execveat                  (__NR_Linux + 316)
+#define __NR_userfaultfd               (__NR_Linux + 317)
+#define __NR_membarrier                        (__NR_Linux + 318)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            316
+#define __NR_Linux_syscalls            318
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         316
+#define __NR_64_Linux_syscalls         318
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_memfd_create              (__NR_Linux + 318)
 #define __NR_bpf                       (__NR_Linux + 319)
 #define __NR_execveat                  (__NR_Linux + 320)
+#define __NR_userfaultfd               (__NR_Linux + 321)
+#define __NR_membarrier                        (__NR_Linux + 322)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            320
+#define __NR_Linux_syscalls            322
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                320
+#define __NR_N32_Linux_syscalls                322
 
 #endif /* _UAPI_ASM_UNISTD_H */
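
Until libc grows wrappers, the newly wired syscalls can be exercised directly via syscall(2). A minimal userspace sketch (MEMBARRIER_CMD_QUERY is 0 per the membarrier UAPI; error handling kept minimal):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
            /* cmd 0 == MEMBARRIER_CMD_QUERY: returns a bitmask of
               supported membarrier commands, or -1 on error. */
            long mask = syscall(__NR_membarrier, 0, 0);
            if (mask < 0) {
                    perror("membarrier");
                    return 1;
            }
            printf("supported membarrier commands: %#lx\n", mask);
            return 0;
    }
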
index 4e62bf85d0b0f910cc56c726167cb933b35045d0..459cb017306c21eb63259b8cb0a406c65080cc8b 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/power/jz4740-battery.h>
 #include <linux/power/gpio-charger.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_fb.h>
 #include <asm/mach-jz4740/jz4740_mmc.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
index a74e181058b0fc8ce001fffc68bce8f95bc7f02d..8c6d76c9b2d69bf6603e32598e3b8d74cba15bf6 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/gpio.h>
 
 #define JZ4740_GPIO_BASE_A (32*0)
 #define JZ4740_GPIO_BASE_B (32*1)
index 9f71c06aebf6313c4a0b2bac9b1bbdc6228cee31..209ded16806bf5a295ff202258f5b7ff42070940 100644 (file)
@@ -39,6 +39,7 @@
         mfc0   \dest, CP0_CONFIG, 3
        andi    \dest, \dest, MIPS_CONF3_MT
        beqz    \dest, \nomt
+        nop
        .endm
 
 .section .text.cps-vec
@@ -223,10 +224,9 @@ LEAF(excep_ejtag)
        END(excep_ejtag)
 
 LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
        /* Check that the core implements the MT ASE */
        has_mt  t0, 3f
-        nop
 
        .set    push
        .set    mips64r2
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
        PTR_ADDU t0, t0, t1
 
       /* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
+       li      t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
        has_mt  ta2, 1f
-        li     t9, 0
 
        /* Find the number of VPEs present in the core */
        mfc0    t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
        /* Retrieve the VPE ID from EBase.CPUNum */
        mfc0    t9, $15, 1
        and     t9, t9, t1
+#endif
 
1:     /* Calculate a pointer to this VPE's struct vpe_boot_config */
        li      t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
        PTR_L   ta3, COREBOOTCFG_VPECONFIG(t0)
        PTR_ADDU v0, v0, ta3
 
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
 
        /* If the core doesn't support MT then return */
        bnez    ta2, 1f
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)
 
 2:     .set    pop
 
-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */
 
        /* Return */
        jr      ra
index 571a8e6ea5bd0048a840bc539135f27c32fec621..09a51d091941be3aa75ae37b53b386714a6d3db5 100644 (file)
@@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 {
        unsigned int config0;
-       int isa;
+       int isa, mt;
 
        config0 = read_c0_config();
 
        /*
         * Look for Standard TLB or Dual VTLB and FTLB
         */
-       if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
-           (((config0 & MIPS_CONF_MT) >> 7) == 4))
+       mt = config0 & MIPS_CONF_MT;
+       if (mt == MIPS_CONF_MT_TLB)
                c->options |= MIPS_CPU_TLB;
+       else if (mt == MIPS_CONF_MT_FTLB)
+               c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
 
        isa = (config0 & MIPS_CONF_AT) >> 13;
        switch (isa) {
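
Note that MIPS_CONF_MT_TLB (1 << 7) and MIPS_CONF_MT_FTLB (4 << 7) are defined pre-shifted, so the masked Config.MT field compares directly against them where the old code extracted the field first. A sketch of the equivalence:

    /* old style: ((config0 & MIPS_CONF_MT) >> 7) == 4 */
    /* new style, the same test without the shift: */
    if ((config0 & MIPS_CONF_MT) == MIPS_CONF_MT_FTLB)
            /* dual VTLB and FTLB present */;
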
@@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
        if (cpu_has_tlb) {
                if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
                        c->options |= MIPS_CPU_TLBINV;
+
                /*
-                * This is a bit ugly. R6 has dropped that field from
-                * config4 and the only valid configuration is VTLB+FTLB so
-                * set a good value for mmuextdef for that case.
+                * R6 has dropped the MMUExtDef field from config4.
+                * On R6 the fields always describe the FTLB, and only if it is
+                * present according to Config.MT.
                 */
-               if (cpu_has_mips_r6)
+               if (!cpu_has_mips_r6)
+                       mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+               else if (cpu_has_ftlb)
                        mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
                else
-                       mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+                       mmuextdef = 0;
 
                switch (mmuextdef) {
                case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
index 423ae83af1fb7043a1daff5d06a079658446de5a..3375745b91980013d76ad2e7d9cd632d31b26af9 100644 (file)
@@ -18,7 +18,7 @@
        .set pop
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                    struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti)
  */
        .align  7
        LEAF(resume)
        cpu_save_nonscratch a0
        LONG_S  ra, THREAD_REG31(a0)
 
-       /*
-        * check if we need to save FPU registers
-        */
-       .set push
-       .set noreorder
-       beqz    a3, 1f
-        PTR_L  t3, TASK_THREAD_INFO(a0)
-       .set pop
-
-       /*
-        * clear saved user stack CU1 bit
-        */
-       LONG_L  t0, ST_OFF(t3)
-       li      t1, ~ST0_CU1
-       and     t0, t0, t1
-       LONG_S  t0, ST_OFF(t3)
-
-       .set push
-       .set arch=mips64r2
-       fpu_save_double a0 t0 t1                # c0_status passed in t0
-                                               # clobbers t1
-       .set pop
-1:
-
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        /* Check if we need to store CVMSEG state */
        dmfc0   t0, $11,7       /* CvmMemCtl */
index 5087a4b72e6b9a4ae12fc8acdb6f7f8f318784c5..ac27ef7d4d0ebd8be86798d8191e32c720263625 100644 (file)
  */
 #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
 
-/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *                    struct thread_info *next_ti, int usedfpu)
+ *                    struct thread_info *next_ti)
  */
 LEAF(resume)
        mfc0    t1, CP0_STATUS
@@ -50,22 +40,6 @@ LEAF(resume)
        cpu_save_nonscratch a0
        sw      ra, THREAD_REG31(a0)
 
-       beqz    a3, 1f
-
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-
-       /*
-        * clear saved user stack CU1 bit
-        */
-       lw      t0, ST_OFF(t3)
-       li      t1, ~ST0_CU1
-       and     t0, t0, t1
-       sw      t0, ST_OFF(t3)
-
-       fpu_save_single a0, t0                  # clobbers t0
-
-1:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
index 4cc13508d967c4076e0b720b120487281003ed72..65a74e4f0f456d1de9d29ed83a62e1bcc6e644fc 100644 (file)
@@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp)
        lw      t1, PT_EPC(sp)          # skip syscall on return
 
        subu    v0, v0, __NR_O32_Linux  # check syscall number
-       sltiu   t0, v0, __NR_O32_Linux_syscalls + 1
        addiu   t1, 4                   # skip to next instruction
        sw      t1, PT_EPC(sp)
-       beqz    t0, illegal_syscall
-
-       sll     t0, v0, 2
-       la      t1, sys_call_table
-       addu    t1, t0
-       lw      t2, (t1)                # syscall routine
-       beqz    t2, illegal_syscall
 
        sw      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -96,6 +88,16 @@ loads_done:
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        and     t0, t1
        bnez    t0, syscall_trace_entry # -> yes
+syscall_common:
+       sltiu   t0, v0, __NR_O32_Linux_syscalls + 1
+       beqz    t0, illegal_syscall
+
+       sll     t0, v0, 2
+       la      t1, sys_call_table
+       addu    t1, t0
+       lw      t2, (t1)                # syscall routine
+
+       beqz    t2, illegal_syscall
 
        jalr    t2                      # Do The Real Thing (TM)
 
@@ -116,7 +118,7 @@ o32_syscall_exit:
 
 syscall_trace_entry:
        SAVE_STATIC
-       move    s0, t2
+       move    s0, v0
        move    a0, sp
 
        /*
@@ -129,27 +131,18 @@ syscall_trace_entry:
 
 1:     jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
+
+       move    v0, s0                  # restore syscall
 
-       move    t0, s0
        RESTORE_STATIC
        lw      a0, PT_R4(sp)           # Restore argument registers
        lw      a1, PT_R5(sp)
        lw      a2, PT_R6(sp)
        lw      a3, PT_R7(sp)
-       jalr    t0
-
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sw      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       lw      t1, PT_R2(sp)           # syscall number
-       negu    v0                      # error
-       sw      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sw      v0, PT_R2(sp)           # result
+       j       syscall_common
 
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -599,3 +592,5 @@ EXPORT(sys_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
        PTR     sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
index a6f6b762c47a4c5a2d395e13a1d564964595abe1..e732981cf99fde26181f1db3bcb65ebd86ea4a0d 100644 (file)
@@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp)
        .set    at
 #endif
 
-       dsubu   t0, v0, __NR_64_Linux   # check syscall number
-       sltiu   t0, t0, __NR_64_Linux_syscalls + 1
 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
        ld      t1, PT_EPC(sp)          # skip syscall on return
        daddiu  t1, 4                   # skip to next instruction
        sd      t1, PT_EPC(sp)
 #endif
-       beqz    t0, illegal_syscall
-
-       dsll    t0, v0, 3               # offset into table
-       ld      t2, (sys_call_table - (__NR_64_Linux * 8))(t0)
-                                       # syscall routine
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp)
        and     t0, t1, t0
        bnez    t0, syscall_trace_entry
 
+syscall_common:
+       dsubu   t2, v0, __NR_64_Linux
+       sltiu   t0, t2, __NR_64_Linux_syscalls + 1
+       beqz    t0, illegal_syscall
+
+       dsll    t0, t2, 3               # offset into table
+       dla     t2, sys_call_table
+       daddu   t0, t2, t0
+       ld      t2, (t0)                # syscall routine
+       beqz    t2, illegal_syscall
+
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -78,14 +82,14 @@ n64_syscall_exit:
 
 syscall_trace_entry:
        SAVE_STATIC
-       move    s0, t2
+       move    s0, v0
        move    a0, sp
        move    a1, v0
        jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    v0, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -93,19 +97,9 @@ syscall_trace_entry:
        ld      a3, PT_R7(sp)
        ld      a4, PT_R8(sp)
        ld      a5, PT_R9(sp)
-       jalr    t0
-
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
+       j       syscall_common
 
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 illegal_syscall:
        /* This also isn't a 64-bit syscall, throw an error.  */
@@ -436,4 +430,6 @@ EXPORT(sys_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 5315 */
        PTR     sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sys_call_table,.-sys_call_table
index 4b2010654c463158b7dee80194de736195c04595..c794843975845df2e0a9d9c0c0fd14dad1ed17ec 100644 (file)
@@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
        and     t0, t1, t0
        bnez    t0, n32_syscall_trace_entry
 
+syscall_common:
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -75,9 +76,9 @@ n32_syscall_trace_entry:
        move    a1, v0
        jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    t2, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -85,19 +86,9 @@ n32_syscall_trace_entry:
        ld      a3, PT_R7(sp)
        ld      a4, PT_R8(sp)
        ld      a5, PT_R9(sp)
-       jalr    t0
+       j       syscall_common
 
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
-
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 not_n32_scall:
        /* This is not an n32 compatibility syscall, pass it on to
@@ -429,4 +420,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf
        PTR     compat_sys_execveat             /* 6320 */
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sysn32_call_table,.-sysn32_call_table
index f543ff4feef99f8c4ce02554f4dd54da1752b6de..6369cfd390c6330269b05eb095020dafa6cdc048 100644 (file)
@@ -87,6 +87,7 @@ loads_done:
        and     t0, t1, t0
        bnez    t0, trace_a_syscall
 
+syscall_common:
        jalr    t2                      # Do The Real Thing (TM)
 
        li      t0, -EMAXERRNO - 1      # error?
@@ -130,9 +131,9 @@ trace_a_syscall:
 
 1:     jal     syscall_trace_enter
 
-       bltz    v0, 2f                  # seccomp failed? Skip syscall
+       bltz    v0, 1f                  # seccomp failed? Skip syscall
 
-       move    t0, s0
+       move    t2, s0
        RESTORE_STATIC
        ld      a0, PT_R4(sp)           # Restore argument registers
        ld      a1, PT_R5(sp)
@@ -142,19 +143,9 @@ trace_a_syscall:
        ld      a5, PT_R9(sp)
        ld      a6, PT_R10(sp)
        ld      a7, PT_R11(sp)          # For indirect syscalls
-       jalr    t0
+       j       syscall_common
 
-       li      t0, -EMAXERRNO - 1      # error?
-       sltu    t0, t0, v0
-       sd      t0, PT_R7(sp)           # set error flag
-       beqz    t0, 1f
-
-       ld      t1, PT_R2(sp)           # syscall number
-       dnegu   v0                      # error
-       sd      t1, PT_R0(sp)           # save it for syscall restarting
-1:     sd      v0, PT_R2(sp)           # result
-
-2:     j       syscall_exit
+1:     j       syscall_exit
 
 /* ------------------------------------------------------------------------ */
 
@@ -584,4 +575,6 @@ EXPORT(sys32_call_table)
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
        PTR     compat_sys_execveat
+       PTR     sys_userfaultfd
+       PTR     sys_membarrier
        .size   sys32_call_table,.-sys32_call_table
index 35b8316002f8420d863696cdf31255d1311ead4b..479515109e5badec96942ee594e3a450ab011f11 100644 (file)
@@ -338,7 +338,7 @@ static void __init bootmem_init(void)
                if (end <= reserved_end)
                        continue;
 #ifdef CONFIG_BLK_DEV_INITRD
-               /* mapstart should be after initrd_end */
+               /* Skip zones before initrd and initrd itself */
                if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
                        continue;
 #endif
@@ -371,6 +371,14 @@ static void __init bootmem_init(void)
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * mapstart should be after initrd_end
+        */
+       if (initrd_end)
+               mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
        /*
         * Initialize the boot-time allocator with low memory only.
         */
index a31896c33716d424bb30397c17b29af07c6728bb..bd4385a8e6e86f7fbb9ca6d988f5eee155b9a8c7 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/time.h>
 #include <asm/setup.h>
+#include <asm/maar.h>
 
 cpumask_t cpu_callin_map;              /* Bitmask of started secondaries */
 
@@ -157,6 +158,7 @@ asmlinkage void start_secondary(void)
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
+       maar_init();
 
        /*
         * XXX parity protection should be folded in here when it's converted
index f6c44dd332e2a388d28e5882a0c47e84ed3ca3f9..d6d07ad56180fa7438595994833aef5e60628350 100644 (file)
@@ -64,6 +64,9 @@ void __init prom_init_env(void)
        }
        if (memsize == 0)
                memsize = 256;
+
+       loongson_sysconf.nr_uarts = 1;
+
        pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
 #else
        struct boot_params *boot_p;
index a914dc1cb6d1bc339cf44cc0c5aeac887a2e5f74..d8117be729a20ee26d2df8bb42a6f58b9670f513 100644 (file)
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
        else
 #endif
 #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
-            if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
                dma_flag = __GFP_DMA;
        else
 #endif
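
The rewritten test compares the device's coherent mask against the width of phys_addr_t instead of a hard-coded 64 bits, so builds with a 32-bit phys_addr_t no longer push every device into the DMA zone. For reference, DMA_BIT_MASK() (as defined in <linux/dma-mapping.h>) expands as:

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* With a 32-bit phys_addr_t the condition becomes
     * coherent_dma_mask < DMA_BIT_MASK(32) == 0xffffffff, so a device
     * that can reach all of physical memory avoids __GFP_DMA. */
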
index 66d0f49c5bec4bab02d8e2e194570527d9ccd4e8..8770e619185eb034b317ce3de837c5185ba05511 100644 (file)
@@ -44,6 +44,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/maar.h>
 
 /*
  * We have up to 8 empty zeroed pages so we can map one of the right colour
@@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 #endif
 }
 
+unsigned __weak platform_maar_init(unsigned num_pairs)
+{
+       struct maar_config cfg[BOOT_MEM_MAP_MAX];
+       unsigned i, num_configured, num_cfg = 0;
+       phys_addr_t skip;
+
+       for (i = 0; i < boot_mem_map.nr_map; i++) {
+               switch (boot_mem_map.map[i].type) {
+               case BOOT_MEM_RAM:
+               case BOOT_MEM_INIT_RAM:
+                       break;
+               default:
+                       continue;
+               }
+
+               skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+               cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+               cfg[num_cfg].lower += skip;
+
+               cfg[num_cfg].upper = cfg[num_cfg].lower;
+               cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+               cfg[num_cfg].upper -= skip;
+
+               cfg[num_cfg].attrs = MIPS_MAAR_S;
+               num_cfg++;
+       }
+
+       num_configured = maar_config(cfg, num_cfg, num_pairs);
+       if (num_configured < num_cfg)
+               pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+                       num_pairs, num_cfg);
+
+       return num_configured;
+}
+
+void maar_init(void)
+{
+       unsigned num_maars, used, i;
+       phys_addr_t lower, upper, attr;
+       static struct {
+               struct maar_config cfgs[3];
+               unsigned used;
+       } recorded = { { { 0 } }, 0 };
+
+       if (!cpu_has_maar)
+               return;
+
+       /* Detect the number of MAARs */
+       write_c0_maari(~0);
+       back_to_back_c0_hazard();
+       num_maars = read_c0_maari() + 1;
+
+       /* MAARs should be in pairs */
+       WARN_ON(num_maars % 2);
+
+       /* Set MAARs using values we recorded already */
+       if (recorded.used) {
+               used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
+               BUG_ON(used != recorded.used);
+       } else {
+               /* Configure the required MAARs */
+               used = platform_maar_init(num_maars / 2);
+       }
+
+       /* Disable any further MAARs */
+       for (i = (used * 2); i < num_maars; i++) {
+               write_c0_maari(i);
+               back_to_back_c0_hazard();
+               write_c0_maar(0);
+               back_to_back_c0_hazard();
+       }
+
+       if (recorded.used)
+               return;
+
+       pr_info("MAAR configuration:\n");
+       for (i = 0; i < num_maars; i += 2) {
+               write_c0_maari(i);
+               back_to_back_c0_hazard();
+               upper = read_c0_maar();
+
+               write_c0_maari(i + 1);
+               back_to_back_c0_hazard();
+               lower = read_c0_maar();
+
+               attr = lower & upper;
+               lower = (lower & MIPS_MAAR_ADDR) << 4;
+               upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;
+
+               pr_info("  [%d]: ", i / 2);
+               if (!(attr & MIPS_MAAR_V)) {
+                       pr_cont("disabled\n");
+                       continue;
+               }
+
+               pr_cont("%pa-%pa", &lower, &upper);
+
+               if (attr & MIPS_MAAR_S)
+                       pr_cont(" speculate");
+
+               pr_cont("\n");
+
+               /* Record the setup for use on secondary CPUs */
+               if (used <= ARRAY_SIZE(recorded.cfgs)) {
+                       recorded.cfgs[recorded.used].lower = lower;
+                       recorded.cfgs[recorded.used].upper = upper;
+                       recorded.cfgs[recorded.used].attrs = attr;
+                       recorded.used++;
+               }
+       }
+}
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 int page_is_ram(unsigned long pagenr)
 {
@@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void)
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_pairs)
-{
-       struct maar_config cfg[BOOT_MEM_MAP_MAX];
-       unsigned i, num_configured, num_cfg = 0;
-       phys_addr_t skip;
-
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               switch (boot_mem_map.map[i].type) {
-               case BOOT_MEM_RAM:
-               case BOOT_MEM_INIT_RAM:
-                       break;
-               default:
-                       continue;
-               }
-
-               skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
-               cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-               cfg[num_cfg].lower += skip;
-
-               cfg[num_cfg].upper = cfg[num_cfg].lower;
-               cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-               cfg[num_cfg].upper -= skip;
-
-               cfg[num_cfg].attrs = MIPS_MAAR_S;
-               num_cfg++;
-       }
-
-       num_configured = maar_config(cfg, num_cfg, num_pairs);
-       if (num_configured < num_cfg)
-               pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
-                       num_pairs, num_cfg);
-
-       return num_configured;
-}
-
-static void maar_init(void)
-{
-       unsigned num_maars, used, i;
-
-       if (!cpu_has_maar)
-               return;
-
-       /* Detect the number of MAARs */
-       write_c0_maari(~0);
-       back_to_back_c0_hazard();
-       num_maars = read_c0_maari() + 1;
-
-       /* MAARs should be in pairs */
-       WARN_ON(num_maars % 2);
-
-       /* Configure the required MAARs */
-       used = platform_maar_init(num_maars / 2);
-
-       /* Disable any further MAARs */
-       for (i = (used * 2); i < num_maars; i++) {
-               write_c0_maari(i);
-               back_to_back_c0_hazard();
-               write_c0_maar(0);
-               back_to_back_c0_hazard();
-       }
-}
-
 void __init mem_init(void)
 {
 #ifdef CONFIG_HIGHMEM
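
The ranges printed by maar_init() are reconstructed from the raw MAAR pair: the address field holds physical address bits shifted right by 4, and the upper bound is widened to the end of its 64 KiB granule. A worked example with an assumed raw field value:

    /* illustrative: if (upper & MIPS_MAAR_ADDR) == 0xfff000, then */
    phys_addr_t upper = ((phys_addr_t)0xfff000 << 4) | 0xffff;
    /* upper == 0x0fffffff: last byte of the region's 64 KiB granule */
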
index e92726099be0e40d4a7c1e0903eac239c1ad2497..5d2e0c8d29c0bd0003bae3f7337edbcab5a403c4 100644 (file)
 
 LEAF(sk_load_word)
        is_offset_negative(word)
-       .globl sk_load_word_positive
-sk_load_word_positive:
+FEXPORT(sk_load_word_positive)
        is_offset_in_header(4, word)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
+       .set    reorder
        lw      $r_A, 0(t1)
+       .set    noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_A
        rotr    $r_A, t0, 16
+# else
+       sll     t0, $r_A, 24
+       srl     t1, $r_A, 24
+       srl     t2, $r_A, 8
+       or      t0, t0, t1
+       andi    t2, t2, 0xff00
+       andi    t1, $r_A, 0xff00
+       or      t0, t0, t2
+       sll     t1, t1, 8
+       or      $r_A, t0, t1
+# endif
 #endif
        jr      $r_ra
         move   $r_ret, zero
@@ -73,15 +86,24 @@ sk_load_word_positive:
 
 LEAF(sk_load_half)
        is_offset_negative(half)
-       .globl sk_load_half_positive
-sk_load_half_positive:
+FEXPORT(sk_load_half_positive)
        is_offset_in_header(2, half)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
+       .set    reorder
        lh      $r_A, 0(t1)
+       .set    noreorder
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_A
        seh     $r_A, t0
+# else
+       sll     t0, $r_A, 24
+       andi    t1, $r_A, 0xff00
+       sra     t0, t0, 16
+       srl     t1, t1, 8
+       or      $r_A, t0, t1
+# endif
 #endif
        jr      $r_ra
         move   $r_ret, zero
@@ -89,8 +111,7 @@ sk_load_half_positive:
 
 LEAF(sk_load_byte)
        is_offset_negative(byte)
-       .globl sk_load_byte_positive
-sk_load_byte_positive:
+FEXPORT(sk_load_byte_positive)
        is_offset_in_header(1, byte)
        /* Offset within header boundaries */
        PTR_ADDU t1, $r_skb_data, offset
@@ -148,23 +169,47 @@ sk_load_byte_positive:
 NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
        bpf_slow_path_common(4)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        wsbh    t0, $r_s0
        jr      $r_ra
         rotr   $r_A, t0, 16
-#endif
+# else
+       sll     t0, $r_s0, 24
+       srl     t1, $r_s0, 24
+       srl     t2, $r_s0, 8
+       or      t0, t0, t1
+       andi    t2, t2, 0xff00
+       andi    t1, $r_s0, 0xff00
+       or      t0, t0, t2
+       sll     t1, t1, 8
+       jr      $r_ra
+        or     $r_A, t0, t1
+# endif
+#else
        jr      $r_ra
-       move    $r_A, $r_s0
+        move   $r_A, $r_s0
+#endif
 
        END(bpf_slow_path_word)
 
 NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
        bpf_slow_path_common(2)
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
        jr      $r_ra
         wsbh   $r_A, $r_s0
-#endif
+# else
+       sll     t0, $r_s0, 8
+       andi    t1, $r_s0, 0xff00
+       andi    t0, t0, 0xff00
+       srl     t1, t1, 8
+       jr      $r_ra
+        or     $r_A, t0, t1
+# endif
+#else
        jr      $r_ra
         move   $r_A, $r_s0
+#endif
 
        END(bpf_slow_path_half)
 
index 6edb9ee6128ebc4d45de622afdf118f0bf5d12ee..1c8dd0f5cd5d1567126f42b67112b5b0f91962a8 100644 (file)
@@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 914864eb5a25daf87688f1565b33700b289be0bf..d63330e88379dcc591e29e645b5363f063122f9d 100644 (file)
@@ -61,4 +61,5 @@ generic-y += types.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index ab9f4e0ed4cfcfd48a8d232fe20d0482739a22c5..ac1662956e0c4d4dfe8a0a7fff432e9f2a592f2c 100644 (file)
@@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += vtime.h
+generic-y += word-at-a-time.h
index 5ad26dd94d77e83fedeba5c7f71c8eba0ff2ab29..9043d2e1e2ae0b3c01a7b6588bed848f44dd92ff 100644 (file)
@@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
index 92ffe397b893c553c8504f10525a5e3d9d1e9e34..a05218ff3fe465b6e4812d7655360dc1b495a519 100644 (file)
@@ -13,3 +13,4 @@ generic-y += sections.h
 generic-y += trace_clock.h
 generic-y += xor.h
 generic-y += serial.h
+generic-y += word-at-a-time.h
index ee186e13dfe6fde92c9127aa07dccc474d1253d2..f102048d9c0e78a31b6773715a491e85e755a89c 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <gxio/iorpc_globals.h>
 #include <gxio/iorpc_mpipe.h>
 /* HACK: Avoid pointless "shadow" warnings. */
 #define link link_shadow
 
-/**
- * strscpy - Copy a C-string into a sized buffer, but only if it fits
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Use this routine to avoid copying too-long strings.
- * The routine returns the total number of bytes copied
- * (including the trailing NUL) or zero if the buffer wasn't
- * big enough.  To ensure that programmers pay attention
- * to the return code, the destination has a single NUL
- * written at the front (if size is non-zero) when the
- * buffer is not big enough.
- */
-static size_t strscpy(char *dest, const char *src, size_t size)
-{
-       size_t len = strnlen(src, size) + 1;
-       if (len > size) {
-               if (size)
-                       dest[0] = '\0';
-               return 0;
-       }
-       memcpy(dest, src, len);
-       return len;
-}
-
 int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 {
        char file[32];
@@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name)
        if (!context)
                return GXIO_ERR_NO_DEVICE;
 
-       if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+       if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;
 
        return gxio_mpipe_info_instance_aux(context, name);
@@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
 
        rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
        if (rv >= 0) {
-               if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+               if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
                        return GXIO_ERR_INVAL_MEMORY_SIZE;
                memcpy(link_mac, mac.mac, sizeof(mac.mac));
        }
@@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
        _gxio_mpipe_link_name_t name;
        int rv;
 
-       if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+       if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;
 
        rv = gxio_mpipe_link_open_aux(context, name, flags);
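
The deleted local helper returned 0 when the string did not fit, whereas the kernel-wide strscpy() returns the copied length (excluding the NUL) or -E2BIG on truncation, which is why the `== 0` checks above become `< 0`. A minimal kernel-context sketch of the new convention:

    #include <linux/string.h>

    char buf[32];
    ssize_t n = strscpy(buf, link_name, sizeof(buf));
    if (n < 0)      /* -E2BIG: did not fit; buf is still NUL-terminated */
            return GXIO_ERR_NO_DEVICE;
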
index ba35c41c71fff33b2b2fe95f566ad8b3dc192c32..0b6cacaad9333a4165bfd9447a180feadedb4db4 100644 (file)
@@ -40,4 +40,5 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += types.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index f0da5a237e94077ced050b6f3d746c89d44bd341..9f1e05e12255b84d94d59f67ecbc7cc9330d889a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/usb/tilegx.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <linux/types.h>
 
 static u64 ehci_dmamask = DMA_BIT_MASK(32);
index 149ec55f9c46abd97cbb9b69c7a55afa23e23393..904f3ebf4220153f816a1deca118381190f44ec4 100644 (file)
@@ -25,4 +25,5 @@ generic-y += preempt.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 1fc7a286dc6f342319ec06a81b53a087b9708ef9..256c45b3ae343c983e667b01404d8fb3e3667b4a 100644 (file)
@@ -62,4 +62,5 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index e6cf2ad350d15a8e6ca207a2618c77d820aacc22..9727b3b48bd174c8ae8297bd94e897484375618a 100644 (file)
 #define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_HWP                ( 7*32+ 10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
 #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
 #define X86_FEATURE_HWP_EPP    ( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
index ab5f1d447ef981088afc70e92c353b0dbe75f27e..ae68be92f75587ce2bfdc2d09f18711ef9cd9c8a 100644 (file)
@@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...);
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);
 
+#ifdef CONFIG_KASAN
 /*
  * CONFIG_KASAN may redefine memset to __memset.  __memset function is present
  * only in kernel binary.  Since the EFI stub linked into a separate binary it
@@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
 #undef memcpy
 #undef memset
 #undef memmove
+#endif
 
 #endif /* CONFIG_X86_32 */
 
index b98b471a3b7e660c35910ec5bde5d3580af3f498..b8c14bb7fc8f37ee004dc10342a99c8579110e1d 100644 (file)
 #define DEBUGCTLMSR_BTS_OFF_USR                (1UL << 10)
 #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
 
+#define MSR_PEBS_FRONTEND              0x000003f7
+
 #define MSR_IA32_POWER_CTL             0x000001fc
 
 #define MSR_IA32_MC0_CTL               0x00000400
index 655e07a48f6cfa9c09114108f7d8b4cb466fc705..67f08230103aa5ab5f9f74ce90622577b76caef4 100644 (file)
@@ -41,6 +41,7 @@ struct pvclock_wall_clock {
 
 #define PVCLOCK_TSC_STABLE_BIT (1 << 0)
 #define PVCLOCK_GUEST_STOPPED  (1 << 1)
+/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
 #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_PVCLOCK_ABI_H */
index b0ae1c4dc79142d9284d14e76ac181f1c271ad64..217909b4d6f56d84892655f680f974eaf83ec78e 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __ASM_X86_BITSPERLONG_H
 #define __ASM_X86_BITSPERLONG_H
 
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__ILP32__)
 # define __BITS_PER_LONG 64
 #else
 # define __BITS_PER_LONG 32
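
x32 binaries define __x86_64__ but use 32-bit longs (ILP32), so without the extra __ILP32__ test this UAPI header claimed 64-bit longs to x32 userspace. A compile-time sanity check illustrating the invariant:

    #include <asm/bitsperlong.h>

    /* holds on x86-64, i386 and, with this fix, x32 */
    _Static_assert(sizeof(long) * 8 == __BITS_PER_LONG,
                   "__BITS_PER_LONG mismatch");
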
index 381c8b9b3a33570fcf88b82fa69d1fc43298d89f..20e242ea1bc46b5f5828c7b95071d920853b7609 100644 (file)
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
-static void (*hv_kexec_handler)(void);
-static void (*hv_crash_handler)(struct pt_regs *regs);
-
 #if IS_ENABLED(CONFIG_HYPERV)
 static void (*vmbus_handler)(void);
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);
 
 void hyperv_vector_handler(struct pt_regs *regs)
 {
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void)
        hv_crash_handler = NULL;
 }
 EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
-#endif
 
+#ifdef CONFIG_KEXEC_CORE
 static void hv_machine_shutdown(void)
 {
        if (kexec_in_progress && hv_kexec_handler)
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
                hv_crash_handler(regs);
        native_machine_crash_shutdown(regs);
 }
-
+#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_HYPERV */
 
 static uint32_t  __init ms_hyperv_platform(void)
 {
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void)
        no_timer_check = 1;
 #endif
 
+#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
        machine_ops.shutdown = hv_machine_shutdown;
        machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+#endif
        mark_tsc_unstable("running on Hyper-V");
 }
 
index 5edf6d868fc16c1e24633d1ea1b727b69fb68584..165be83a7fa48a105fe67c67791e19d0b92c22ad 100644 (file)
@@ -47,6 +47,7 @@ enum extra_reg_type {
        EXTRA_REG_RSP_1 = 1,    /* offcore_response_1 */
        EXTRA_REG_LBR   = 2,    /* lbr_select */
        EXTRA_REG_LDLAT = 3,    /* ld_lat_threshold */
+       EXTRA_REG_FE    = 4,    /* fe_* */
 
        EXTRA_REG_MAX           /* number of entries needed */
 };
index 3fefebfbdf4bb4f68e5a5bf1647550ad0cd71a75..f63360be22387d4fb4cb30728f1834ee4cbd6228 100644 (file)
@@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+       /*
+        * Note: the low 8 bits of the eventsel code do not form a
+        * contiguous field; the gaps contain bits that #GP when set.
+        * Those bits are masked out here.
+        */
+       INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
        EVENT_EXTRA_END
 };
 
@@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
-       INTEL_EVENT_CONSTRAINT(0xa3, 0x4),      /* CYCLE_ACTIVITY.* */
+       INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4),    /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
        EVENT_CONSTRAINT_END
 };
 
@@ -2891,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
 
+PMU_FORMAT_ATTR(frontend, "config1:0-23");
+
 static struct attribute *intel_arch3_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
@@ -2907,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = {
        NULL,
 };
 
+static struct attribute *skl_format_attr[] = {
+       &format_attr_frontend.attr,
+       NULL,
+};
+
 static __initconst const struct x86_pmu core_pmu = {
        .name                   = "core",
        .handle_irq             = x86_pmu_handle_irq,
@@ -3516,7 +3528,8 @@ __init int intel_pmu_init(void)
 
                x86_pmu.hw_config = hsw_hw_config;
                x86_pmu.get_event_constraints = hsw_get_event_constraints;
-               x86_pmu.cpu_events = hsw_events_attrs;
+               x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+                                                 skl_format_attr);
                WARN_ON(!x86_pmu.format_attrs);
                x86_pmu.cpu_events = hsw_events_attrs;
                pr_cont("Skylake events, ");
index 086b12eae79493329c8792538ea8c6f179aee4fb..f32ac13934f2310c1b61f54884246089c7e06e01 100644 (file)
@@ -10,12 +10,12 @@ enum perf_msr_id {
        PERF_MSR_EVENT_MAX,
 };
 
-bool test_aperfmperf(int idx)
+static bool test_aperfmperf(int idx)
 {
        return boot_cpu_has(X86_FEATURE_APERFMPERF);
 }
 
-bool test_intel(int idx)
+static bool test_intel(int idx)
 {
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
            boot_cpu_data.x86 != 6)
index 3d423a101fae05ccd722a4e564b83ba5e0112b6e..608fb26c72544c5ee0fd7793c0703f642c7ed60f 100644 (file)
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
                { X86_FEATURE_PLN,              CR_EAX, 4, 0x00000006, 0 },
                { X86_FEATURE_PTS,              CR_EAX, 6, 0x00000006, 0 },
                { X86_FEATURE_HWP,              CR_EAX, 7, 0x00000006, 0 },
-               { X86_FEATURE_HWP_NOITFY,       CR_EAX, 8, 0x00000006, 0 },
+               { X86_FEATURE_HWP_NOTIFY,       CR_EAX, 8, 0x00000006, 0 },
                { X86_FEATURE_HWP_ACT_WINDOW,   CR_EAX, 9, 0x00000006, 0 },
                { X86_FEATURE_HWP_EPP,          CR_EAX,10, 0x00000006, 0 },
                { X86_FEATURE_HWP_PKG_REQ,      CR_EAX,11, 0x00000006, 0 },
index e068d6683dba6bab6bd4c9f9804a621385e3baee..74ca2fe7a0b3a60d7fc6c71c2d5dda30fcf3e767 100644 (file)
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
-                               unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
 {
-       int *nr_ranges = arg;
+       unsigned int *nr_ranges = arg;
 
        (*nr_ranges)++;
        return 0;
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
 
        ced->image = image;
 
-       walk_system_ram_range(0, -1, &nr_ranges,
+       walk_system_ram_res(0, -1, &nr_ranges,
                                get_nr_ram_ranges_callback);
 
        ced->max_nr_ranges = nr_ranges;
index 6d0e62ae8516760d6ae4af7deaa31418e82238fa..39e585a554b71d2a469c8b8c3ba487be7edc87a7 100644 (file)
@@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
 
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+       unsigned long start, bottom, top, sp, fp, ip;
+       int count = 0;
+
+       if (!p || p == current || p->state == TASK_RUNNING)
+               return 0;
+
+       start = (unsigned long)task_stack_page(p);
+       if (!start)
+               return 0;
+
+       /*
+        * Layout of the stack page:
+        *
+        * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+        * PADDING
+        * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+        * stack
+        * ----------- bottom = start + sizeof(thread_info)
+        * thread_info
+        * ----------- start
+        *
+        * The task's stack pointer points at the location where the
+        * frame pointer is stored. The data on the stack is:
+        * ... IP FP ... IP FP
+        *
+        * We need to read FP and IP, so we need to adjust the upper
+        * bound by another unsigned long.
+        */
+       top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+       top -= 2 * sizeof(unsigned long);
+       bottom = start + sizeof(struct thread_info);
+
+       sp = READ_ONCE(p->thread.sp);
+       if (sp < bottom || sp > top)
+               return 0;
+
+       fp = READ_ONCE(*(unsigned long *)sp);
+       do {
+               if (fp < bottom || fp > top)
+                       return 0;
+               ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+               if (!in_sched_functions(ip))
+                       return ip;
+               fp = READ_ONCE(*(unsigned long *)fp);
+       } while (count++ < 16 && p->state != TASK_RUNNING);
+       return 0;
+}
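
get_wchan() is what backs /proc/<pid>/wchan; a small sketch that reads the symbol a blocked task is sleeping in (pid 1 chosen only for illustration):

    #include <stdio.h>

    int main(void)
    {
            char sym[128];
            FILE *f = fopen("/proc/1/wchan", "r");

            if (!f)
                    return 1;
            if (fgets(sym, sizeof(sym), f))
                    printf("wchan: %s\n", sym);     /* "0" if runnable */
            fclose(f);
            return 0;
    }
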
index c13df2c735f82765015a4924b08aed9904c27419..737527b40e5bf40bb1e757b635cc30994db911bd 100644 (file)
@@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        return prev_p;
 }
-
-#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long bp, sp, ip;
-       unsigned long stack_page;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack_page = (unsigned long)task_stack_page(p);
-       sp = p->thread.sp;
-       if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
-               return 0;
-       /* include/asm-i386/system.h:switch_to() pushes bp last. */
-       bp = *(unsigned long *) sp;
-       do {
-               if (bp < stack_page || bp > top_ebp+stack_page)
-                       return 0;
-               ip = *(unsigned long *) (bp+4);
-               if (!in_sched_functions(ip))
-                       return ip;
-               bp = *(unsigned long *) bp;
-       } while (count++ < 16);
-       return 0;
-}
-
index 3c1bbcf129245aa7909708af46489d73f2e9c297..b35921a670b25b03878e3f9ac2e96abfae0e910c 100644 (file)
@@ -499,30 +499,6 @@ void set_personality_ia32(bool x32)
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);
 
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long stack;
-       u64 fp, ip;
-       int count = 0;
-
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       stack = (unsigned long)task_stack_page(p);
-       if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
-               return 0;
-       fp = *(u64 *)(p->thread.sp);
-       do {
-               if (fp < (unsigned long)stack ||
-                   fp >= (unsigned long)stack+THREAD_SIZE)
-                       return 0;
-               ip = *(u64 *)(fp+8);
-               if (!in_sched_functions(ip))
-                       return ip;
-               fp = *(u64 *)fp;
-       } while (count++ < 16);
-       return 0;
-}
-
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
        int ret = 0;
index 94b7d15db3fc91f3a70d665892f6f41832145c22..2f9ed1ff063260ed33bf845e1e523df0ad2bd58e 100644 (file)
@@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (svm->vmcb->control.next_rip != 0) {
-               WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+               WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }
 
@@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
-#define MTRR_TYPE_UC_MINUS     7
-#define MTRR2PROTVAL_INVALID 0xff
-
-static u8 mtrr2protval[8];
-
-static u8 fallback_mtrr_type(int mtrr)
-{
-       /*
-        * WT and WP aren't always available in the host PAT.  Treat
-        * them as UC and UC- respectively.  Everything else should be
-        * there.
-        */
-       switch (mtrr)
-       {
-       case MTRR_TYPE_WRTHROUGH:
-               return MTRR_TYPE_UNCACHABLE;
-       case MTRR_TYPE_WRPROT:
-               return MTRR_TYPE_UC_MINUS;
-       default:
-               BUG();
-       }
-}
-
-static void build_mtrr2protval(void)
-{
-       int i;
-       u64 pat;
-
-       for (i = 0; i < 8; i++)
-               mtrr2protval[i] = MTRR2PROTVAL_INVALID;
-
-       /* Ignore the invalid MTRR types.  */
-       mtrr2protval[2] = 0;
-       mtrr2protval[3] = 0;
-
-       /*
-        * Use host PAT value to figure out the mapping from guest MTRR
-        * values to nested page table PAT/PCD/PWT values.  We do not
-        * want to change the host PAT value every time we enter the
-        * guest.
-        */
-       rdmsrl(MSR_IA32_CR_PAT, pat);
-       for (i = 0; i < 8; i++) {
-               u8 mtrr = pat >> (8 * i);
-
-               if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
-                       mtrr2protval[mtrr] = __cm_idx2pte(i);
-       }
-
-       for (i = 0; i < 8; i++) {
-               if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
-                       u8 fallback = fallback_mtrr_type(i);
-                       mtrr2protval[i] = mtrr2protval[fallback];
-                       BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
-               }
-       }
-}
-
 static __init int svm_hardware_setup(void)
 {
        int cpu;
@@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void)
        } else
                kvm_disable_tdp();
 
-       build_mtrr2protval();
        return 0;
 
 err:
@@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
        return target_tsc - tsc;
 }
 
-static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
-{
-       struct kvm_vcpu *vcpu = &svm->vcpu;
-
-       /* Unlike Intel, AMD takes the guest's CR0.CD into account.
-        *
-        * AMD does not have IPAT.  To emulate it for the case of guests
-        * with no assigned devices, just set everything to WB.  If guests
-        * have assigned devices, however, we cannot force WB for RAM
-        * pages only, so use the guest PAT directly.
-        */
-       if (!kvm_arch_has_assigned_device(vcpu->kvm))
-               *g_pat = 0x0606060606060606;
-       else
-               *g_pat = vcpu->arch.pat;
-}
-
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-       u8 mtrr;
-
-       /*
-        * 1. MMIO: trust guest MTRR, so same as item 3.
-        * 2. No passthrough: always map as WB, and force guest PAT to WB as well
-        * 3. Passthrough: can't guarantee the result, try to trust guest.
-        */
-       if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
-               return 0;
-
-       if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
-           kvm_read_cr0(vcpu) & X86_CR0_CD)
-               return _PAGE_NOCACHE;
-
-       mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
-       return mtrr2protval[mtrr];
-}
-
 static void init_vmcb(struct vcpu_svm *svm, bool init_event)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
-               svm_set_guest_pat(svm, &save->g_pat);
                save->cr3 = 0;
                save->cr4 = 0;
        }
@@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!vcpu->fpu_active)
                cr0 |= X86_CR0_TS;
-
-       /* These are emulated via page tables.  */
-       cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-
+       /*
+        * Re-enable caching here because the QEMU BIOS does not
+        * do it; leaving caching disabled causes a noticeable
+        * delay at reboot.
+        */
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+               cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        mark_dirty(svm->vmcb, VMCB_CR);
        update_cr0_intercept(svm);
@@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
-       case MSR_IA32_CR_PAT:
-               if (npt_enabled) {
-                       if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
-                               return 1;
-                       vcpu->arch.pat = data;
-                       svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
-                       mark_dirty(svm->vmcb, VMCB_NPT);
-                       break;
-               }
-               /* fall through */
        default:
                return kvm_set_msr_common(vcpu, msr);
        }
@@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void)
        return true;
 }
 
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+       return 0;
+}
+
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 }
index 64076740251e7f3b8197fe87eeff262891ac995a..06ef4908ba61d2e25ead615953a2f42c923a9219 100644 (file)
@@ -8617,17 +8617,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        u64 ipat = 0;
 
        /* For VT-d and EPT combination
-        * 1. MMIO: guest may want to apply WC, trust it.
+        * 1. MMIO: always map as UC
         * 2. EPT with VT-d:
         *   a. VT-d without snooping control feature: can't guarantee the
-        *      result, try to trust guest.  So the same as item 1.
+        *      result, try to trust guest.
         *   b. VT-d with snooping control feature: snooping control feature of
         *      VT-d engine can guarantee the cache correctness. Just set it
         *      to WB to keep consistent with host. So the same as item 3.
         * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
         *    consistent with host MTRR
         */
-       if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+       if (is_mmio) {
+               cache = MTRR_TYPE_UNCACHABLE;
+               goto exit;
+       }
+
+       if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
                ipat = VMX_EPT_IPAT_BIT;
                cache = MTRR_TYPE_WRBACK;
                goto exit;
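The numbered comment above reduces to a small decision function. A stand-alone sketch of the same case analysis; the MTRR type values mirror the x86 definitions and noncoherent_dma stands in for kvm_arch_has_noncoherent_dma():

enum { MTRR_TYPE_UNCACHABLE = 0, MTRR_TYPE_WRBACK = 6 };

/* Returns the memory type; sets *ipat when guest PAT should be ignored. */
static int guest_memory_type(int is_mmio, int noncoherent_dma, int *ipat)
{
        *ipat = 0;
        if (is_mmio)
                return MTRR_TYPE_UNCACHABLE;    /* case 1: MMIO always UC */
        if (!noncoherent_dma) {
                *ipat = 1;                      /* case 3: force WB, like host */
                return MTRR_TYPE_WRBACK;
        }
        return -1;   /* case 2a: defer to guest MTRRs (elided in this sketch) */
}

int main(void)
{
        int ipat;

        return guest_memory_type(1, 0, &ipat) == MTRR_TYPE_UNCACHABLE ? 0 : 1;
}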
index 991466bf8dee4ab202f422e64c0d211bb5f439fe..92511d4b72364a978db0b38628b9449907ee1832 100644 (file)
@@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                vcpu->pvclock_set_guest_stopped_request = false;
        }
 
-       pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO;
-
        /* If the host uses TSC clocksource, then it is stable */
        if (use_master_clock)
                pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
@@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                                        &vcpu->requests);
 
                        ka->boot_vcpu_runs_old_kvmclock = tmp;
-
-                       ka->kvmclock_offset = -get_kernel_ns();
                }
 
                vcpu->arch.time = data;
index 30564e2752d361870e91a4e25a5afbb3d029b7d6..df48430c279b8688996b9f0074c08b1ce139af06 100644 (file)
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
         * has been zapped already via cleanup_highmem().
         */
        all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
-       set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+       set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
 
        rodata_test();
 
index 1db84c0758b732b3465fcc896ef98862dabe0f16..6a28ded74211145a74bfe677f9619f2d2fb676ae 100644 (file)
@@ -704,6 +704,70 @@ out:
        return ret;
 }
 
+/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+       /* Initial call */
+       if (!entry)
+               return memmap.map_end - memmap.desc_size;
+
+       entry -= memmap.desc_size;
+       if (entry < memmap.map)
+               return NULL;
+
+       return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+       if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+               /*
+                * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+                * config table feature requires us to map all entries
+                * in the same order as they appear in the EFI memory
+                * map. That is to say, entry N must have a lower
+                * virtual address than entry N+1. This is because the
+                * firmware toolchain leaves relative references in
+                * the code/data sections, which are split and become
+                * separate EFI memory regions. Mapping things
+                * out-of-order leads to the firmware accessing
+                * unmapped addresses.
+                *
+                * Since we need to map things this way whether or not
+                * the kernel actually makes use of
+                * EFI_PROPERTIES_TABLE, let's just switch to this
+                * scheme by default for 64-bit.
+                */
+               return efi_map_next_entry_reverse(entry);
+       }
+
+       /* Initial call */
+       if (!entry)
+               return memmap.map;
+
+       entry += memmap.desc_size;
+       if (entry >= memmap.map_end)
+               return NULL;
+
+       return entry;
+}
+
 /*
  * Map the efi memory ranges of the runtime services and update new_mmap with
  * virtual addresses.
@@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
        unsigned long left = 0;
        efi_memory_desc_t *md;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+       p = NULL;
+       while ((p = efi_map_next_entry(p))) {
                md = p;
                if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
 #ifdef CONFIG_X86_64
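efi_map_next_entry() replaces the old pointer-arithmetic for-loop with a direction-aware iterator, as the rewritten loop in efi_map_regions() shows. A self-contained model of the reverse branch of that idiom; map, map_end and desc_size mirror the memmap fields used above:

#include <stddef.h>
#include <stdio.h>

static char map[4 * 16];                  /* four 16-byte descriptors */
static char *map_end = map + sizeof(map);
static size_t desc_size = 16;

static void *next_entry_reverse(void *entry)
{
        if (!entry)                       /* initial call: last descriptor */
                return map_end - desc_size;
        entry = (char *)entry - desc_size;
        return (char *)entry < map ? NULL : entry;
}

int main(void)
{
        void *p = NULL;

        /* Same loop shape as efi_map_regions(); visits 3, 2, 1, 0. */
        while ((p = next_entry_reverse(p)))
                printf("entry %zu\n", (size_t)(((char *)p - map) / desc_size));
        return 0;
}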
index 63c223dff5f1eebed92297d2cd641535a3aeceb9..b56855a1382a374f8c52632b9aa243a1112a1a06 100644 (file)
@@ -28,4 +28,5 @@ generic-y += statfs.h
 generic-y += termios.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index 1e28ddb656b891b92d7c135fa65914939b1451aa..8764c241e5bb44858e753b75f6c102c06a927171 100644 (file)
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
        return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+                           const struct cpumask *online_mask)
 {
        unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
        cpumask_var_t cpus;
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
        cpumask_clear(cpus);
        nr_cpus = nr_uniq_cpus = 0;
-       for_each_online_cpu(i) {
+       for_each_cpu(i, online_mask) {
                nr_cpus++;
                first_sibling = get_first_sibling(i);
                if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
        queue = 0;
        for_each_possible_cpu(i) {
-               if (!cpu_online(i)) {
+               if (!cpumask_test_cpu(i, online_mask)) {
                        map[i] = 0;
                        continue;
                }
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
        if (!map)
                return NULL;
 
-       if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+       if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
                return map;
 
        kfree(map);
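blk_mq_update_queue_map() now receives the online mask explicitly instead of sampling cpu_online() mid-walk, so hotplug callers can pass the mask the system is about to have. A toy version of the mapping rule, with a plain bitmask standing in for struct cpumask and the sibling logic elided:

#define NR_CPUS 8

static void update_queue_map(unsigned int *map, unsigned int nr_queues,
                             unsigned long online_mask)
{
        unsigned int i, queue = 0;

        for (i = 0; i < NR_CPUS; i++) {
                if (!(online_mask & (1UL << i))) {
                        map[i] = 0;              /* offline CPU: hw queue 0 */
                        continue;
                }
                map[i] = queue++ % nr_queues;    /* online: spread round-robin */
        }
}

int main(void)
{
        unsigned int map[NR_CPUS];

        update_queue_map(map, 2, 0x0f);   /* CPUs 0-3 online, 2 hw queues */
        return map[4];                    /* offline CPU 4 lands on queue 0 */
}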
index 279c5d674edf3cb38627feb360eb745194eecd4e..788fffd9b4098e35a953ed8cc182a9633f9cc421 100644 (file)
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
        unsigned int i, first = 1;
        ssize_t ret = 0;
 
-       blk_mq_disable_hotplug();
-
        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
                first = 0;
        }
 
-       blk_mq_enable_hotplug();
-
        ret += sprintf(ret + page, "\n");
        return ret;
 }
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
        struct blk_mq_ctx *ctx;
        int i;
 
-       if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+       if (!hctx->nr_ctx)
                return;
 
        hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
        struct blk_mq_ctx *ctx;
        int i, ret;
 
-       if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+       if (!hctx->nr_ctx)
                return 0;
 
        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
        struct blk_mq_ctx *ctx;
        int i, j;
 
+       blk_mq_disable_hotplug();
+
        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);
 
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
        kobject_put(&q->mq_kobj);
 
        kobject_put(&disk_to_dev(disk)->kobj);
+
+       q->mq_sysfs_init_done = false;
+       blk_mq_enable_hotplug();
 }
 
 static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk)
        struct blk_mq_hw_ctx *hctx;
        int ret, i;
 
+       blk_mq_disable_hotplug();
+
        blk_mq_sysfs_init(q);
 
        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
-               return ret;
+               goto out;
 
        kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               hctx->flags |= BLK_MQ_F_SYSFS_UP;
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }
 
-       if (ret) {
+       if (ret)
                blk_mq_unregister_disk(disk);
-               return ret;
-       }
+       else
+               q->mq_sysfs_init_done = true;
+out:
+       blk_mq_enable_hotplug();
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i;
 
+       if (!q->mq_sysfs_init_done)
+               return;
+
        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
 }
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;
 
+       if (!q->mq_sysfs_init_done)
+               return ret;
+
        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
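Taken together, this file's changes form a guard pattern: the registration work runs under blk_mq_disable_hotplug(), and the new mq_sysfs_init_done flag turns the register/unregister helpers into no-ops before setup succeeds or after teardown. The shape of that guard, as a sketch:

struct mq_queue { int mq_sysfs_init_done; };

static int mq_sysfs_register(struct mq_queue *q)
{
        if (!q->mq_sysfs_init_done)   /* never set up, or already torn down */
                return 0;
        /* ... re-add the per-hctx kobjects here ... */
        return 0;
}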
index 9115c6d59948addbc445a26ad0f9ccaf4237b137..ed96474d75cb62fb261526736727c67ea2238d46 100644 (file)
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                void *priv)
 {
-       struct blk_mq_tags *tags = hctx->tags;
+       struct blk_mq_hw_ctx *hctx;
+       int i;
+
+
+               struct blk_mq_tags *tags = hctx->tags;
+
+               /*
+                * If no software queues are currently mapped to this
+                * hardware queue, there's nothing to check
+                */
+               if (!blk_mq_hw_queue_mapped(hctx))
+                       continue;
+
+               if (tags->nr_reserved_tags)
+                       bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+               bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+                     false);
+       }
 
-       if (tags->nr_reserved_tags)
-               bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
-       bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
-                       false);
 }
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
 
 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
 {
index 9eb2cf4f01cb874706d64af87a01e94e0121f7e4..d468a79f2c4a2c11a00387816bcc03b64aea09d1 100644 (file)
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+               void *priv);
 
 enum {
        BLK_MQ_TAG_CACHE_MIN    = 1,
index f2d67b4047a04d7015c3c2af16871972c3b5a720..7785ae96267a197926c700f74bcd6524892a8c01 100644 (file)
@@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq)
  *     Ends all I/O on a request. It does not handle partial completions.
  *     The actual completion happens out-of-order, through a IPI handler.
  **/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
 {
        struct request_queue *q = rq->q;
 
        if (unlikely(blk_should_fake_timeout(q)))
                return;
-       if (!blk_mark_rq_complete(rq))
+       if (!blk_mark_rq_complete(rq)) {
+               rq->errors = error;
                __blk_mq_complete_request(rq);
+       }
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
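With the new signature, a driver hands its error to blk_mq_complete_request() instead of writing rq->errors itself, so the error is stored only by the caller that wins blk_mark_rq_complete(). A hypothetical caller, for illustration only (not part of this patch):

#include <linux/blk-mq.h>

/* Complete a request, reporting -EIO for any driver-level failure. */
static void mydrv_finish_request(struct request *rq, int status)
{
        blk_mq_complete_request(rq, status ? -EIO : 0);
}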
 
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                 * If a request wasn't started before the queue was
                 * marked dying, kill it here or it'll go unnoticed.
                 */
-               if (unlikely(blk_queue_dying(rq->q))) {
-                       rq->errors = -EIO;
-                       blk_mq_complete_request(rq);
-               }
+               if (unlikely(blk_queue_dying(rq->q)))
+                       blk_mq_complete_request(rq, -EIO);
                return;
        }
        if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
                .next           = 0,
                .next_set       = 0,
        };
-       struct blk_mq_hw_ctx *hctx;
        int i;
 
-       queue_for_each_hw_ctx(q, hctx, i) {
-               /*
-                * If not software queues are currently mapped to this
-                * hardware queue, there's nothing to check
-                */
-               if (!blk_mq_hw_queue_mapped(hctx))
-                       continue;
-
-               blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
-       }
+       blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
        if (data.next_set) {
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
+               struct blk_mq_hw_ctx *hctx;
+
                queue_for_each_hw_ctx(q, hctx, i) {
                        /* the hctx may be unmapped, so check it here */
                        if (blk_mq_hw_queue_mapped(hctx))
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
        }
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q)
+static void blk_mq_map_swqueue(struct request_queue *q,
+                              const struct cpumask *online_mask)
 {
        unsigned int i;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct blk_mq_tag_set *set = q->tag_set;
 
+       /*
+        * Avoid others reading an incomplete hctx->cpumask through sysfs
+        */
+       mutex_lock(&q->sysfs_lock);
+
        queue_for_each_hw_ctx(q, hctx, i) {
                cpumask_clear(hctx->cpumask);
                hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
         */
        queue_for_each_ctx(q, ctx, i) {
                /* If the cpu isn't online, the cpu is mapped to first hctx */
-               if (!cpu_online(i))
+               if (!cpumask_test_cpu(i, online_mask))
                        continue;
 
                hctx = q->mq_ops->map_queue(q, i);
                cpumask_set_cpu(i, hctx->cpumask);
-               cpumask_set_cpu(i, hctx->tags->cpumask);
                ctx->index_hw = hctx->nr_ctx;
                hctx->ctxs[hctx->nr_ctx++] = ctx;
        }
 
+       mutex_unlock(&q->sysfs_lock);
+
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_ctxmap *map = &hctx->ctx_map;
 
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                hctx->next_cpu = cpumask_first(hctx->cpumask);
                hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
        }
+
+       queue_for_each_ctx(q, ctx, i) {
+               if (!cpumask_test_cpu(i, online_mask))
+                       continue;
+
+               hctx = q->mq_ops->map_queue(q, i);
+               cpumask_set_cpu(i, hctx->tags->cpumask);
+       }
 }
 
 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q)
                kfree(hctx);
        }
 
+       kfree(q->mq_map);
+       q->mq_map = NULL;
+
        kfree(q->queue_hw_ctx);
 
        /* ctx kobj stays in queue_ctx */
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        if (blk_mq_init_hw_queues(q, set))
                goto err_hctxs;
 
+       get_online_cpus();
        mutex_lock(&all_q_mutex);
-       list_add_tail(&q->all_q_node, &all_q_list);
-       mutex_unlock(&all_q_mutex);
 
+       list_add_tail(&q->all_q_node, &all_q_list);
        blk_mq_add_queue_tag_set(set, q);
+       blk_mq_map_swqueue(q, cpu_online_mask);
 
-       blk_mq_map_swqueue(q);
+       mutex_unlock(&all_q_mutex);
+       put_online_cpus();
 
        return q;
 
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q)
 {
        struct blk_mq_tag_set   *set = q->tag_set;
 
+       mutex_lock(&all_q_mutex);
+       list_del_init(&q->all_q_node);
+       mutex_unlock(&all_q_mutex);
+
        blk_mq_del_queue_tag_set(q);
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
 
        percpu_ref_exit(&q->mq_usage_counter);
-
-       kfree(q->mq_map);
-
-       q->mq_map = NULL;
-
-       mutex_lock(&all_q_mutex);
-       list_del_init(&q->all_q_node);
-       mutex_unlock(&all_q_mutex);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+                               const struct cpumask *online_mask)
 {
        WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
        blk_mq_sysfs_unregister(q);
 
-       blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+       blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
 
        /*
         * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
         * involves free and re-allocate memory, worthy doing?)
         */
 
-       blk_mq_map_swqueue(q);
+       blk_mq_map_swqueue(q, online_mask);
 
        blk_mq_sysfs_register(q);
 }
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
                                      unsigned long action, void *hcpu)
 {
        struct request_queue *q;
+       int cpu = (unsigned long)hcpu;
+       /*
+        * New online cpumask which is going to be set in this hotplug event.
+        * Declare this cpumask as static, since cpu-hotplug operations are
+        * invoked one by one and dynamically allocating it could fail.
+        */
+       static struct cpumask online_new;
 
        /*
-        * Before new mappings are established, hotadded cpu might already
-        * start handling requests. This doesn't break anything as we map
-        * offline CPUs to first hardware queue. We will re-init the queue
-        * below to get optimal settings.
+        * Before hotadded cpu starts handling requests, new mappings must
+        * be established.  Otherwise, these requests in hw queue might
+        * never be dispatched.
+        *
+        * For example, there is a single hw queue (hctx) and two CPU queues
+        * (ctx0 for CPU0, and ctx1 for CPU1).
+        *
+        * Now CPU1 is just onlined and a request is inserted into
+        * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
+        * still zero.
+        *
+        * And then while running hw queue, flush_busy_ctxs() finds bit0 is
+        * set in pending bitmap and tries to retrieve requests in
+        * hctx->ctxs[0]->rq_list.  But hctx->ctxs[0] is a pointer to ctx0,
+        * so the request in ctx1->rq_list is ignored.
         */
-       if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
-           action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DEAD:
+       case CPU_UP_CANCELED:
+               cpumask_copy(&online_new, cpu_online_mask);
+               break;
+       case CPU_UP_PREPARE:
+               cpumask_copy(&online_new, cpu_online_mask);
+               cpumask_set_cpu(cpu, &online_new);
+               break;
+       default:
                return NOTIFY_OK;
+       }
 
        mutex_lock(&all_q_mutex);
 
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
        }
 
        list_for_each_entry(q, &all_q_list, all_q_node)
-               blk_mq_queue_reinit(q);
+               blk_mq_queue_reinit(q, &online_new);
 
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_unfreeze_queue(q);
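The notifier now reinitializes queues against the mask the system is about to have: on CPU_UP_PREPARE the incoming CPU is added to a copy of cpu_online_mask before it can run requests, while after CPU_DEAD or CPU_UP_CANCELED the live mask is already correct. A miniature model of that mask construction:

#include <stdio.h>

enum hp_action { HP_UP_PREPARE, HP_DEAD };

static unsigned long build_online_new(unsigned long online, int cpu,
                                      enum hp_action action)
{
        if (action == HP_UP_PREPARE)
                online |= 1UL << cpu;   /* include the CPU that is coming up */
        return online;                  /* DEAD/UP_CANCELED: already correct */
}

int main(void)
{
        printf("%#lx\n", build_online_new(0x3, 2, HP_UP_PREPARE));  /* 0x7 */
        return 0;
}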
index 6a48c4c0d8a2a6efb881ea29b772df3bba9d5540..f4fea79649105b4e134860b53294ef2dac90a95f 100644 (file)
@@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void);
  * CPU -> queue mappings
  */
 extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
-extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
+extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+                                  const struct cpumask *online_mask);
 extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
 
 /*
index 6d88dd15c98da8cada935c0dc937eb5c8db158cd..19709663241223968ee73f3d1847be9e253969f7 100644 (file)
@@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
                srlen = cert->raw_serial_size;
                q = cert->raw_serial;
        }
-       if (srlen > 1 && *q == 0) {
-               srlen--;
-               q++;
-       }
 
        ret = -ENOMEM;
        desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
index 2614a839c60dab8aca4d2955368888ae5c0fefc3..42c66b64c12cefd8c1491e7b91af138b86ddf5af 100644 (file)
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
                goto err_exit;
 
        mutex_lock(&ec->mutex);
+       result = -ENODATA;
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
+                       result = 0;
                        q->handler = acpi_ec_get_query_handler(handler);
                        ec_dbg_evt("Query(0x%02x) scheduled",
                                   q->handler->query_bit);
index 6da0f9beab19880ac71199b7cd99fd57b2fa6e21..c9336751e5e3708f96e9f972ab05fc5454970adb 100644 (file)
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev)
 
        /* Interrupt Line values above 0xF are forbidden */
        if (dev->irq > 0 && (dev->irq <= 0xF) &&
+           acpi_isa_irq_available(dev->irq) &&
            (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
                dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
                         pin_name(dev->pin), dev->irq);
index 3b4ea98e3ea069eca5f9e0094f520c31eee639b2..7c8408b946ca10160d41648f14f99a6716529903 100644 (file)
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void)
                            PIRQ_PENALTY_PCI_POSSIBLE;
                }
        }
-       /* Add a penalty for the SCI */
-       acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
        return 0;
 }
 
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
                                irq = link->irq.possible[i];
                }
        }
+       if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+               printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+                           "Try pci=noacpi or acpi=off\n",
+                           acpi_device_name(link->device),
+                           acpi_device_bid(link->device));
+               return -ENODEV;
+       }
 
        /* Attempt to enable the link device at this IRQ. */
        if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active)
        }
 }
 
+bool acpi_isa_irq_available(int irq)
+{
+       return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+                           acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
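acpi_isa_irq_available() treats an IRQ as usable unless the penalty table marks it as permanently claimed; indices beyond the tracked range have nothing recorded against them and pass. A stand-alone sketch with illustrative values rather than the kernel's actual penalty table:

#include <stdbool.h>

#define PENALTY_ISA_ALWAYS 0x10000   /* illustrative threshold */
#define NR_TRACKED 16

static int irq_penalty[NR_TRACKED];

static bool isa_irq_available(int irq)
{
        return irq >= 0 && (irq >= NR_TRACKED ||
                            irq_penalty[irq] < PENALTY_ISA_ALWAYS);
}

int main(void)
{
        irq_penalty[5] = PENALTY_ISA_ALWAYS;
        return isa_irq_available(5) ? 1 : 0;   /* IRQ 5 is claimed: exit 0 */
}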
+
 /*
  * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
  * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
index 28cd75c535b047f2c4276fed4308fa8159491007..7ae7cd990fbf79bf39de570a6a0fca5bd47cef94 100644 (file)
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
        u32 microvolt[3] = {0};
        int count, ret;
 
-       count = of_property_count_u32_elems(opp->np, "opp-microvolt");
-       if (!count)
+       /* Missing property isn't a problem, but an invalid entry is */
+       if (!of_find_property(opp->np, "opp-microvolt", NULL))
                return 0;
 
+       count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+       if (count < 0) {
+               dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+                       __func__, count);
+               return count;
+       }
+
        /* There can be one or three elements here */
        if (count != 1 && count != 3) {
                dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
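The reordered checks distinguish three cases: property absent (not an error, return 0), property present but unreadable (propagate the negative count), and present with the wrong element count (reject). Sketched with stubbed property helpers in place of the of_* API:

#include <stdbool.h>

/* Stand-ins for of_find_property() / of_property_count_u32_elems(). */
static bool prop_exists(const char *name)    { (void)name; return true; }
static int  prop_count_u32(const char *name) { (void)name; return 3; }

static int get_microvolt(const char *name)
{
        int count;

        if (!prop_exists(name))
                return 0;               /* missing is not a problem */

        count = prop_count_u32(name);
        if (count < 0)
                return count;           /* present but malformed: error */

        if (count != 1 && count != 3)
                return -22;             /* -EINVAL: wrong element count */
        return count;
}

int main(void)
{
        return get_microvolt("opp-microvolt") == 3 ? 0 : 1;
}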
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
  * share a common logic which is isolated here.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@ unlock:
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  *
  * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
  * successful.
  */
 int dev_pm_opp_disable(struct device *dev, unsigned long freq)
index f9889b6bc02c316bed46e130c9f5c7ce38b7b93b..674f800a3b5760ad6374c98fa11e88097e30d160 100644 (file)
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 {
        const bool write = cmd->rq->cmd_flags & REQ_WRITE;
        struct loop_device *lo = cmd->rq->q->queuedata;
-       int ret = -EIO;
+       int ret = 0;
 
-       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+       if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+               ret = -EIO;
                goto failed;
+       }
 
        ret = do_req_filebacked(lo, cmd->rq);
-
  failed:
-       if (ret)
-               cmd->rq->errors = -EIO;
-       blk_mq_complete_request(cmd->rq);
+       blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
 }
 
 static void loop_queue_write_work(struct work_struct *work)
index a295b98c6baed2df8bdd9484a62e44ca9bbfdc7a..1c9e4fe5aa440cbde62bb5e6c0cf0c397d8417a7 100644 (file)
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
        case NULL_IRQ_SOFTIRQ:
                switch (queue_mode)  {
                case NULL_Q_MQ:
-                       blk_mq_complete_request(cmd->rq);
+                       blk_mq_complete_request(cmd->rq, cmd->rq->errors);
                        break;
                case NULL_Q_RQ:
                        blk_complete_request(cmd->rq);
index b97fc3fe0916a6b6fd3fb2be32be44ce3c137b39..6f04771f1019798cc2feabf73eff2ddbadc84b81 100644 (file)
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                        spin_unlock_irqrestore(req->q->queue_lock, flags);
                        return;
                }
+
                if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                        if (cmd_rq->ctx == CMD_CTX_CANCELLED)
-                               req->errors = -EINTR;
-                       else
-                               req->errors = status;
+                               status = -EINTR;
                } else {
-                       req->errors = nvme_error_status(status);
+                       status = nvme_error_status(status);
                }
-       } else
-               req->errors = 0;
+       }
+
        if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
                u32 result = le32_to_cpup(&cqe->result);
                req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
        }
        nvme_free_iod(nvmeq->dev, iod);
 
-       blk_mq_complete_request(req);
+       blk_mq_complete_request(req, status);
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ns && ns->ms && !blk_integrity_rq(req)) {
                if (!(ns->pi_type && ns->ms == 8) &&
                                        req->cmd_type != REQ_TYPE_DRV_PRIV) {
-                       req->errors = -EFAULT;
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, -EFAULT);
                        return BLK_MQ_RQ_QUEUE_OK;
                }
        }
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
        list_sort(NULL, &dev->namespaces, ns_cmp);
 }
 
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+       struct nvme_queue *nvmeq;
+       int i;
+
+       for (i = 0; i < dev->online_queues; i++) {
+               nvmeq = dev->queues[i];
+
+               if (!nvmeq->tags || !(*nvmeq->tags))
+                       continue;
+
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                       blk_mq_tags_cpumask(*nvmeq->tags));
+       }
+}
+
 static void nvme_dev_scan(struct work_struct *work)
 {
        struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work)
                return;
        nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
        kfree(ctrl);
+       nvme_set_irq_hints(dev);
 }
 
 /*
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = {
        .compat_ioctl   = nvme_dev_ioctl,
 };
 
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
-       struct nvme_queue *nvmeq;
-       int i;
-
-       for (i = 0; i < dev->online_queues; i++) {
-               nvmeq = dev->queues[i];
-
-               if (!nvmeq->tags || !(*nvmeq->tags))
-                       continue;
-
-               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
-                                       blk_mq_tags_cpumask(*nvmeq->tags));
-       }
-}
-
 static int nvme_dev_start(struct nvme_dev *dev)
 {
        int result;
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
        if (result)
                goto free_tags;
 
-       nvme_set_irq_hints(dev);
-
        dev->event_limit = 1;
        return result;
 
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
        } else {
                nvme_unfreeze_queues(dev);
                nvme_dev_add(dev);
-               nvme_set_irq_hints(dev);
        }
        return 0;
 }
index e93899cc6f60be0bd13b45dde3b8d697b7a733c8..6ca35495a5becdbac067cb4338981191fd6bc56a 100644 (file)
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
-                       blk_mq_complete_request(vbr->req);
+                       blk_mq_complete_request(vbr->req, vbr->req->errors);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
index deb3f001791f159c5c7ebce19814de31e3106a5e..767657565de64e73f61304741fe9f39c496a2892 100644 (file)
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
 
 static int xen_blkif_disconnect(struct xen_blkif *blkif)
 {
+       struct pending_req *req, *n;
+       int i = 0, j;
+
        if (blkif->xenblkd) {
                kthread_stop(blkif->xenblkd);
                wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
        /* Remove all persistent grants and the cache of ballooned pages. */
        xen_blkbk_free_caches(blkif);
 
+       /* Check that there is no request in use */
+       list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+               list_del(&req->free_list);
+
+               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+                       kfree(req->segments[j]);
+
+               for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+                       kfree(req->indirect_pages[j]);
+
+               kfree(req);
+               i++;
+       }
+
+       WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+       blkif->nr_ring_pages = 0;
+
        return 0;
 }
 
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-       struct pending_req *req, *n;
-       int i = 0, j;
 
        xen_blkif_disconnect(blkif);
        xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
        BUG_ON(!list_empty(&blkif->free_pages));
        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
 
-       /* Check that there is no request in use */
-       list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
-               list_del(&req->free_list);
-
-               for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
-                       kfree(req->segments[j]);
-
-               for (j = 0; j < MAX_INDIRECT_PAGES; j++)
-                       kfree(req->indirect_pages[j]);
-
-               kfree(req);
-               i++;
-       }
-
-       WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-
        kmem_cache_free(xen_blkif_cachep, blkif);
 }
 
index 0823a96902f87fa90d2e35a425183ea0de2e0049..611170896b8c94ce1d7494d62116ba1fde574fce 100644 (file)
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
        RING_IDX i, rp;
        unsigned long flags;
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
+       int error;
 
        spin_lock_irqsave(&info->io_lock, flags);
 
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        continue;
                }
 
-               req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+               error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                           info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
                                queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
                        }
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
                        if (unlikely(bret->status == BLKIF_RSP_ERROR &&
                                     info->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               req->errors = -EOPNOTSUPP;
+                               error = -EOPNOTSUPP;
                        }
-                       if (unlikely(req->errors)) {
-                               if (req->errors == -EOPNOTSUPP)
-                                       req->errors = 0;
+                       if (unlikely(error)) {
+                               if (error == -EOPNOTSUPP)
+                                       error = 0;
                                info->feature_flush = 0;
                                xlvbd_flush(info);
                        }
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);
 
-                       blk_mq_complete_request(req);
+                       blk_mq_complete_request(req, error);
                        break;
                default:
                        BUG();
index bb2c2b05096455066826a13d5ad1156248f5e64e..d3c1742ded1af7655c3e2e77031ab3801ea84761 100644 (file)
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
        bc_timer.freq = clk_get_rate(timer_clk);
 
        irq = irq_of_parse_and_map(np, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
                return;
        }
index edacf3902e107d9ac60c84cdaba4f4ad1822213c..1cea08cf603eb30d5028e157dc8927aef4a004a9 100644 (file)
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
        int irq, error;
 
        irq  = irq_of_parse_and_map(np, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                pr_err("%s: failed to map interrupts\n", __func__);
                return;
        }
index a165b4bfd3300e97d409f2053b71adb276392336..dd24375b76ddcba72409d3c5c1285f19c172a45f 100644 (file)
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
        return desc;
 }
 
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+       memset(&desc->lld, 0, sizeof(desc->lld));
+       INIT_LIST_HEAD(&desc->descs_list);
+       desc->direction = DMA_TRANS_NONE;
+       desc->xfer_size = 0;
+       desc->active_xfer = false;
+}
+
 /* Call must be protected by lock. */
 static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 {
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
                desc = list_first_entry(&atchan->free_descs_list,
                                        struct at_xdmac_desc, desc_node);
                list_del(&desc->desc_node);
-               desc->active_xfer = false;
+               at_xdmac_init_used_desc(desc);
        }
 
        return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 
        if (xt->src_inc) {
                if (xt->src_sgl)
-                       chan_cc |=  AT_XDMAC_CC_SAM_UBS_DS_AM;
+                       chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
                else
                        chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
        }
 
        if (xt->dst_inc) {
                if (xt->dst_sgl)
-                       chan_cc |=  AT_XDMAC_CC_DAM_UBS_DS_AM;
+                       chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
                else
                        chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
        }
index 3ff284c8e3d5aef72f229017c883c73cbe13403f..09479d4be4db3d776fd1f3400724d13f26808428 100644 (file)
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
        mutex_lock(&dma_list_mutex);
 
        if (chan->client_count == 0) {
+               struct dma_device *device = chan->device;
+
+               dma_cap_set(DMA_PRIVATE, device->cap_mask);
+               device->privatecnt++;
                err = dma_chan_get(chan);
-               if (err)
+               if (err) {
                        pr_debug("%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
+                       chan = NULL;
+                       if (--device->privatecnt == 0)
+                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+               }
        } else
                chan = NULL;
 
index cf1c87fa1edd557eb57f53dd41c11c02a440ea82..bedce038c6e281bb1e1bf6ba89585c14d532a5b2 100644 (file)
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        INIT_LIST_HEAD(&dw->dma.channels);
        for (i = 0; i < nr_channels; i++) {
                struct dw_dma_chan      *dwc = &dw->chan[i];
-               int                     r = nr_channels - i - 1;
 
                dwc->chan.device = &dw->dma;
                dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 
                /* 7 is highest priority & 0 is lowest. */
                if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
-                       dwc->priority = r;
+                       dwc->priority = nr_channels - i - 1;
                else
                        dwc->priority = i;
 
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                /* Hardware configuration */
                if (autocfg) {
                        unsigned int dwc_params;
+                       unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
                        void __iomem *addr = chip->regs + r * sizeof(u32);
 
                        dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
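The split the hunk introduces is deliberate: dwc->priority still follows nr_channels, but the DWC_PARAMS offset must be computed from DW_DMA_MAX_NR_CHANNELS, because the per-channel parameter words sit at fixed offsets counted down from the top of the block regardless of how many channels this instance exposes. A toy illustration of the offset rule (the maximum of 8 is assumed for the sketch):

#include <stdio.h>

#define MAX_NR_CHANNELS 8   /* stands in for DW_DMA_MAX_NR_CHANNELS */

static unsigned long params_offset(unsigned int chan)
{
        /* Channel 0's word sits highest in the block, channel 7's lowest. */
        return (MAX_NR_CHANNELS - chan - 1) * sizeof(unsigned int);
}

int main(void)
{
        unsigned int i, nr_channels = 4;

        /* Offsets stay the same whether 4 or all 8 channels are enabled. */
        for (i = 0; i < nr_channels; i++)
                printf("chan %u -> +%lu\n", i, params_offset(i));
        return 0;
}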
index 18c14e1f1414e650969ff3c9e34431072b3abd83..48d6d9e94f6763c91bcf069848d9ef13e2eed48d 100644 (file)
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
        struct idma64_desc *desc = idma64c->desc;
        struct idma64_hw_desc *hw;
        size_t bytes = desc->length;
-       u64 llp;
-       u32 ctlhi;
+       u64 llp = channel_readq(idma64c, LLP);
+       u32 ctlhi = channel_readl(idma64c, CTL_HI);
        unsigned int i = 0;
 
-       llp = channel_readq(idma64c, LLP);
        do {
                hw = &desc->hw[i];
-       } while ((hw->llp != llp) && (++i < desc->ndesc));
+               if (hw->llp == llp)
+                       break;
+               bytes -= hw->len;
+       } while (++i < desc->ndesc);
 
        if (!i)
                return bytes;
 
-       do {
-               bytes -= desc->hw[--i].len;
-       } while (i);
+       /* The current chunk is not fully transferred yet */
+       bytes += desc->hw[--i].len;
 
-       ctlhi = channel_readl(idma64c, CTL_HI);
        return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
 }
 
index 5cb61ce01036fef2dc5248d11f99859e2dcb9d86..fc4156afa070306cd2fee4502f361717f364706b 100644 (file)
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
                return;
 
        /* clear the channel mapping in DRCMR */
-       reg = pxad_drcmr(chan->drcmr);
-       writel_relaxed(0, chan->phy->base + reg);
+       if (chan->drcmr <= DRCMR_CHLNUM) {
+               reg = pxad_drcmr(chan->drcmr);
+               writel_relaxed(0, chan->phy->base + reg);
+       }
 
        spin_lock_irqsave(&pdev->phy_lock, flags);
        for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
                "%s(); phy=%p(%d) misaligned=%d\n", __func__,
                phy, phy->idx, misaligned);
 
-       reg = pxad_drcmr(phy->vchan->drcmr);
-       writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+       if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+               reg = pxad_drcmr(phy->vchan->drcmr);
+               writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+       }
 
        dalgn = phy_readl_relaxed(phy, DALGN);
        if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
        struct dma_async_tx_descriptor *tx;
        struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
 
+       INIT_LIST_HEAD(&vd->node);
        tx = vchan_tx_prep(vc, vd, tx_flags);
        tx->tx_submit = pxad_tx_submit;
        dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
                width = chan->cfg.src_addr_width;
                dev_addr = chan->cfg.src_addr;
                *dev_src = dev_addr;
-               *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+               *dcmd |= PXA_DCMD_INCTRGADDR;
+               if (chan->drcmr <= DRCMR_CHLNUM)
+                       *dcmd |= PXA_DCMD_FLOWSRC;
        }
        if (dir == DMA_MEM_TO_DEV) {
                maxburst = chan->cfg.dst_maxburst;
                width = chan->cfg.dst_addr_width;
                dev_addr = chan->cfg.dst_addr;
                *dev_dst = dev_addr;
-               *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+               *dcmd |= PXA_DCMD_INCSRCADDR;
+               if (chan->drcmr <= DRCMR_CHLNUM)
+                       *dcmd |= PXA_DCMD_FLOWTRG;
        }
        if (dir == DMA_MEM_TO_MEM)
                *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
        else
                curr = phy_readl_relaxed(chan->phy, DTADR);
 
+       /*
+        * curr has to be actually read before checking descriptor
+        * completion, so that a curr inside a status updater
+        * descriptor implies the following test returns true; this
+        * also prevents reordering of the curr load and the test.
+        */
+       rmb();
+       if (is_desc_completed(vd))
+               goto out;
+
        for (i = 0; i < sw_desc->nb_desc - 1; i++) {
                hw_desc = sw_desc->hw_desc[i];
                if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
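The same two-load ordering can be written in portable C11: an acquire load of the position forbids the completion check from being hoisted above it. This is a user-space analogue of the rmb() added above, not the driver's code:

#include <stdatomic.h>

static _Atomic unsigned int dma_curr;   /* position, updated elsewhere */
static _Atomic int desc_done;           /* completion flag, updated elsewhere */

static unsigned int sample_position(void)
{
        unsigned int curr = atomic_load_explicit(&dma_curr,
                                                 memory_order_acquire);

        /* The acquire above keeps this load after the curr load. */
        if (atomic_load_explicit(&desc_done, memory_order_relaxed))
                return 0;               /* finished: nothing left to report */
        return curr;
}

int main(void)
{
        return (int)sample_position();
}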
index a1a500d96ff2788db7355a65284a9a3b54c0a1e0..1661d518224a7e4e57ca6c8c717096b5a87333e1 100644 (file)
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
 static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
 {
        struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
-       struct sun4i_dma_promise *promise;
+       struct sun4i_dma_promise *promise, *tmp;
 
        /* Free all the demands and completed demands */
-       list_for_each_entry(promise, &contract->demands, list)
+       list_for_each_entry_safe(promise, tmp, &contract->demands, list)
                kfree(promise);
 
-       list_for_each_entry(promise, &contract->completed_demands, list)
+       list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
                kfree(promise);
 
        kfree(contract);
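The switch to the _safe iterators matters because kfree() of the node the loop stands on would leave the iterator reading freed memory when it advances; the extra cursor caches the successor first. The same pattern on a minimal singly linked list:

#include <stdlib.h>

struct node { struct node *next; };

static void free_all(struct node *head)
{
        struct node *n, *tmp;

        for (n = head; n; n = tmp) {
                tmp = n->next;   /* cache the successor before freeing */
                free(n);
        }
}

int main(void)
{
        struct node *head = calloc(1, sizeof(*head));   /* next == NULL */

        free_all(head);
        return 0;
}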
index b23e8d52d1263abc11cc126e9e0b80e1dcc5cc1b..8d57b1b12e411ef902d26af984e7d34a741a4cf2 100644 (file)
@@ -59,7 +59,6 @@
 #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN                0xD070
 #define XGENE_DMA_RING_BLK_MEM_RDY             0xD074
 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL         0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v)             (((v) & 0x0001FFFE) >> 1)
 #define XGENE_DMA_RING_ID_GET(owner, num)      (((owner) << 6) | (num))
 #define XGENE_DMA_RING_DST_ID(v)               ((1 << 10) | (v))
 #define XGENE_DMA_RING_CMD_OFFSET              0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
        return flyby_type[src_cnt];
 }
 
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
-       u32 __iomem *cmd_base = ring->cmd_base;
-       u32 ring_state = ioread32(&cmd_base[1]);
-
-       return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
 static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
                                     dma_addr_t *paddr)
 {
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
        dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 }
 
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
-                                  struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+                                   struct xgene_dma_desc_sw *desc_sw)
 {
+       struct xgene_dma_ring *ring = &chan->tx_ring;
        struct xgene_dma_desc_hw *desc_hw;
 
-       /* Check if can push more descriptor to hw for execution */
-       if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
-               return -EBUSY;
-
        /* Get hw descriptor from DMA tx ring */
        desc_hw = &ring->desc_hw[ring->head];
 
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
                memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
        }
 
+       /* Increment the pending transaction count */
+       chan->pending += ((desc_sw->flags &
+                         XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
        /* Notify the hw that we have descriptor ready for execution */
        iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
                  2 : 1, ring->cmd);
-
-       return 0;
 }
 
 /**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
-       int ret;
 
        /*
         * If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
                if (chan->pending >= chan->max_outstanding)
                        return;
 
-               ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
-               if (ret)
-                       return;
+               xgene_chan_xfer_request(chan, desc_sw);
 
                /*
                 * Delete this element from ld pending queue and append it to
                 * ld running queue
                 */
                list_move_tail(&desc_sw->node, &chan->ld_running);
-
-               /* Increment the pending transaction count */
-               chan->pending++;
        }
 }
 
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                 * Decrement the pending transaction count
                 * as we have processed one
                 */
-               chan->pending--;
+               chan->pending -= ((desc_sw->flags &
+                                 XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 
                /*
                 * Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
                                     struct xgene_dma_ring *ring,
                                     enum xgene_dma_ring_cfgsize cfgsize)
 {
+       int ret;
+
        /* Setup DMA ring descriptor variables */
        ring->pdma = chan->pdma;
        ring->cfgsize = cfgsize;
        ring->num = chan->pdma->ring_num++;
        ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
 
-       ring->size = xgene_dma_get_ring_size(chan, cfgsize);
-       if (ring->size <= 0)
-               return ring->size;
+       ret = xgene_dma_get_ring_size(chan, cfgsize);
+       if (ret <= 0)
+               return ret;
+       ring->size = ret;
 
        /* Allocate memory for DMA ring descriptor */
        ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
                 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
 
        /* Set the max outstanding request possible to this channel */
-       chan->max_outstanding = rx_ring->slots;
+       chan->max_outstanding = tx_ring->slots;
 
        return ret;
 }
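Two accounting changes cooperate here: pending now moves by two slots for a 64-byte descriptor, on submit and on completion alike, and the ceiling is taken from the tx ring the descriptors are actually written to. A compact model of that bookkeeping:

#include <stdbool.h>

struct chan_acct { int pending, max_outstanding; };

static bool can_submit(const struct chan_acct *c)
{
        return c->pending < c->max_outstanding;
}

static void on_submit(struct chan_acct *c, bool is_64b_desc)
{
        c->pending += is_64b_desc ? 2 : 1;   /* 64 B descriptors take 2 slots */
}

static void on_complete(struct chan_acct *c, bool is_64b_desc)
{
        c->pending -= is_64b_desc ? 2 : 1;
}

int main(void)
{
        struct chan_acct c = { 0, 4 };

        if (can_submit(&c))
                on_submit(&c, true);    /* pending becomes 2 */
        on_complete(&c, true);          /* back to 0 */
        return c.pending;
}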
index 39915a6b7986e2fba00d285370f57f27ed3eeb9a..c017fcd8e07c29b65b7a480a1b817b4645c40f33 100644 (file)
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct dma_chan *chan;
        struct zx_dma_chan *c;
 
-       if (request > d->dma_requests)
+       if (request >= d->dma_requests)
                return NULL;
 
        chan = dma_get_any_slave_channel(&d->slave);
index 6fd3da938717c27c233af9c6b66b186f69fc93fa..413fcf2970c0c01b3ab825b1cccd5895986b0c47 100644 (file)
@@ -1,6 +1,14 @@
 #
 # Makefile for linux kernel
 #
+
+#
+# ARM64 maps EFI runtime services at userspace addresses
+# that have no KASAN shadow, so dereferencing these addresses
+# in efi_call_virt() will crash if this code is instrumented.
+#
+KASAN_SANITIZE_runtime-wrappers.o      := n
+
 obj-$(CONFIG_EFI)                      += efi.o vars.o reboot.o
 obj-$(CONFIG_EFI_VARS)                 += efivars.o
 obj-$(CONFIG_EFI_ESRT)                 += esrt.o
index 816dbe9f4b82e68314f5c5408b52649e4b9b4383..bca9a76cbd332bace4d303457010b64eaa3d2b5f 100644 (file)
@@ -14,6 +14,8 @@ cflags-$(CONFIG_ARM64)                := $(subst -pg,,$(KBUILD_CFLAGS))
 cflags-$(CONFIG_ARM)           := $(subst -pg,,$(KBUILD_CFLAGS)) \
                                   -fno-builtin -fpic -mno-single-pic-base
 
+cflags-$(CONFIG_EFI_ARMSTUB)   += -I$(srctree)/scripts/dtc/libfdt
+
 KBUILD_CFLAGS                  := $(cflags-y) \
                                   $(call cc-option,-ffreestanding) \
                                   $(call cc-option,-fno-stack-protector)
@@ -22,7 +24,15 @@ GCOV_PROFILE                 := n
 KASAN_SANITIZE                 := n
 
 lib-y                          := efi-stub-helper.o
-lib-$(CONFIG_EFI_ARMSTUB)      += arm-stub.o fdt.o
+
+# include the stub's generic dependencies from lib/ when building for ARM/arm64
+arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
+
+$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
+       $(call if_changed_rule,cc_o_c)
+
+lib-$(CONFIG_EFI_ARMSTUB)      += arm-stub.o fdt.o string.o \
+                                  $(patsubst %.c,lib-%.o,$(arm-deps))
 
 #
 # arm64 puts the stub in the kernel proper, which will unnecessarily retain all
@@ -30,10 +40,27 @@ lib-$(CONFIG_EFI_ARMSTUB)   += arm-stub.o fdt.o
 # So let's apply the __init annotations at the section level, by prefixing
 # the section names directly. This will ensure that even all the inline string
 # literals are covered.
+# The fact that the stub and the kernel proper are essentially the same binary
+# also means that we need to be extra careful to make sure that the stub does
+# not rely on any absolute symbol references, considering that the virtual
+# kernel mapping that the linker uses is not active yet when the stub is
+# executing. So build all C dependencies of the EFI stub into libstub, and do
+# a verification pass to see if any absolute relocations exist in any of the
+# object files.
 #
-extra-$(CONFIG_ARM64)          := $(lib-y)
-lib-$(CONFIG_ARM64)            := $(patsubst %.o,%.init.o,$(lib-y))
+extra-$(CONFIG_EFI_ARMSTUB)    := $(lib-y)
+lib-$(CONFIG_EFI_ARMSTUB)      := $(patsubst %.o,%.stub.o,$(lib-y))
+
+STUBCOPY_FLAGS-y               := -R .debug* -R *ksymtab*
+STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
+                                  --prefix-symbols=__efistub_
+STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
+
+$(obj)/%.stub.o: $(obj)/%.o FORCE
+       $(call if_changed,stubcopy)
 
-OBJCOPYFLAGS := --prefix-alloc-sections=.init
-$(obj)/%.init.o: $(obj)/%.o FORCE
-       $(call if_changed,objcopy)
+quiet_cmd_stubcopy = STUBCPY $@
+      cmd_stubcopy = if $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; then     \
+                    $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y)        \
+                    && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
+                        rm -f $@; /bin/false); else /bin/false; fi
index e29560e6b40b0e5f28a141e7c88ceba1bdfa22ff..950c87f5d279335210088e4154eda135b24304d5 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/efi.h>
+#include <linux/sort.h>
 #include <asm/efi.h>
 
 #include "efistub.h"
@@ -305,6 +306,44 @@ fail:
  */
 #define EFI_RT_VIRTUAL_BASE    0x40000000
 
+static int cmp_mem_desc(const void *l, const void *r)
+{
+       const efi_memory_desc_t *left = l, *right = r;
+
+       return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+                                efi_memory_desc_t *right)
+{
+       u64 left_end;
+
+       if (left == NULL || right == NULL)
+               return false;
+
+       left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+       return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+                                                     efi_memory_desc_t *right)
+{
+       static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+                                        EFI_MEMORY_WC | EFI_MEMORY_UC |
+                                        EFI_MEMORY_RUNTIME;
+
+       return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
 /*
  * efi_get_virtmap() - create a virtual mapping for the EFI memory map
  *
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
                     int *count)
 {
        u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
-       efi_memory_desc_t *out = runtime_map;
+       efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
        int l;
 
-       for (l = 0; l < map_size; l += desc_size) {
-               efi_memory_desc_t *in = (void *)memory_map + l;
+       /*
+        * To work around potential issues with the Properties Table feature
+        * introduced in UEFI 2.5, which may split PE/COFF executable images
+        * in memory into several RuntimeServicesCode and RuntimeServicesData
+        * regions, we need to preserve the relative offsets between adjacent
+        * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+        * The easiest way to find adjacent regions is to sort the memory map
+        * before traversing it.
+        */
+       sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+       for (l = 0; l < map_size; l += desc_size, prev = in) {
                u64 paddr, size;
 
+               in = (void *)memory_map + l;
                if (!(in->attribute & EFI_MEMORY_RUNTIME))
                        continue;
 
+               paddr = in->phys_addr;
+               size = in->num_pages * EFI_PAGE_SIZE;
+
                /*
                 * Make the mapping compatible with 64k pages: this allows
                 * a 4k page size kernel to kexec a 64k page size kernel and
                 * vice versa.
                 */
-               paddr = round_down(in->phys_addr, SZ_64K);
-               size = round_up(in->num_pages * EFI_PAGE_SIZE +
-                               in->phys_addr - paddr, SZ_64K);
-
-               /*
-                * Avoid wasting memory on PTEs by choosing a virtual base that
-                * is compatible with section mappings if this region has the
-                * appropriate size and physical alignment. (Sections are 2 MB
-                * on 4k granule kernels)
-                */
-               if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
-                       efi_virt_base = round_up(efi_virt_base, SZ_2M);
+               if (!regions_are_adjacent(prev, in) ||
+                   !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+                       paddr = round_down(in->phys_addr, SZ_64K);
+                       size += in->phys_addr - paddr;
+
+                       /*
+                        * Avoid wasting memory on PTEs by choosing a virtual
+                        * base that is compatible with section mappings if this
+                        * region has the appropriate size and physical
+                        * alignment. (Sections are 2 MB on 4k granule kernels)
+                        */
+                       if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+                               efi_virt_base = round_up(efi_virt_base, SZ_2M);
+                       else
+                               efi_virt_base = round_up(efi_virt_base, SZ_64K);
+               }
 
                in->virt_addr = efi_virt_base + in->phys_addr - paddr;
                efi_virt_base += size;
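To make the rounding and adjacency arithmetic above concrete, here is a minimal host-side sketch. It is plain standalone C with hypothetical example values and libc headers, not the stub code itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_64K        0x10000ULL
#define EFI_PAGE_SIZE 0x1000ULL   /* EFI pages are always 4 KB */

struct region { uint64_t phys_addr, num_pages; };

/* true when @l ends exactly where @r starts (the adjacency test above) */
static bool adjacent(const struct region *l, const struct region *r)
{
        return l->phys_addr + l->num_pages * EFI_PAGE_SIZE == r->phys_addr;
}

int main(void)
{
        struct region in   = { .phys_addr = 0x4000A000, .num_pages = 2 };
        struct region next = { .phys_addr = 0x4000C000, .num_pages = 1 };

        /* round the start down and grow the size so the 64 KB-aligned
         * mapping still covers the whole original region */
        uint64_t paddr = in.phys_addr & ~(SZ_64K - 1);
        uint64_t size  = in.num_pages * EFI_PAGE_SIZE + in.phys_addr - paddr;

        printf("map 0x%llx+0x%llx, adjacent to next: %d\n",
               (unsigned long long)paddr, (unsigned long long)size,
               adjacent(&in, &next));   /* 0x40000000+0xc000, adjacent: 1 */
        return 0;
}

Rounding the start down while growing the size keeps the 64 KB-aligned mapping covering the original region, which is what lets 4k and 64k granule kernels kexec each other.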
index ef5d764e2a27ea506775e7c117dc291b9749ded1..b62e2f5dcab3b2d95074b534de803915145dc20c 100644 (file)
@@ -147,15 +147,6 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        if (status)
                goto fdt_set_fail;
 
-       /*
-        * Add kernel version banner so stub/kernel match can be
-        * verified.
-        */
-       status = fdt_setprop_string(fdt, node, "linux,uefi-stub-kern-ver",
-                            linux_banner);
-       if (status)
-               goto fdt_set_fail;
-
        return EFI_SUCCESS;
 
 fdt_set_fail:
diff --git a/drivers/firmware/efi/libstub/string.c b/drivers/firmware/efi/libstub/string.c
new file mode 100644 (file)
index 0000000..09d5a08
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Taken from:
+ *  linux/lib/string.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#ifndef __HAVE_ARCH_STRSTR
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char *strstr(const char *s1, const char *s2)
+{
+       size_t l1, l2;
+
+       l2 = strlen(s2);
+       if (!l2)
+               return (char *)s1;
+       l1 = strlen(s1);
+       while (l1 >= l2) {
+               l1--;
+               if (!memcmp(s1, s2, l2))
+                       return (char *)s1;
+               s1++;
+       }
+       return NULL;
+}
+#endif
+
+#ifndef __HAVE_ARCH_STRNCMP
+/**
+ * strncmp - Compare two length-limited strings
+ * @cs: One string
+ * @ct: Another string
+ * @count: The maximum number of bytes to compare
+ */
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+       unsigned char c1, c2;
+
+       while (count) {
+               c1 = *cs++;
+               c2 = *ct++;
+               if (c1 != c2)
+                       return c1 < c2 ? -1 : 1;
+               if (!c1)
+                       break;
+               count--;
+       }
+       return 0;
+}
+#endif
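These fallbacks are only compiled in when the architecture provides no assembler strstr/strncmp of its own. A quick host-side sanity check of the semantics they are expected to match, sketched against the libc declarations rather than the kernel build:

#include <assert.h>
#include <string.h>

int main(void)
{
        assert(strstr("uefi-stub", "stub") != NULL);
        assert(strstr("uefi-stub", "") != NULL);    /* empty needle: returns s1 */
        assert(strstr("uefi-stub", "kexec") == NULL);
        assert(strncmp("abc", "abd", 2) == 0);      /* difference past the limit */
        assert(strncmp("abc", "abd", 3) < 0);
        return 0;
}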
index 1c3fc99c5465bd10489ac1b31e17484426b7adb9..8e995148f56e263ecde7e5e7a390645b585f2c52 100644 (file)
@@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
        return ret;
 }
 
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
-                                    cgs_handle_t *handle)
-{
-       CGS_FUNC_ADEV;
-       int r;
-       uint32_t dma_handle;
-       struct drm_gem_object *obj;
-       struct amdgpu_bo *bo;
-       struct drm_device *dev = adev->ddev;
-       struct drm_file *file_priv = NULL, *priv;
-
-       mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(priv, &dev->filelist, lhead) {
-               rcu_read_lock();
-               if (priv->pid == get_pid(task_pid(current)))
-                       file_priv = priv;
-               rcu_read_unlock();
-               if (file_priv)
-                       break;
-       }
-       mutex_unlock(&dev->struct_mutex);
-       r = dev->driver->prime_fd_to_handle(dev,
-                                           file_priv, dmabuf_fd,
-                                           &dma_handle);
-       spin_lock(&file_priv->table_lock);
-
-       /* Check if we currently have a reference on the object */
-       obj = idr_find(&file_priv->object_idr, dma_handle);
-       if (obj == NULL) {
-               spin_unlock(&file_priv->table_lock);
-               return -EINVAL;
-       }
-       spin_unlock(&file_priv->table_lock);
-       bo = gem_to_amdgpu_bo(obj);
-       *handle = (cgs_handle_t)bo;
-       return 0;
-}
-
 static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
 {
        struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
-       amdgpu_cgs_import_gpu_mem,
        amdgpu_cgs_add_irq_source,
        amdgpu_cgs_irq_get,
        amdgpu_cgs_irq_put
index 749420f1ea6fbf2bc1417cfd5ea0210cf3c6243d..cb3c274edb0a6b23a9b830f1ca923cf07ccc45f2 100644 (file)
@@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-       unsigned size, i;
+       unsigned size;
+       int i;
        int ret;
 
        if (cs->in.num_chunks == 0)
index cd6edc40c9cd0c3dc2f09e96a2207f35e8b53d28..1e0bba29e16796f97c8eee38fc9d3100c405f6bc 100644 (file)
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
                        amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
                }
                if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-                       amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
-                                                              ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+                       amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
                if (ext_encoder)
                        amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
        } else {
index 774528ab8704387f00525618b1c48578387b70df..fab5471d25d7e3dc3a3605d22c52fd669e919f7b 100644 (file)
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
index 9a07742620d0361ad054930ff3b07c75c9bbcf2c..7bc9e9fcf3d26cbbaa6d7aa76fbef0349964ec6f 100644 (file)
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+       /* reset addr and status */
+       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+       if (!addr && !status)
+               return 0;
+
        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
        dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
-       /* reset addr and status */
-       WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
        return 0;
 }
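Both gmc hunks apply the same reordering: acknowledge the latched fault before reporting it, then bail out quietly when the registers read back as zero because the fault was already serviced. A standalone sketch of that pattern, with hypothetical variables standing in for the VM_CONTEXT1_* MMIO registers:

#include <stdint.h>
#include <stdio.h>

/* hypothetical latched-fault state standing in for the MMIO block */
static uint32_t fault_addr, fault_status;

static void process_fault(void)
{
        uint32_t addr   = fault_addr;
        uint32_t status = fault_status;

        /* ack first, so a fault that refires while we log is not lost */
        fault_addr = fault_status = 0;

        if (!addr && !status)
                return;             /* already serviced: stay quiet */

        printf("GPU fault: addr 0x%x status 0x%x\n", addr, status);
}

int main(void)
{
        fault_addr = 0x1000;
        fault_status = 0x8;
        process_fault();            /* reports once */
        process_fault();            /* spurious re-entry: silent */
        return 0;
}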
index 488642f08267902c628c9db541030c6a27cb2def..3b47ae313e36bc7a0385365c082a519207fd7470 100644 (file)
 
 #include "cgs_common.h"
 
-/**
- * cgs_import_gpu_mem() - Import dmabuf handle
- * @cgs_device:  opaque device handle
- * @dmabuf_fd:   DMABuf file descriptor
- * @handle:      memory handle (output)
- *
- * Must be called in the process context that dmabuf_fd belongs to.
- *
- * Return:  0 on success, -errno otherwise
- */
-typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
-                                   cgs_handle_t *handle);
-
 /**
  * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
  * @private_data:  private data provided to cgs_add_irq_source
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
 typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
 
 struct cgs_os_ops {
-       cgs_import_gpu_mem_t import_gpu_mem;
-
        /* IRQ handling */
        cgs_add_irq_source_t add_irq_source;
        cgs_irq_get_t irq_get;
        cgs_irq_put_t irq_put;
 };
 
-#define cgs_import_gpu_mem(dev,dmabuf_fd,handle)               \
-       CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
 #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
        CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler,    \
                    private_data)
index e23df5fd3836b1b70169a1ee125782c1e754b875..bf27a07dbce36993e7ed668ff567f5565a6e0995 100644 (file)
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_mst_port *port,
                                  int offset, int size, u8 *bytes);
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-                                   struct drm_dp_mst_branch *mstb);
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+                                    struct drm_dp_mst_branch *mstb);
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                           struct drm_dp_mst_branch *mstb,
                                           struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
 
-       cancel_work_sync(&mstb->mgr->work);
-
        /*
         * destroy all ports - don't need lock
         * as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
 {
        struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
        struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+
        if (!port->input) {
                port->vcpi.num_slots = 0;
 
                kfree(port->cached_edid);
 
-               /* we can't destroy the connector here, as
-                  we might be holding the mode_config.mutex
-                  from an EDID retrieval */
+               /*
+                * The only time we don't have a connector
+                * on an output port is if the connector init
+                * fails.
+                */
                if (port->connector) {
+                       /* we can't destroy the connector here, as
+                        * we might be holding the mode_config.mutex
+                        * from an EDID retrieval */
+
                        mutex_lock(&mgr->destroy_connector_lock);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
                        return;
                }
+               /* no need to clean up the vcpi:
+                * if we have no connector we never set up a vcpi */
                drm_dp_port_teardown_pdt(port, port->pdt);
-
-               if (!port->input && port->vcpi.vcpi > 0)
-                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
        }
        kfree(port);
-
-       (*mgr->cbs->hotplug)(mgr);
 }
 
 static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
        }
 }
 
-static void build_mst_prop_path(struct drm_dp_mst_port *port,
-                               struct drm_dp_mst_branch *mstb,
+static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+                               int pnum,
                                char *proppath,
                                size_t proppath_size)
 {
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
                snprintf(temp, sizeof(temp), "-%d", port_num);
                strlcat(proppath, temp, proppath_size);
        }
-       snprintf(temp, sizeof(temp), "-%d", port->port_num);
+       snprintf(temp, sizeof(temp), "-%d", pnum);
        strlcat(proppath, temp, proppath_size);
 }
 
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                drm_dp_port_teardown_pdt(port, old_pdt);
 
                ret = drm_dp_port_setup_pdt(port);
-               if (ret == true) {
+               if (ret == true)
                        drm_dp_send_link_address(mstb->mgr, port->mstb);
-                       port->mstb->link_address_sent = true;
-               }
        }
 
        if (created && !port->input) {
                char proppath[255];
-               build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
-               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
 
-               if (port->port_num >= 8) {
+               build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+               port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+               if (!port->connector) {
+                       /* remove it from the port list */
+                       mutex_lock(&mstb->mgr->lock);
+                       list_del(&port->next);
+                       mutex_unlock(&mstb->mgr->lock);
+                       /* drop port list reference */
+                       drm_dp_put_port(port);
+                       goto out;
+               }
+               if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
                        port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+                       drm_mode_connector_set_tile_property(port->connector);
                }
+               (*mstb->mgr->cbs->register_connector)(port->connector);
        }
 
+out:
        /* put reference to this port */
        drm_dp_put_port(port);
 }
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
 {
        struct drm_dp_mst_port *port;
        struct drm_dp_mst_branch *mstb_child;
-       if (!mstb->link_address_sent) {
+       if (!mstb->link_address_sent)
                drm_dp_send_link_address(mgr, mstb);
-               mstb->link_address_sent = true;
-       }
+
        list_for_each_entry(port, &mstb->ports, next) {
                if (port->input)
                        continue;
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
        mutex_unlock(&mgr->qlock);
 }
 
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-                                   struct drm_dp_mst_branch *mstb)
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+                                    struct drm_dp_mst_branch *mstb)
 {
        int len;
        struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
-               return -ENOMEM;
+               return;
 
        txmsg->dst = mstb;
        len = build_link_address(txmsg);
 
+       mstb->link_address_sent = true;
        drm_dp_queue_down_tx(mgr, txmsg);
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                        }
                        (*mgr->cbs->hotplug)(mgr);
                }
-       } else
+       } else {
+               mstb->link_address_sent = false;
                DRM_DEBUG_KMS("link address failed %d\n", ret);
+       }
 
        kfree(txmsg);
-       return 0;
 }
 
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
        drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
                           DP_MST_EN | DP_UPSTREAM_IS_SRC);
        mutex_unlock(&mgr->lock);
+       flush_work(&mgr->work);
+       flush_work(&mgr->destroy_connector_work);
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
 
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 
        if (port->cached_edid)
                edid = drm_edid_duplicate(port->cached_edid);
-       else
+       else {
                edid = drm_get_edid(connector, &port->aux.ddc);
-
-       drm_mode_connector_set_tile_property(connector);
+               drm_mode_connector_set_tile_property(connector);
+       }
        drm_dp_put_port(port);
        return edid;
 }
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
        struct drm_dp_mst_port *port;
-
+       bool send_hotplug = false;
        /*
         * Not a regular list traverse as we have to drop the destroy
         * connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                if (!port->input && port->vcpi.vcpi > 0)
                        drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
                kfree(port);
+               send_hotplug = true;
        }
+       if (send_hotplug)
+               (*mgr->cbs->hotplug)(mgr);
 }
 
 /**
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
  */
 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
+       flush_work(&mgr->work);
        flush_work(&mgr->destroy_connector_work);
        mutex_lock(&mgr->payload_lock);
        kfree(mgr->payloads);
index 418d299f3b129b307f86a970fdf49d3a826af4c8..ca08c472311bd3f6238f7513bc4ac26737228884 100644 (file)
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
                struct drm_crtc *crtc = mode_set->crtc;
                int ret;
 
-               if (crtc->funcs->cursor_set) {
+               if (crtc->funcs->cursor_set2) {
+                       ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+                       if (ret)
+                               error = true;
+               } else if (crtc->funcs->cursor_set) {
                        ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
                        if (ret)
                                error = true;
index d734780b31c0fdcd67d2d622333b0a78c8098e10..a18164f2f6d28290c09462dd7a755168873a42d3 100644 (file)
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 }
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
 {
        bool poll = false;
        struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
        if (poll)
                schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+
 
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
                                                              uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 
        /* Re-enable polling in case the global poll config changed. */
        if (drm_kms_helper_poll != dev->mode_config.poll_running)
-               __drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
 
        dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
        mutex_lock(&dev->mode_config.mutex);
-       __drm_kms_helper_poll_enable(dev);
+       drm_kms_helper_poll_enable_locked(dev);
        mutex_unlock(&dev->mode_config.mutex);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
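The rename formalizes a common locked/unlocked API split: the _locked variant assumes the caller already holds the mutex, and the public function is a take-lock wrapper around it. A minimal standalone sketch with hypothetical names, pthreads standing in for the mode_config mutex:

#include <pthread.h>
#include <stdio.h>

struct dev { pthread_mutex_t lock; int polling; };

/* _locked variant: caller must already hold dev->lock */
static void poll_enable_locked(struct dev *d)
{
        d->polling = 1;
}

/* public variant: a take-lock wrapper around the _locked one */
static void poll_enable(struct dev *d)
{
        pthread_mutex_lock(&d->lock);
        poll_enable_locked(d);
        pthread_mutex_unlock(&d->lock);
}

int main(void)
{
        struct dev d = { PTHREAD_MUTEX_INITIALIZER, 0 };

        poll_enable(&d);                 /* from an unlocked context */

        pthread_mutex_lock(&d.lock);     /* from code already under the lock */
        poll_enable_locked(&d);
        pthread_mutex_unlock(&d.lock);

        printf("polling=%d\n", d.polling);
        return 0;
}

Exporting the _locked variant is what lets the i915 hotplug storm handler further down re-enable polling from a context that already holds the mutex, where calling the public wrapper would deadlock.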
index cbdb78ef3baca57cd1ddf701425b953f140377f2..e6cbaca821a47b9740f4d35d7cfce00bbd0aa11a 100644 (file)
@@ -37,7 +37,6 @@
  * DECON stands for Display and Enhancement controller.
  */
 
-#define DECON_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 #define WINDOWS_NR     2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
        return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       if (adjusted_mode->vrefresh == 0)
-               adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
-
-       return true;
-}
-
 static void decon_commit(struct exynos_drm_crtc *crtc)
 {
        struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
 static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .enable = decon_enable,
        .disable = decon_disable,
-       .mode_fixup = decon_mode_fixup,
        .commit = decon_commit,
        .enable_vblank = decon_enable_vblank,
        .disable_vblank = decon_disable_vblank,
index d66ade0efac892b84ce7e26c4f9e132870806d33..124fb9a56f02b596b5a6c4cf6cbf3f28151f4470 100644 (file)
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_dp_suspend(struct device *dev)
-{
-       struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-       exynos_dp_disable(&dp->encoder);
-       return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
-       struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
-       exynos_dp_enable(&dp->encoder);
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
-};
-
 static const struct of_device_id exynos_dp_match[] = {
        { .compatible = "samsung,exynos5-dp" },
        {},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
        .driver         = {
                .name   = "exynos-dp",
                .owner  = THIS_MODULE,
-               .pm     = &exynos_dp_pm_ops,
                .of_match_table = exynos_dp_match,
        },
 };
index c68a6a2a9b5794558015abdeefb0e58c4958049d..7f55ba6771c6b94e5f45bee6bdec078c27c74f5b 100644 (file)
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
 
 int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 {
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
 
 int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 {
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
 
 int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 {
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
 
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -111,7 +107,6 @@ err:
        }
        return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
 
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
 {
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
                        subdrv->close(dev, subdrv->dev, file);
        }
 }
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
index 0872aa2f450f273a992bc414081e8501e11bf787..ed28823d3b35ef704a5dded0c1c55c1eff6ef3ed 100644 (file)
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
                exynos_crtc->ops->disable(exynos_crtc);
 }
 
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
-                           const struct drm_display_mode *mode,
-                           struct drm_display_mode *adjusted_mode)
-{
-       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-       if (exynos_crtc->ops->mode_fixup)
-               return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
-                                                   adjusted_mode);
-
-       return true;
-}
-
 static void
 exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
        .enable         = exynos_drm_crtc_enable,
        .disable        = exynos_drm_crtc_disable,
-       .mode_fixup     = exynos_drm_crtc_mode_fixup,
        .mode_set_nofb  = exynos_drm_crtc_mode_set_nofb,
        .atomic_begin   = exynos_crtc_atomic_begin,
        .atomic_flush   = exynos_crtc_atomic_flush,
index 831d2e4cacf9d0bb951f5bbd9d7bbfab4b681c91..ae9e6b2d3758a97104ac6be69f1a970e6c0f3bb6 100644 (file)
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
        struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
 
        return 0;
 }
+#endif
 
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
index b7ba21dfb69641f36410550711f024fde541bb72..6c717ba672dbc8ad41fcc3adedfc82c32c6de5bc 100644 (file)
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
  *
  * @enable: enable the device
  * @disable: disable the device
- * @mode_fixup: fix mode data before applying it
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
        void (*enable)(struct exynos_drm_crtc *crtc);
        void (*disable)(struct exynos_drm_crtc *crtc);
-       bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
-                               const struct drm_display_mode *mode,
-                               struct drm_display_mode *adjusted_mode);
        void (*commit)(struct exynos_drm_crtc *crtc);
        int (*enable_vblank)(struct exynos_drm_crtc *crtc);
        void (*disable_vblank)(struct exynos_drm_crtc *crtc);
index 2a652359af644b51f257cde7528d70b6016897da..dd3a5e6d58c8f04c43fb8afd7c7b6243f7312761 100644 (file)
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
        .set_addr = fimc_dst_set_addr,
 };
 
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
-       DRM_DEBUG_KMS("enable[%d]\n", enable);
-
-       if (enable) {
-               clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
-               clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
-               ctx->suspended = false;
-       } else {
-               clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
-               clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
-               ctx->suspended = true;
-       }
-
-       return 0;
-}
-
 static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
 {
        struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+       DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+       if (enable) {
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+               clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+               ctx->suspended = false;
+       } else {
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+               clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int fimc_suspend(struct device *dev)
 {
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
 static int fimc_runtime_suspend(struct device *dev)
 {
        struct fimc_context *ctx = get_fimc_context(dev);
index 750a9e6b9e8d92c312e3bb685fb524f249ad0488..3d1aba67758baf4a4e25e4c383e300d5a6dd954e 100644 (file)
@@ -41,7 +41,6 @@
  * CPU Interface.
  */
 
-#define FIMD_DEFAULT_FRAMERATE 60
 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128
 
 /* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
        return (clkdiv < 0x100) ? clkdiv : 0xff;
 }
 
-static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
-               const struct drm_display_mode *mode,
-               struct drm_display_mode *adjusted_mode)
-{
-       if (adjusted_mode->vrefresh == 0)
-               adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
-
-       return true;
-}
-
 static void fimd_commit(struct exynos_drm_crtc *crtc)
 {
        struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
                return;
 
        val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-       writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+       writel(val, ctx->regs + DP_MIE_CLKCON);
 }
 
 static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .enable = fimd_enable,
        .disable = fimd_disable,
-       .mode_fixup = fimd_mode_fixup,
        .commit = fimd_commit,
        .enable_vblank = fimd_enable_vblank,
        .disable_vblank = fimd_disable_vblank,
index 3734c34aed16a22938509454cf794e5b4069ac35..c17efdb238a6e24f6fcecac58c395b8964b12703 100644 (file)
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
 
 int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                                 struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
        g2d_put_cmdlist(g2d, node);
        return ret;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
 
 int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
                          struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 out:
        return 0;
 }
-EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
 static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
index f12fbc36b120065902c50253a4e91e9cc8952df5..407afedb60031a00f7f0cc5cb599cf7b3e57a9b8 100644 (file)
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
        nr_pages = obj->size >> PAGE_SHIFT;
 
        if (!is_drm_iommu_supported(dev)) {
-               dma_addr_t start_addr;
-               unsigned int i = 0;
-
                obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
                if (!obj->pages) {
                        DRM_ERROR("failed to allocate pages.\n");
                        return -ENOMEM;
                }
+       }
 
-               obj->cookie = dma_alloc_attrs(dev->dev,
-                                       obj->size,
-                                       &obj->dma_addr, GFP_KERNEL,
-                                       &obj->dma_attrs);
-               if (!obj->cookie) {
-                       DRM_ERROR("failed to allocate buffer.\n");
+       obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
+                                     GFP_KERNEL, &obj->dma_attrs);
+       if (!obj->cookie) {
+               DRM_ERROR("failed to allocate buffer.\n");
+               if (obj->pages)
                        drm_free_large(obj->pages);
-                       return -ENOMEM;
-               }
+               return -ENOMEM;
+       }
+
+       if (obj->pages) {
+               dma_addr_t start_addr;
+               unsigned int i = 0;
 
                start_addr = obj->dma_addr;
                while (i < nr_pages) {
-                       obj->pages[i] = phys_to_page(start_addr);
+                       obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
+                                                              start_addr));
                        start_addr += PAGE_SIZE;
                        i++;
                }
        } else {
-               obj->pages = dma_alloc_attrs(dev->dev, obj->size,
-                                       &obj->dma_addr, GFP_KERNEL,
-                                       &obj->dma_attrs);
-               if (!obj->pages) {
-                       DRM_ERROR("failed to allocate buffer.\n");
-                       return -ENOMEM;
-               }
+               obj->pages = obj->cookie;
        }
 
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)obj->dma_addr, obj->size);
 
-       if (!is_drm_iommu_supported(dev)) {
-               dma_free_attrs(dev->dev, obj->size, obj->cookie,
-                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
-               drm_free_large(obj->pages);
-       } else
-               dma_free_attrs(dev->dev, obj->size, obj->pages,
-                               (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+       dma_free_attrs(dev->dev, obj->size, obj->cookie,
+                       (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
 
-       obj->dma_addr = (dma_addr_t)NULL;
+       if (!is_drm_iommu_supported(dev))
+               drm_free_large(obj->pages);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
-               goto out;
-
-       exynos_drm_free_buf(exynos_gem_obj);
-
-out:
-       drm_gem_free_mmap_offset(obj);
+               drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
+       else
+               exynos_drm_free_buf(exynos_gem_obj);
 
        /* release file pointer to gem object. */
        drm_gem_object_release(obj);
 
        kfree(exynos_gem_obj);
-       exynos_gem_obj = NULL;
 }
 
 unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
        return exynos_gem_obj->size;
 }
 
-
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                return ERR_PTR(ret);
        }
 
+       ret = drm_gem_create_mmap_offset(obj);
+       if (ret < 0) {
+               drm_gem_object_release(obj);
+               kfree(exynos_gem_obj);
+               return ERR_PTR(ret);
+       }
+
        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
 
        return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
        drm_gem_object_unreference_unlocked(obj);
 }
 
-int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
                                      struct vm_area_struct *vma)
 {
        struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
-{      struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+       struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;
 
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_mode_create_dumb *args)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
+       unsigned int flags;
        int ret;
 
        /*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
 
-       if (is_drm_iommu_supported(dev)) {
-               exynos_gem_obj = exynos_drm_gem_create(dev,
-                       EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
-                       args->size);
-       } else {
-               exynos_gem_obj = exynos_drm_gem_create(dev,
-                       EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
-                       args->size);
-       }
+       if (is_drm_iommu_supported(dev))
+               flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+       else
+               flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
 
+       exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
        if (IS_ERR(exynos_gem_obj)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                goto unlock;
        }
 
-       ret = drm_gem_create_mmap_offset(obj);
-       if (ret)
-               goto out;
-
        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
 
-out:
        drm_gem_object_unreference(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 err_close_vm:
        drm_gem_vm_close(vma);
-       drm_gem_free_mmap_offset(obj);
 
        return ret;
 }
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
        if (ret < 0)
                goto err_free_large;
 
+       exynos_gem_obj->sgt = sgt;
+
        if (sgt->nents == 1) {
                /* always physically continuous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
index cd62f8410d1e5d86f6dd221aebf924be1fe71db9..b62d1007c0e05f88e4cfc38970bb7baa9a87fe7a 100644 (file)
@@ -39,6 +39,7 @@
  *     - this address could be physical address without IOMMU and
  *     device address with IOMMU.
  * @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
  *
  * P.S. this object would be transferred to user as kms_bo.handle so
  *     user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
        dma_addr_t              dma_addr;
        struct dma_attrs        dma_attrs;
        struct page             **pages;
+       struct sg_table         *sgt;
 };
 
 struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
 /* destroy a buffer with gem object */
 void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
 
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
-                                                     unsigned long size);
-
 /* create a new buffer with gem object */
 struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
index 425e7062538812c0613c055b9b4fdeea709c4be0..2f5c118f4c8ef5ea27a68532756ca77549e325f4 100644 (file)
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int rotator_clk_crtl(struct rot_context *rot, bool enable)
 {
        if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
 static int rotator_runtime_suspend(struct device *dev)
 {
        struct rot_context *rot = dev_get_drvdata(dev);
index 3e4be5a3becdddf9fd2a23e6be26f02da90a28f2..6ade068884328680ffe024dd91eabb9ffe6d9013 100644 (file)
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
 
        drm_mode_connector_set_path_property(connector, pathprop);
+       return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_device *dev = connector->dev;
        drm_modeset_lock_all(dev);
        intel_connector_add_to_fbdev(intel_connector);
        drm_modeset_unlock_all(dev);
        drm_connector_register(&intel_connector->base);
-       return connector;
 }
 
 static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 static struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = intel_dp_add_mst_connector,
+       .register_connector = intel_dp_register_mst_connector,
        .destroy_connector = intel_dp_destroy_mst_connector,
        .hotplug = intel_dp_mst_hotplug,
 };
index 53c0173a39fe182d5d2e50ac2ffc6637f77526fd..b17785719598c9ca867340dc77aaed858f0c72db 100644 (file)
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 
        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
-               drm_kms_helper_poll_enable(dev);
+               drm_kms_helper_poll_enable_locked(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
index 72e0edd7bbde77d3b12812bead2a3f676589385a..7412caedcf7f98a2a5e494c41e2bad97f34d4e34 100644 (file)
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
        read_pointer = ring->next_context_status_buffer;
-       write_pointer = status_pointer & 0x07;
+       write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
        if (read_pointer > write_pointer)
-               write_pointer += 6;
+               write_pointer += GEN8_CSB_ENTRIES;
 
        spin_lock(&ring->execlist_lock);
 
        while (read_pointer < write_pointer) {
                read_pointer++;
                status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % 6) * 8);
+                               (read_pointer % GEN8_CSB_ENTRIES) * 8);
                status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
-                               (read_pointer % 6) * 8 + 4);
+                               (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
 
                if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                        continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        spin_unlock(&ring->execlist_lock);
 
        WARN(submit_contexts > 2, "More than two context complete events?\n");
-       ring->next_context_status_buffer = write_pointer % 6;
+       ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
        I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-                  _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+                  _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+                                ((u32)ring->next_context_status_buffer &
+                                 GEN8_CSB_PTR_MASK) << 8));
 }
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u8 next_context_status_buffer_hw;
 
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
        POSTING_READ(RING_MODE_GEN7(ring));
-       ring->next_context_status_buffer = 0;
+
+       /*
+        * Instead of resetting the Context Status Buffer (CSB) read pointer to
+        * zero, we need to read the write pointer from hardware and use its
+        * value because "this register is power context save restored".
+        * Effectively, these states have been observed:
+        *
+        *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+        * BDW  | CSB regs not reset       | CSB regs reset       |
+        * CHT  | CSB regs not reset       | CSB regs not reset   |
+        */
+       next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+                                                  & GEN8_CSB_PTR_MASK);
+
+       /*
+        * When the CSB registers are reset (also after power-up / gpu reset),
+        * the CSB write pointer is set to all 1's, which is not valid; use '5'
+        * (GEN8_CSB_ENTRIES - 1) in this special case so that the first
+        * element read is CSB[0].
+        */
+       if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+               next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+       ring->next_context_status_buffer = next_context_status_buffer_hw;
        DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
 
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
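The CSB read/write pointers index a six-entry ring, so the unwrap-then-modulo arithmetic above is easy to get wrong. A standalone sketch of the same arithmetic, with a hypothetical drain() helper and printf standing in for consuming an entry:

#include <stdio.h>

#define CSB_ENTRIES  6
#define CSB_PTR_MASK 0x07

/* consume entries between a saved read pointer and the HW write pointer,
 * unwrapping exactly like the loop in the hunk above */
static void drain(unsigned int read_ptr, unsigned int status_reg)
{
        unsigned int write_ptr = status_reg & CSB_PTR_MASK;

        if (read_ptr > write_ptr)
                write_ptr += CSB_ENTRIES;   /* unwrap */

        while (read_ptr < write_ptr) {
                read_ptr++;
                printf("consume CSB[%u]\n", read_ptr % CSB_ENTRIES);
        }
}

int main(void)
{
        drain(4, 0x01);   /* wrapped case: CSB[5], CSB[0], CSB[1] */
        return 0;
}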
index 64f89f9982a20f745cb41ef5950cd637437000ca..3c63bb32ad81c657e418b1b7143ed934d036c66f 100644 (file)
@@ -25,6 +25,8 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)                        ((ring)->mmio_base+0x230)
index af7fdb3bd663aef062a5cd41a2cdacbb4492515d..7401cf90b0dbcd1eb335e0c6defc42d22c9bb631 100644 (file)
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
        }
 
        if (power_well->data == SKL_DISP_PW_1) {
-               intel_prepare_ddi(dev);
+               if (!dev_priv->power_domains.initializing)
+                       intel_prepare_ddi(dev);
                gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
        }
 }
index dd845f82cc24aaa5b46cf5680ffd607d3a8ebb2b..4649bd2ed3401ae74049798ab591c8b2e2e58779 100644 (file)
@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
                  adjusted_mode->hdisplay,
                  adjusted_mode->vdisplay);
 
-       if (qcrtc->index == 0)
+       if (bo->is_primary == false)
                recreate_primary = true;
 
        if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
index c3872598b85a3856787b1bf0b7113633a468e020..65adb9c723772d9f0011573d320182823b18d37a 100644 (file)
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
                } else
                        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-                       args.ucAction = ATOM_LCD_BLON;
-                       atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+                       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+                       atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
                }
                break;
        case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                                atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
                }
                if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-                       atombios_dig_transmitter_setup(encoder,
-                                                      ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+                       atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
                if (ext_encoder)
                        atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
                break;
index 5e09c061847f50c688650d12625a462e8c4737cd..6cddae44fa6e4cafa59de1b3d7528b82959b6fd9 100644 (file)
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
 {
        struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
        struct drm_device *dev = master->base.dev;
-       struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector;
        struct drm_connector *connector;
 
@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
        drm_mode_connector_set_path_property(connector, pathprop);
 
+       return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+
        drm_modeset_lock_all(dev);
        radeon_fb_add_connector(rdev, connector);
        drm_modeset_unlock_all(dev);
 
        drm_connector_register(connector);
-       return connector;
 }
 
 static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 
 struct drm_dp_mst_topology_cbs mst_cbs = {
        .add_connector = radeon_dp_add_mst_connector,
+       .register_connector = radeon_dp_register_mst_connector,
        .destroy_connector = radeon_dp_destroy_mst_connector,
        .hotplug = radeon_dp_mst_hotplug,
 };
index 7214858ffceaa8c20409206533dcc332fd663705..1aa657fe31cb26f88c2d413bb1a38974e091eee2 100644 (file)
@@ -48,40 +48,10 @@ struct radeon_fbdev {
        struct radeon_device *rdev;
 };
 
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
-       int ret;
-
-       ret = drm_fb_helper_set_par(info);
-
-       /* XXX: with universal plane support fbdev will automatically disable
-        * all non-primary planes (including the cursor)
-        */
-       if (ret == 0) {
-               struct drm_fb_helper *fb_helper = info->par;
-               int i;
-
-               for (i = 0; i < fb_helper->crtc_count; i++) {
-                       struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
-                       radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
-               }
-       }
-
-       return ret;
-}
-
 static struct fb_ops radeonfb_ops = {
        .owner = THIS_MODULE,
        .fb_check_var = drm_fb_helper_check_var,
-       .fb_set_par = radeon_fb_helper_set_par,
+       .fb_set_par = drm_fb_helper_set_par,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
index 5ae8f921da2a478bef55b617c3a28f66ed1e2773..8a76821177a6c0c1a3cc9659b4f119943d48b3ce 100644 (file)
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
                                         0, 0,
                                         DRM_MM_SEARCH_DEFAULT,
                                         DRM_MM_CREATE_DEFAULT);
+       if (ret) {
+               (void) vmw_cmdbuf_man_process(man);
+               ret = drm_mm_insert_node_generic(&man->mm, info->node,
+                                                info->page_size, 0, 0,
+                                                DRM_MM_SEARCH_DEFAULT,
+                                                DRM_MM_CREATE_DEFAULT);
+       }
+
        spin_unlock_bh(&man->lock);
        info->done = !ret;
 
index 6cb89c0ebab6df03f7e8b38fc81cecd3136e57de..1fd46859ed29ded69dbb479c980316142bde5fd7 100644 (file)
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = {
        { .compatible = "stericsson,abx500-temp" },
        {},
 };
+MODULE_DEVICE_TABLE(of, abx500_temp_match);
 #endif
 
 static struct platform_driver abx500_temp_driver = {
index a3dae6d0082a0d08e4183f63e27b79ae5510b863..82de3deeb18a7ddf5e041e695b35cc8b1500abea 100644 (file)
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = {
        { .compatible = "gpio-fan", },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
 #endif /* CONFIG_OF_GPIO */
 
 static int gpio_fan_probe(struct platform_device *pdev)
index 2d9a712699ff5d541e831629834b23882b4fa606..3e23003f78b01ca731e8c232f38e1132eccf251f 100644 (file)
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = {
        { .compatible = "pwm-fan", },
        {},
 };
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
 
 static struct platform_driver pwm_fan_driver = {
        .probe          = pwm_fan_probe,
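
The three hwmon hunks above add the same one-liner. MODULE_DEVICE_TABLE(of, ...) emits the match table into the module's alias section so udev/modprobe can autoload the driver when a matching device-tree node appears; without it these drivers bind only when built in or loaded by hand. A minimal sketch with a hypothetical compatible string:

        static const struct of_device_id example_of_match[] = {
                { .compatible = "vendor,example-sensor" }, /* hypothetical */
                { /* sentinel */ },
        };
        MODULE_DEVICE_TABLE(of, example_of_match);
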
index 3a3738fe016b3af0a2e0b723396822b786e2c20f..cd4510a6337548d26344b8ccc5cb427ab13dd4ef 100644 (file)
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = {
                .name = "C6-SKL",
                .desc = "MWAIT 0x20",
                .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
-               .exit_latency = 75,
+               .exit_latency = 85,
                .target_residency = 200,
                .enter = &intel_idle,
                .enter_freeze = intel_idle_freeze, },
@@ -636,10 +636,18 @@ static struct cpuidle_state skl_cstates[] = {
                .name = "C8-SKL",
                .desc = "MWAIT 0x40",
                .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
-               .exit_latency = 174,
+               .exit_latency = 200,
                .target_residency = 800,
                .enter = &intel_idle,
                .enter_freeze = intel_idle_freeze, },
+       {
+               .name = "C9-SKL",
+               .desc = "MWAIT 0x50",
+               .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 480,
+               .target_residency = 5000,
+               .enter = &intel_idle,
+               .enter_freeze = intel_idle_freeze, },
        {
                .name = "C10-SKL",
                .desc = "MWAIT 0x60",
index 41d6911e244e1765a34b77fe067cf4e4ddeab172..f1ccd40beae9eb2b7a8e6aaad7ef1a98ab8a0a81 100644 (file)
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
-       props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
        return 0;
 }
 
-static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
-{
-       struct mlx5_create_mkey_mbox_in *in;
-       struct mlx5_mkey_seg *seg;
-       struct mlx5_core_mr mr;
-       int err;
-
-       in = kzalloc(sizeof(*in), GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       seg = &in->seg;
-       seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
-       seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
-       seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
-       seg->start_addr = 0;
-
-       err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
-                                   NULL, NULL, NULL);
-       if (err) {
-               mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
-               goto err_in;
-       }
-
-       kfree(in);
-       *key = mr.key;
-
-       return 0;
-
-err_in:
-       kfree(in);
-
-       return err;
-}
-
-static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
-{
-       struct mlx5_core_mr mr;
-       int err;
-
-       memset(&mr, 0, sizeof(mr));
-       mr.key = key;
-       err = mlx5_core_destroy_mkey(dev->mdev, &mr);
-       if (err)
-               mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
-}
-
 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
-       } else {
-               err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
-               if (err) {
-                       mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
-                       kfree(pd);
-                       return ERR_PTR(err);
-               }
        }
 
        return &pd->ibpd;
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
        struct mlx5_ib_dev *mdev = to_mdev(pd->device);
        struct mlx5_ib_pd *mpd = to_mpd(pd);
 
-       if (!pd->uobject)
-               free_pa_mkey(mdev, mpd->pa_lkey);
-
        mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
        kfree(mpd);
 
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        struct ib_srq_init_attr attr;
        struct mlx5_ib_dev *dev;
        struct ib_cq_init_attr cq_attr = {.cqe = 1};
-       u32 rsvd_lkey;
        int ret = 0;
 
        dev = container_of(devr, struct mlx5_ib_dev, devr);
 
-       ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
-       if (ret) {
-               pr_err("Failed to query special context %d\n", ret);
-               return ret;
-       }
-       dev->ib_dev.local_dma_lkey = rsvd_lkey;
-
        devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->p0)) {
                ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner               = THIS_MODULE;
        dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
+       dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
        dev->num_ports          = MLX5_CAP_GEN(mdev, num_ports);
        dev->ib_dev.phys_port_cnt     = dev->num_ports;
        dev->ib_dev.num_comp_vectors    =
index bb8cda79e8812cf1122feaa70a3f113958858d77..22123b79d550d6a7e0474501592f36dc6f0b632e 100644 (file)
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
 struct mlx5_ib_pd {
        struct ib_pd            ibpd;
        u32                     pdn;
-       u32                     pa_lkey;
 };
 
 /* Use macros here so that we don't have to duplicate
@@ -213,7 +212,6 @@ struct mlx5_ib_qp {
        int                     uuarn;
 
        int                     create_type;
-       u32                     pa_lkey;
 
        /* Store signature errors */
        bool                    signature_en;
index c745c6c5e10da0b296fd19ef6ee01d7650af44ff..6f521a3418e8e1c69b9cca74fc8443dd05e30dac 100644 (file)
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                        err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
                        if (err)
                                mlx5_ib_dbg(dev, "err %d\n", err);
-                       else
-                               qp->pa_lkey = to_mpd(pd)->pa_lkey;
                }
 
                if (err)
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
                mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
        dseg->addr = cpu_to_be64(mfrpl->map);
        dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
-       dseg->lkey = cpu_to_be32(pd->pa_lkey);
+       dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
 static __be32 send_ieth(struct ib_send_wr *wr)
index ca2873698d75444066312640a1eb84dc5e2190db..4cd5428a2399a2cc73757c49382842094714d1bc 100644 (file)
@@ -80,7 +80,7 @@ enum {
        IPOIB_NUM_WC              = 4,
 
        IPOIB_MAX_PATH_REC_QUEUE  = 3,
-       IPOIB_MAX_MCAST_QUEUE     = 3,
+       IPOIB_MAX_MCAST_QUEUE     = 64,
 
        IPOIB_FLAG_OPER_UP        = 0,
        IPOIB_FLAG_INITIALIZED    = 1,
@@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
                       union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
 
 int ipoib_init_qp(struct net_device *dev);
 int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
index 36536ce5a3e2f9d51278be970d51bb51ec07232c..f74316e679d2fc2b7b27212d47fc806e95844f01 100644 (file)
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
        unsigned long dt;
        unsigned long flags;
        int i;
+       LIST_HEAD(remove_list);
+       struct ipoib_mcast *mcast, *tmcast;
+       struct net_device *dev = priv->dev;
 
        if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
                return;
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
                                                          lockdep_is_held(&priv->lock))) != NULL) {
                        /* was the neigh idle for two GC periods */
                        if (time_after(neigh_obsolete, neigh->alive)) {
+                               u8 *mgid = neigh->daddr + 4;
+
+                               /* Is this multicast ? */
+                               if (*mgid == 0xff) {
+                                       mcast = __ipoib_mcast_find(dev, mgid);
+
+                                       if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+                                               list_del(&mcast->list);
+                                               rb_erase(&mcast->rb_node, &priv->multicast_tree);
+                                               list_add_tail(&mcast->list, &remove_list);
+                                       }
+                               }
+
                                rcu_assign_pointer(*np,
                                                   rcu_dereference_protected(neigh->hnext,
                                                                             lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
 
 out_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
+       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+               ipoib_mcast_leave(dev, mcast);
 }
 
 static void ipoib_reap_neigh(struct work_struct *work)
index 09a1748f9d131423f020020456d61d2f6c44a8b1..136cbefe00f87aeb79b02d6508d42fdac5741069 100644 (file)
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
        return mcast;
 }
 
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
                rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
 
                /*
-                * Historically Linux IPoIB has never properly supported SEND
-                * ONLY join. It emulated it by not providing all the required
-                * attributes, which is enough to prevent group creation and
-                * detect if there are full members or not. A major problem
-                * with supporting SEND ONLY is detecting when the group is
-                * auto-destroyed as IPoIB will cache the MLID..
+                * Send-only IB Multicast joins do not work at the core
+                * IB layer yet, so we can't use them here.  However,
+                * we are emulating an Ethernet multicast send, which
+                * does not require a multicast subscription and will
+                * still send properly.  The most appropriate thing to
+                * do is to create the group if it doesn't exist as that
+                * most closely emulates the behavior, from a user space
+                * application perspective, of Ethernet multicast
+                * operation.  For now we do a full join; later, when
+                * the core IB layers support send-only joins, we
+                * will use them.
                 */
-#if 1
-               if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-                       comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
+#if 0
                if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
                        rec.join_state = 4;
 #endif
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
        return 0;
 }
 
-static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret = 0;
index 1ace5d83a4d761b82ffbe446bbf41a1f051cd66b..f58ff96b6cbb9778153b4ab79794f8a206fa9343 100644 (file)
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
 module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024)");
 
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+                "Always register memory, even for continuous memory regions (default:true)");
+
 bool iser_pi_enable = false;
 module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
 MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
index 86f6583485ef3f99c678a5ce1087f45e7e2ba1f1..a5edd6ede692c7be3d1c6da2f355b062cea9e43e 100644 (file)
@@ -611,6 +611,7 @@ extern int iser_debug_level;
 extern bool iser_pi_enable;
 extern int iser_pi_guard;
 extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
 
 int iser_assign_reg_ops(struct iser_device *device);
 
index 2493cc748db839b4ec885b82e5292633f242ad01..4c46d67d37a13100b60c6daa0a0b01b8f6855608 100644 (file)
@@ -803,11 +803,12 @@ static int
 iser_reg_prot_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
+                bool use_dma_key,
                 struct iser_mem_reg *reg)
 {
        struct iser_device *device = task->iser_conn->ib_conn.device;
 
-       if (mem->dma_nents == 1)
+       if (use_dma_key)
                return iser_reg_dma(device, mem, reg);
 
        return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@ static int
 iser_reg_data_sg(struct iscsi_iser_task *task,
                 struct iser_data_buf *mem,
                 struct iser_fr_desc *desc,
+                bool use_dma_key,
                 struct iser_mem_reg *reg)
 {
        struct iser_device *device = task->iser_conn->ib_conn.device;
 
-       if (mem->dma_nents == 1)
+       if (use_dma_key)
                return iser_reg_dma(device, mem, reg);
 
        return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
        struct iser_mem_reg *reg = &task->rdma_reg[dir];
        struct iser_mem_reg *data_reg;
        struct iser_fr_desc *desc = NULL;
+       bool use_dma_key;
        int err;
 
        err = iser_handle_unaligned_buf(task, mem, dir);
        if (unlikely(err))
                return err;
 
-       if (mem->dma_nents != 1 ||
-           scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+       use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+                      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+       if (!use_dma_key) {
                desc = device->reg_ops->reg_desc_get(ib_conn);
                reg->mem_h = desc;
        }
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
        else
                data_reg = &task->desc.data_reg;
 
-       err = iser_reg_data_sg(task, mem, desc, data_reg);
+       err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
        if (unlikely(err))
                goto err_reg;
 
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
                        if (unlikely(err))
                                goto err_reg;
 
-                       err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+                       err = iser_reg_prot_sg(task, mem, desc,
+                                              use_dma_key, prot_reg);
                        if (unlikely(err))
                                goto err_reg;
                }
index ae70cc1463ac2b75d7eae512bf3224e90ba2d59f..85132d867bc86fcfcd99b7065e9f746301422de1 100644 (file)
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
                             (unsigned long)comp);
        }
 
-       device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
-                                  IB_ACCESS_REMOTE_WRITE |
-                                  IB_ACCESS_REMOTE_READ);
-       if (IS_ERR(device->mr))
-               goto dma_mr_err;
+       if (!iser_always_reg) {
+               int access = IB_ACCESS_LOCAL_WRITE |
+                            IB_ACCESS_REMOTE_WRITE |
+                            IB_ACCESS_REMOTE_READ;
+
+               device->mr = ib_get_dma_mr(device->pd, access);
+               if (IS_ERR(device->mr))
+                       goto dma_mr_err;
+       }
 
        INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
                                iser_event_handler);
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
        return 0;
 
 handler_err:
-       ib_dereg_mr(device->mr);
+       if (device->mr)
+               ib_dereg_mr(device->mr);
 dma_mr_err:
        for (i = 0; i < device->comps_used; i++)
                tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@ comps_err:
 static void iser_free_device_ib_res(struct iser_device *device)
 {
        int i;
-       BUG_ON(device->mr == NULL);
 
        for (i = 0; i < device->comps_used; i++) {
                struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device)
        }
 
        (void)ib_unregister_event_handler(&device->event_handler);
-       (void)ib_dereg_mr(device->mr);
+       if (device->mr)
+               (void)ib_dereg_mr(device->mr);
        ib_dealloc_pd(device->pd);
 
        kfree(device->comps);
index 56eb471b5576954f6119664b23a1d7b83316d3fb..4215b5382092c15d693e62de6e029626e9fa551d 100644 (file)
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
 config JOYSTICK_ZHENHUA
        tristate "5-byte Zhenhua RC transmitter"
        select SERIO
+       select BITREVERSE
        help
          Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
          supplied with ready-to-fly micro electric indoor helicopters
index b76ac580703ce5dc9ef97fac6620adc47ea44273..a8bc2fe170dd83e12ff78706f97f9bc32a72e5cd 100644 (file)
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
                if (w->counter == 24) { /* full frame */
                        walkera0701_parse_frame(w);
                        w->counter = NO_SYNC;
-                       if (abs(pulse_time - SYNC_PULSE) < RESERVE)     /* new frame sync */
+                       if (abs64(pulse_time - SYNC_PULSE) < RESERVE)   /* new frame sync */
                                w->counter = 0;
                } else {
                        if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
                        } else
                                w->counter = NO_SYNC;
                }
-       } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) <
+       } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
                                RESERVE + BIN1_PULSE - BIN0_PULSE)      /* frame sync .. */
                w->counter = 0;
 
index b052afec9a11f0d323eb735dee3d262eabf116a2..6639b2b8528aa6da9a518d3bd3dc0da3010f4a09 100644 (file)
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
 
        error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
        if (error)
-               return error;
+               goto err_free_keypad;
 
        res = request_mem_region(res->start, resource_size(res), pdev->name);
        if (!res) {
index 867db8a91372017d2af6a11d24f189a7bc796fc2..e317b75357a0182d99ef3c04223596bd1de6fc80 100644 (file)
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
        default:
                reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
                break;
-       };
+       }
 
        error = regmap_update_bits(pwrkey->regmap,
                                   pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
index 345df9b03aed7f1eff56cf2f1a743c59c8d5e780..5adbcedcb81cf4391bfdddefe11aae5d3131dd76 100644 (file)
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
        dev->id.product = user_dev->id.product;
        dev->id.version = user_dev->id.version;
 
-       for_each_set_bit(i, dev->absbit, ABS_CNT) {
+       for (i = 0; i < ABS_CNT; i++) {
                input_abs_set_max(dev, i, user_dev->absmax[i]);
                input_abs_set_min(dev, i, user_dev->absmin[i]);
                input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
index 73670f2aebfd5e189ab794c6ffd4b44759bb5acb..c0ec26118732879f7674f68bd0458b3d27602f61 100644 (file)
@@ -60,7 +60,7 @@ struct elan_transport_ops {
        int (*get_sm_version)(struct i2c_client *client,
                              u8* ic_type, u8 *version);
        int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
-       int (*get_product_id)(struct i2c_client *client, u8 *id);
+       int (*get_product_id)(struct i2c_client *client, u16 *id);
 
        int (*get_max)(struct i2c_client *client,
                       unsigned int *max_x, unsigned int *max_y);
index fa945304b9a576d4303c778eca929a5f3517092a..5e1665bbaa0baca86e2c865ba88162be2f209d2e 100644 (file)
@@ -40,7 +40,7 @@
 #include "elan_i2c.h"
 
 #define DRIVER_NAME            "elan_i2c"
-#define ELAN_DRIVER_VERSION    "1.6.0"
+#define ELAN_DRIVER_VERSION    "1.6.1"
 #define ETP_MAX_PRESSURE       255
 #define ETP_FWIDTH_REDUCE      90
 #define ETP_FINGER_WIDTH       15
@@ -76,7 +76,7 @@ struct elan_tp_data {
        unsigned int            x_res;
        unsigned int            y_res;
 
-       u8                      product_id;
+       u16                     product_id;
        u8                      fw_version;
        u8                      sm_version;
        u8                      iap_version;
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
                           u16 *signature_address)
 {
        switch (iap_version) {
+       case 0x00:
+       case 0x06:
        case 0x08:
                *validpage_count = 512;
                break;
+       case 0x03:
+       case 0x07:
        case 0x09:
+       case 0x0A:
+       case 0x0B:
+       case 0x0C:
                *validpage_count = 768;
                break;
        case 0x0D:
                *validpage_count = 896;
                break;
+       case 0x0E:
+               *validpage_count = 640;
+               break;
        default:
                /* unknown IC type: clear the value */
                *validpage_count = 0;
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data)
 
        error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
                                &data->fw_signature_address);
-       if (error) {
-               dev_err(&data->client->dev,
-                       "unknown iap version %d\n", data->iap_version);
-               return error;
-       }
+       if (error)
+               dev_warn(&data->client->dev,
+                        "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
+                        data->iap_version, data->ic_type);
 
        return 0;
 }
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
        const u8 *fw_signature;
        static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
 
+       if (data->fw_validpage_count == 0)
+               return -EINVAL;
+
        /* Look for a firmware with the product id appended. */
        fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
        if (!fw_name) {
index 683c840c9dd73f31d1279b7db88dfacd1f42b7b0..a679e56c44cd49ddea4361aebdb97d4fe7f1e12e 100644 (file)
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
        return 0;
 }
 
-static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
 {
        int error;
        u8 val[3];
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
                return error;
        }
 
-       *id = val[0];
+       *id = le16_to_cpup((__le16 *)val);
        return 0;
 }
 
index ff36a366b2aa1aadbe3c9a7f0e9de3eb687d83c3..cb6aecbc1dc28a20885885c4b361ecf8343bc445 100644 (file)
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
        return 0;
 }
 
-static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
 {
        int error;
        u8 val[3];
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
                return error;
        }
 
-       *id = val[1];
+       *id = be16_to_cpup((__be16 *)val);
        return 0;
 }
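
The I2C and SMBus variants above widen the product id to 16 bits, but the two buses deliver it in opposite byte orders, hence the different endian helpers. They are equivalent to the following open-coded byte assembly over the raw buffer val[]:

        u16 id_le = val[0] | (val[1] << 8);  /* le16_to_cpup(): LSB first */
        u16 id_be = (val[0] << 8) | val[1];  /* be16_to_cpup(): MSB first */
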
 
index 994ae788615698bf3af613f1de765fe0411be995..6025eb430c0a5010c908961ccf8897943fd3c945 100644 (file)
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse)
        struct synaptics_data *priv = psmouse->private;
 
        priv->mode = 0;
-
-       if (priv->absolute_mode) {
+       if (priv->absolute_mode)
                priv->mode |= SYN_BIT_ABSOLUTE_MODE;
-               if (SYN_CAP_EXTENDED(priv->capabilities))
-                       priv->mode |= SYN_BIT_W_MODE;
-       }
-
-       if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
+       if (priv->disable_gesture)
                priv->mode |= SYN_BIT_DISABLE_GESTURE;
-
        if (psmouse->rate >= 80)
                priv->mode |= SYN_BIT_HIGH_RATE;
+       if (SYN_CAP_EXTENDED(priv->capabilities))
+               priv->mode |= SYN_BIT_W_MODE;
 
        if (synaptics_mode_cmd(psmouse, priv->mode))
                return -1;
index 75516996db2070b621c6250cdd9710acf473ad42..316f2c8971011dae527d506ee18d49ce96f316e0 100644 (file)
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
         * time before the ACK arrives.
         */
        if (ps2_sendbyte(ps2dev, command & 0xff,
-                        command == PS2_CMD_RESET_BAT ? 1000 : 200))
-               goto out;
+                        command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
+               serio_pause_rx(ps2dev->serio);
+               goto out_reset_flags;
+       }
 
-       for (i = 0; i < send; i++)
-               if (ps2_sendbyte(ps2dev, param[i], 200))
-                       goto out;
+       for (i = 0; i < send; i++) {
+               if (ps2_sendbyte(ps2dev, param[i], 200)) {
+                       serio_pause_rx(ps2dev->serio);
+                       goto out_reset_flags;
+               }
+       }
 
        /*
         * The reset command takes a long time to execute.
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
                                   !(ps2dev->flags & PS2_FLAG_CMD), timeout);
        }
 
+       serio_pause_rx(ps2dev->serio);
+
        if (param)
                for (i = 0; i < receive; i++)
                        param[i] = ps2dev->cmdbuf[(receive - 1) - i];
 
        if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
-               goto out;
+               goto out_reset_flags;
 
        rc = 0;
 
- out:
-       serio_pause_rx(ps2dev->serio);
+ out_reset_flags:
        ps2dev->flags = 0;
        serio_continue_rx(ps2dev->serio);
 
index 26b45936f9fdf3334c6f083aeaa7c5ee5bb178dd..1e8cd6f1fe9e875005af95b54787890f81116a5f 100644 (file)
@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
        parkbd_port = parkbd_allocate_serio();
        if (!parkbd_port) {
                parport_release(parkbd_dev);
+               parport_unregister_device(parkbd_dev);
                return -ENOMEM;
        }
 
index ff0b75813daa21cff6f8baccfbf453a06ebf383b..8275267eac25441f308e6103e82d48830d1feb71 100644 (file)
@@ -94,7 +94,7 @@ struct imx6ul_tsc {
  * The TSC module needs the ADC to get the measured value, so
  * before configuring the TSC we should initialize the ADC module.
  */
-static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
 {
        int adc_hc = 0;
        int adc_gc;
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
 
        timeout = wait_for_completion_timeout
                        (&tsc->completion, ADC_TIMEOUT);
-       if (timeout == 0)
+       if (timeout == 0) {
                dev_err(tsc->dev, "Timeout for adc calibration\n");
+               return -ETIMEDOUT;
+       }
 
        adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
-       if (adc_gs & ADC_CALF)
+       if (adc_gs & ADC_CALF) {
                dev_err(tsc->dev, "ADC calibration failed\n");
+               return -EINVAL;
+       }
 
        /* TSC needs the ADC to work in hardware-trigger mode */
        adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
        adc_cfg |= ADC_HARDWARE_TRIGGER;
        writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+       return 0;
 }
 
 /*
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
        writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
 }
 
-static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
 {
-       imx6ul_adc_init(tsc);
+       int err;
+
+       err = imx6ul_adc_init(tsc);
+       if (err)
+               return err;
        imx6ul_tsc_channel_config(tsc);
        imx6ul_tsc_set(tsc);
+
+       return 0;
 }
 
 static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev)
                return err;
        }
 
-       imx6ul_tsc_init(tsc);
-
-       return 0;
+       return imx6ul_tsc_init(tsc);
 }
 
 static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        int tsc_irq;
        int adc_irq;
 
-       tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+       tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
        if (!tsc)
                return -ENOMEM;
 
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        if (!input_dev)
                return -ENOMEM;
 
-       input_dev->name = "iMX6UL TouchScreen Controller";
+       input_dev->name = "iMX6UL Touchscreen Controller";
        input_dev->id.bustype = BUS_HOST;
 
        input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
        }
 
        adc_irq = platform_get_irq(pdev, 1);
-       if (adc_irq <= 0) {
+       if (adc_irq < 0) {
                dev_err(&pdev->dev, "no adc irq resource?\n");
                return adc_irq;
        }
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
                        goto out;
                }
 
-               imx6ul_tsc_init(tsc);
+               retval = imx6ul_tsc_init(tsc);
        }
 
 out:
index 7cce87650fc8da3e401ec9a9aa1807ea1a2d9dfb..1fafc9f57af6c75a7a8a9e9d6b90e016f4e90272 100644 (file)
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
        if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
                dev_err(dev, "failed to get x-size property\n");
                return NULL;
-       };
+       }
 
        if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
                dev_err(dev, "failed to get y-size property\n");
                return NULL;
-       };
+       }
 
        of_property_read_u32(np, "contact-threshold",
                                &pdata->contact_threshold);
index 4664c2a96c67fee361c3476ddc8f9a8e8842d271..d9da766719c863327d4a8563804994c3edfd01c0 100644 (file)
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
 endmenu
 
 config IOMMU_IOVA
-       bool
+       tristate
 
 config OF_IOMMU
        def_bool y
index 2d7349a3ee1496408f051b4da8accebc8dd02ec1..041bc1810a86131deb77152dd6b5a7cd43338a5d 100644 (file)
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
 
        /* Restrict dma_mask to the width that the iommu can handle */
        dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+       /* Ensure we reserve the whole size-aligned region */
+       nrpages = __roundup_pow_of_two(nrpages);
 
        if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
                /*
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void)
 static int __init iommu_init_mempool(void)
 {
        int ret;
-       ret = iommu_iova_cache_init();
+       ret = iova_cache_get();
        if (ret)
                return ret;
 
@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void)
 
        kmem_cache_destroy(iommu_domain_cache);
 domain_error:
-       iommu_iova_cache_destroy();
+       iova_cache_put();
 
        return -ENOMEM;
 }
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void)
 {
        kmem_cache_destroy(iommu_devinfo_cache);
        kmem_cache_destroy(iommu_domain_cache);
-       iommu_iova_cache_destroy();
+       iova_cache_put();
 }
 
 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
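
The nrpages round-up above pairs with the iova.c change further down: since alloc_iova() no longer inflates a size-aligned request to the next power of two itself, a caller that wants the whole aligned span reserved must round the page count before asking. Roughly:

        nrpages = __roundup_pow_of_two(nrpages);        /* cover the aligned span */
        iova = alloc_iova(&domain->iovad, nrpages, limit_pfn, true);
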
index b7c3d923f3e1c0569c42492d435b7c4d9a321caa..fa0adef32bd6d3a4af1b97ee3b1fb22dde6c225f 100644 (file)
  */
 
 #include <linux/iova.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
-       int ret = 0;
-
-       iommu_iova_cache = kmem_cache_create("iommu_iova",
-                                        sizeof(struct iova),
-                                        0,
-                                        SLAB_HWCACHE_ALIGN,
-                                        NULL);
-       if (!iommu_iova_cache) {
-               pr_err("Couldn't create iova cache\n");
-               ret = -ENOMEM;
-       }
-
-       return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
-       kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
-       return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
-       kmem_cache_free(iommu_iova_cache, iova);
-}
-
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
        iovad->start_pfn = start_pfn;
        iovad->dma_32bit_pfn = pfn_32bit;
 }
+EXPORT_SYMBOL_GPL(init_iova_domain);
 
 static struct rb_node *
 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
        }
 }
 
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-       unsigned int pad_size = 0;
-       unsigned int order = ilog2(size);
-
-       if (order)
-               pad_size = (limit_pfn + 1) % (1 << order);
-
-       return pad_size;
+       return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }
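
A worked example of the new padding formula, with hypothetical numbers:

        /*
         * limit_pfn = 1023, size = 6: __roundup_pow_of_two(6) = 8, so
         *   pad = (1023 + 1 - 6) & (8 - 1) = 1018 & 7 = 2
         * and the allocator places pfn_lo = limit_pfn - (size + pad) + 1
         *                                 = 1023 - 8 + 1 = 1016,
         * which is 8-aligned. The old ilog2()-based version computed
         * pad = 1024 % 4 = 0 and would have started at 1018, which is
         * not aligned to the rounded-up size.
         */
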
 
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
        rb_insert_color(&iova->node, root);
 }
 
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+       return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+       kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+       mutex_lock(&iova_cache_mutex);
+       if (!iova_cache_users) {
+               iova_cache = kmem_cache_create(
+                       "iommu_iova", sizeof(struct iova), 0,
+                       SLAB_HWCACHE_ALIGN, NULL);
+               if (!iova_cache) {
+                       mutex_unlock(&iova_cache_mutex);
+                       printk(KERN_ERR "Couldn't create iova cache\n");
+                       return -ENOMEM;
+               }
+       }
+
+       iova_cache_users++;
+       mutex_unlock(&iova_cache_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+       mutex_lock(&iova_cache_mutex);
+       if (WARN_ON(!iova_cache_users)) {
+               mutex_unlock(&iova_cache_mutex);
+               return;
+       }
+       iova_cache_users--;
+       if (!iova_cache_users)
+               kmem_cache_destroy(iova_cache);
+       mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
 /**
  * alloc_iova - allocates an iova
  * @iovad: - iova domain in question
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
        if (!new_iova)
                return NULL;
 
-       /* If size aligned is set then round the size to
-        * to next power of two.
-        */
-       if (size_aligned)
-               size = __roundup_pow_of_two(size);
-
        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                        new_iova, size_aligned);
 
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 
        return new_iova;
 }
+EXPORT_SYMBOL_GPL(alloc_iova);
 
 /**
  * find_iova - finds an iova for a given pfn
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
 }
+EXPORT_SYMBOL_GPL(find_iova);
 
 /**
  * __free_iova - frees the given iova
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
 }
+EXPORT_SYMBOL_GPL(__free_iova);
 
 /**
  * free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
                __free_iova(iovad, iova);
 
 }
+EXPORT_SYMBOL_GPL(free_iova);
 
 /**
  * put_iova_domain - destroys the iova domain
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad)
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(put_iova_domain);
 
 static int
 __is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@ finish:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
 }
+EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies the reserved ranges between domains
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(copy_reserved_iova);
 
 struct iova *
 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@ error:
                free_iova_mem(prev);
        return NULL;
 }
+
+MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_LICENSE("GPL");
index cf351c6374645e7b0e4fe982a937d49b4904cfe9..a7c8c9ffbafd3503228a6c4e63c6870b5d9c784c 100644 (file)
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
 
        dev_alias->dev_id = alias;
        if (pdev != dev_alias->pdev)
-               dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+               dev_alias->count += its_pci_msi_vec_count(pdev);
 
        return 0;
 }
index ac7ae2b3cb83726e336ce310d95e2b890da8224d..25ceae9f7348b3208bdd6dec0048a88d4ce34f73 100644 (file)
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
 out:
        spin_unlock(&lpi_lock);
 
+       if (!bitmap)
+               *base = *nr_ids = 0;
+
        return bitmap;
 }
 
index af2f16bb8a94d3912787b1893431c97e541fc0ec..aeaa061f0dbfd3694d8a9f890822eba266e2e4e0 100644 (file)
@@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained)
                intrmask[i] = gic_read(intrmask_reg);
                pending_reg += gic_reg_step;
                intrmask_reg += gic_reg_step;
+
+               if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+                       continue;
+
+               pending[i] |= (u64)gic_read(pending_reg) << 32;
+               intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
+               pending_reg += gic_reg_step;
+               intrmask_reg += gic_reg_step;
        }
 
        bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
        spin_lock_irqsave(&gic_lock, flags);
 
        /* Re-route this IRQ */
-       gic_map_to_vpe(irq, cpumask_first(&tmp));
+       gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
 
        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
@@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu,
                                      GIC_SHARED_TO_HWIRQ(intr));
        int i;
 
-       gic_map_to_vpe(intr, cpu);
+       gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[cpu].pcpu_mask);
index e51de52eeb94f71c9d6712a61d31e49f8e6f2f60..48b5890c28e35ad70484d67b29e12a70cb9a4b1b 100644 (file)
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
        if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
                ret = bitmap_storage_alloc(&store, chunks,
                                           !bitmap->mddev->bitmap_info.external,
-                                          bitmap->cluster_slot);
+                                          mddev_is_clustered(bitmap->mddev)
+                                          ? bitmap->cluster_slot : 0);
        if (ret)
                goto err;
 
index 4f5ecbe94ccbf97c562d96930635c6aaff0550d3..c702de18207ae76ab56f1235ed5c98a9095ed050 100644 (file)
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);
 
+       if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+               return -EBUSY;
        mddev_unlock(mddev);
        wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
                                          &mddev->recovery));
+       wait_event(mddev->sb_wait,
+                  !test_bit(MD_CHANGE_PENDING, &mddev->flags));
        mddev_lock_nointr(mddev);
 
        mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
                        md_reap_sync_thread(mddev);
                        clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                        clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       clear_bit(MD_CHANGE_PENDING, &mddev->flags);
                        goto unlock;
                }
 
index d222522c52e077dcdd012fbf22d929fc6a7cb0ad..d132f06afdd1aa3140922f7965494087cf43eb7a 100644 (file)
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
        return 0;
 
 out_free_conf:
-       if (conf->pool)
-               mempool_destroy(conf->pool);
+       mempool_destroy(conf->pool);
        kfree(conf->multipaths);
        kfree(conf);
        mddev->private = NULL;
index 63e619b2f44eb3ce51a90eb74980ed8a1f91c639..f8e5db0cb5aaae3038e67ec9f348f1faa4c64508 100644 (file)
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
                struct md_rdev *rdev;
                bool discard_supported = false;
 
-               rdev_for_each(rdev, mddev) {
-                       disk_stack_limits(mddev->gendisk, rdev->bdev,
-                                         rdev->data_offset << 9);
-                       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
-                               discard_supported = true;
-               }
                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
                blk_queue_io_opt(mddev->queue,
                                 (mddev->chunk_sectors << 9) * mddev->raid_disks);
 
+               rdev_for_each(rdev, mddev) {
+                       disk_stack_limits(mddev->gendisk, rdev->bdev,
+                                         rdev->data_offset << 9);
+                       if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+                               discard_supported = true;
+               }
                if (!discard_supported)
                        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
                else
index 4517f06c41bafe0fb2fbe2a5b454b68f012b2455..049df6c4a8cc302c9a266e34e5a1edefecb31cf7 100644 (file)
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
        }
 
        if (bio && bio_data_dir(bio) == WRITE) {
-               if (bio->bi_iter.bi_sector >=
-                   conf->mddev->curr_resync_completed) {
+               if (bio->bi_iter.bi_sector >= conf->next_resync) {
                        if (conf->start_next_window == MaxSector)
                                conf->start_next_window =
                                        conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
        conf->r1buf_pool = NULL;
 
        spin_lock_irq(&conf->resync_lock);
-       conf->next_resync = 0;
+       conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
        conf->start_next_window = MaxSector;
        conf->current_window_requests +=
                conf->next_window_requests;
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 
  abort:
        if (conf) {
-               if (conf->r1bio_pool)
-                       mempool_destroy(conf->r1bio_pool);
+               mempool_destroy(conf->r1bio_pool);
                kfree(conf->mirrors);
                safe_put_page(conf->tmppage);
                kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
 {
        struct r1conf *conf = priv;
 
-       if (conf->r1bio_pool)
-               mempool_destroy(conf->r1bio_pool);
+       mempool_destroy(conf->r1bio_pool);
        kfree(conf->mirrors);
        safe_put_page(conf->tmppage);
        kfree(conf->poolinfo);
index 0fc33eb888551292bb37461f08d7e704483f93e6..7c99a403771527354a5323137f5004d7adf9e007 100644 (file)
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
                printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
                       mdname(mddev));
        if (conf) {
-               if (conf->r10bio_pool)
-                       mempool_destroy(conf->r10bio_pool);
+               mempool_destroy(conf->r10bio_pool);
                kfree(conf->mirrors);
                safe_put_page(conf->tmppage);
                kfree(conf);
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev)
 
 out_free_conf:
        md_unregister_thread(&mddev->thread);
-       if (conf->r10bio_pool)
-               mempool_destroy(conf->r10bio_pool);
+       mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
        kfree(conf->mirrors);
        kfree(conf);
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
 {
        struct r10conf *conf = priv;
 
-       if (conf->r10bio_pool)
-               mempool_destroy(conf->r10bio_pool);
+       mempool_destroy(conf->r10bio_pool);
        safe_put_page(conf->tmppage);
        kfree(conf->mirrors);
        kfree(conf->mirrors_old);
index 15ef2c641b2b93e96004d073463fdcfaaaaceab4..49bb8d3ff9be8c7741a5bebc6b210fde38989a09 100644 (file)
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
               drop_one_stripe(conf))
                ;
 
-       if (conf->slab_cache)
-               kmem_cache_destroy(conf->slab_cache);
+       kmem_cache_destroy(conf->slab_cache);
        conf->slab_cache = NULL;
 }
 
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        spin_unlock_irq(&sh->stripe_lock);
                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
                                wake_up(&conf->wait_for_overlap);
+                       if (bi)
+                               s->to_read--;
                        while (bi && bi->bi_iter.bi_sector <
                               sh->dev[i].sector + STRIPE_SECTORS) {
                                struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                 */
                clear_bit(R5_LOCKED, &sh->dev[i].flags);
        }
+       s->to_write = 0;
+       s->written = 0;
 
        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
                if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
                 */
                return 0;
 
-       for (i = 0; i < s->failed; i++) {
+       for (i = 0; i < s->failed && i < 2; i++) {
                if (fdev[i]->towrite &&
                    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
                    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
            sh->sector < sh->raid_conf->mddev->recovery_cp)
                /* reconstruct-write isn't being forced */
                return 0;
-       for (i = 0; i < s->failed; i++) {
+       for (i = 0; i < s->failed && i < 2; i++) {
                if (s->failed_num[i] != sh->pd_idx &&
                    s->failed_num[i] != sh->qd_idx &&
                    !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
index 0520064dc33beb164aa9d80642c371e227d599b1..a3eb20bdcd97bf32d7a56d741b1bde5097d65767 100644 (file)
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
        int err = cmd->error;
 
        /* Flag re-tuning needed on CRC errors */
-       if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+       if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+           cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+           (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
            (mrq->data && mrq->data->error == -EILSEQ) ||
-           (mrq->stop && mrq->stop->error == -EILSEQ))
+           (mrq->stop && mrq->stop->error == -EILSEQ)))
                mmc_retune_needed(host);
 
        if (err && cmd->retries && mmc_host_is_spi(host)) {
index abd933b7029bec26b7adebbea2db8fe3be426eb6..5466f25f0281e7b8a83ce4f34d21d1c14a3ebe4a 100644 (file)
@@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host)
                                           0, &cd_gpio_invert);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
-               else if (ret != -ENOENT)
+               else if (ret != -ENOENT && ret != -ENOSYS)
                        return ret;
 
                /*
@@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host)
        ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
        if (!ret)
                dev_info(host->parent, "Got WP GPIO\n");
-       else if (ret != -ENOENT)
+       else if (ret != -ENOENT && ret != -ENOSYS)
                return ret;
 
        if (of_property_read_bool(np, "disable-wp"))
index 1420f29628c70d8e8fdedbfa3fe7d77f1ba0ae0b..8cadd74e8407bb08d7e277a82ac5d80d496f77e4 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
 #include <linux/io.h>
 #include <linux/regulator/consumer.h>
 #include <linux/gpio.h>
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc)
 {
        struct pxamci_host *host = mmc_priv(mmc);
 
-       if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
-               if (host->pdata->gpio_card_ro_invert)
-                       return !gpio_get_value(host->pdata->gpio_card_ro);
-               else
-                       return gpio_get_value(host->pdata->gpio_card_ro);
-       }
+       if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
+               return mmc_gpio_get_ro(mmc);
        if (host->pdata && host->pdata->get_ro)
                return !!host->pdata->get_ro(mmc_dev(mmc));
        /*
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
 
 static const struct mmc_host_ops pxamci_ops = {
        .request                = pxamci_request,
+       .get_cd                 = mmc_gpio_get_cd,
        .get_ro                 = pxamci_get_ro,
        .set_ios                = pxamci_set_ios,
        .enable_sdio_irq        = pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev)
                gpio_power = host->pdata->gpio_power;
        }
        if (gpio_is_valid(gpio_power)) {
-               ret = gpio_request(gpio_power, "mmc card power");
+               ret = devm_gpio_request(&pdev->dev, gpio_power,
+                                       "mmc card power");
                if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+                       dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
+                               gpio_power);
                        goto out;
                }
                gpio_direction_output(gpio_power,
                                      host->pdata->gpio_power_invert);
        }
-       if (gpio_is_valid(gpio_ro)) {
-               ret = gpio_request(gpio_ro, "mmc card read only");
-               if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
-                       goto err_gpio_ro;
-               }
-               gpio_direction_input(gpio_ro);
+       if (gpio_is_valid(gpio_ro))
+               ret = mmc_gpio_request_ro(mmc, gpio_ro);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+               goto out;
+       } else {
+               mmc->caps |= host->pdata->gpio_card_ro_invert ?
+                       MMC_CAP2_RO_ACTIVE_HIGH : 0;
        }
-       if (gpio_is_valid(gpio_cd)) {
-               ret = gpio_request(gpio_cd, "mmc card detect");
-               if (ret) {
-                       dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
-                       goto err_gpio_cd;
-               }
-               gpio_direction_input(gpio_cd);
 
-               ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
-                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-                                 "mmc card detect", mmc);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to request card detect IRQ\n");
-                       goto err_request_irq;
-               }
+       if (gpio_is_valid(gpio_cd))
+               ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+               goto out;
        }
 
        if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
        return 0;
 
-err_request_irq:
-       gpio_free(gpio_cd);
-err_gpio_cd:
-       gpio_free(gpio_ro);
-err_gpio_ro:
-       gpio_free(gpio_power);
- out:
+out:
        if (host) {
                if (host->dma_chan_rx)
                        dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev)
                        gpio_ro = host->pdata->gpio_card_ro;
                        gpio_power = host->pdata->gpio_power;
                }
-               if (gpio_is_valid(gpio_cd)) {
-                       free_irq(gpio_to_irq(gpio_cd), mmc);
-                       gpio_free(gpio_cd);
-               }
-               if (gpio_is_valid(gpio_ro))
-                       gpio_free(gpio_ro);
-               if (gpio_is_valid(gpio_power))
-                       gpio_free(gpio_power);
                if (host->vcc)
                        regulator_put(host->vcc);
 
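Editor's note: with the conversion to devm_gpio_request() and the slot-gpio helpers, the hand-rolled request/free pairs and the err_* unwind labels disappear: devres releases the power GPIO automatically, and the MMC core owns the RO/CD pins, including the card-detect IRQ. A condensed kernel-context sketch of the resulting probe flow (the hunk ORs the flag into ->caps; mainline keeps MMC_CAP2_* bits in ->caps2, which this sketch follows; the surrounding pxamci plumbing is elided):

#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/platform_device.h>

static int demo_request_gpios(struct platform_device *pdev,
                              struct mmc_host *mmc, int gpio_power,
                              int gpio_ro, int gpio_cd, bool ro_invert)
{
        int ret;

        if (gpio_is_valid(gpio_power)) {
                /* devres frees this on probe failure or driver detach */
                ret = devm_gpio_request(&pdev->dev, gpio_power,
                                        "mmc card power");
                if (ret)
                        return ret;
        }

        if (gpio_is_valid(gpio_ro)) {
                /* the core now answers ->get_ro via mmc_gpio_get_ro() */
                ret = mmc_gpio_request_ro(mmc, gpio_ro);
                if (ret)
                        return ret;
                if (ro_invert)
                        mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
        }

        if (gpio_is_valid(gpio_cd))
                /* also wires up the card-detect IRQ internally */
                return mmc_gpio_request_cd(mmc, gpio_cd, 0);

        return 0;
}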
index a7b7a67715986d748d9f880088cc2ae069bd2283..b981b8552e43aad1778e7e7e7277b75d73884c68 100644 (file)
 #define SDXC_IDMAC_DES0_CES    BIT(30) /* card error summary */
 #define SDXC_IDMAC_DES0_OWN    BIT(31) /* 1-idma owns it, 0-host owns it */
 
+#define SDXC_CLK_400K          0
+#define SDXC_CLK_25M           1
+#define SDXC_CLK_50M           2
+#define SDXC_CLK_50M_DDR       3
+
+struct sunxi_mmc_clk_delay {
+       u32 output;
+       u32 sample;
+};
+
 struct sunxi_idma_des {
        u32     config;
        u32     buf_size;
@@ -229,6 +239,7 @@ struct sunxi_mmc_host {
        struct clk      *clk_mmc;
        struct clk      *clk_sample;
        struct clk      *clk_output;
+       const struct sunxi_mmc_clk_delay *clk_delays;
 
        /* irq */
        spinlock_t      lock;
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
 
        /* determine delays */
        if (rate <= 400000) {
-               oclk_dly = 180;
-               sclk_dly = 42;
+               oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+               sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
        } else if (rate <= 25000000) {
-               oclk_dly = 180;
-               sclk_dly = 75;
+               oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+               sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
        } else if (rate <= 50000000) {
                if (ios->timing == MMC_TIMING_UHS_DDR50) {
-                       oclk_dly = 60;
-                       sclk_dly = 120;
+                       oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+                       sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
                } else {
-                       oclk_dly = 90;
-                       sclk_dly = 150;
+                       oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+                       sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
                }
-       } else if (rate <= 100000000) {
-               oclk_dly = 6;
-               sclk_dly = 24;
-       } else if (rate <= 200000000) {
-               oclk_dly = 3;
-               sclk_dly = 12;
        } else {
                return -EINVAL;
        }
@@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 static const struct of_device_id sunxi_mmc_of_match[] = {
        { .compatible = "allwinner,sun4i-a10-mmc", },
        { .compatible = "allwinner,sun5i-a13-mmc", },
+       { .compatible = "allwinner,sun9i-a80-mmc", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = {
        .hw_reset        = sunxi_mmc_hw_reset,
 };
 
+static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
+       [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
+       [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
+       [SDXC_CLK_50M]          = { .output =  90, .sample = 120 },
+       [SDXC_CLK_50M_DDR]      = { .output =  60, .sample = 120 },
+};
+
+static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
+       [SDXC_CLK_400K]         = { .output = 180, .sample = 180 },
+       [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
+       [SDXC_CLK_50M]          = { .output = 150, .sample = 120 },
+       [SDXC_CLK_50M_DDR]      = { .output =  90, .sample = 120 },
+};
+
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
                                      struct platform_device *pdev)
 {
@@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
        else
                host->idma_des_size_bits = 16;
 
+       if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
+               host->clk_delays = sun9i_mmc_clk_delays;
+       else
+               host->clk_delays = sunxi_mmc_clk_delays;
+
        ret = mmc_regulator_get_supply(host->mmc);
        if (ret) {
                if (ret != -EPROBE_DEFER)
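Editor's note: the sunxi delay constants move out of sunxi_mmc_clk_set_rate() into per-SoC tables indexed by the SDXC_CLK_* bucket, selected once at probe from the compatible string; note the >50 MHz buckets are dropped, so faster rates now fail with -EINVAL. A kernel-context sketch of the table lookup (error handling for out-of-range rates omitted; names follow the hunks above):

#include <linux/types.h>

static void demo_pick_delays(const struct sunxi_mmc_clk_delay *delays,
                             unsigned long rate, bool ddr,
                             u32 *oclk_dly, u32 *sclk_dly)
{
        int idx;

        if (rate <= 400000)
                idx = SDXC_CLK_400K;
        else if (rate <= 25000000)
                idx = SDXC_CLK_25M;
        else    /* <= 50 MHz; anything faster returns -EINVAL earlier */
                idx = ddr ? SDXC_CLK_50M_DDR : SDXC_CLK_50M;

        *oclk_dly = delays[idx].output;
        *sclk_dly = delays[idx].sample;
}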
index 5bbd1f094f4e33dca9c7ad3dca3edc7736b7e0bb..1fc23e48fe8e49fc947c972cca179399a60a37ec 100644 (file)
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
                goto bad;
        }
 
+       if (data_size > ubi->leb_size) {
+               ubi_err(ubi, "bad data_size");
+               goto bad;
+       }
+
        if (vol_type == UBI_VID_STATIC) {
                /*
                 * Although from high-level point of view static volumes may
index 80bdd5b88bac271fbd01f5496be76bf1b79f0d9a..d85c1976216078d2f1647b34ba5873c9c86439a0 100644 (file)
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
                if (ubi->corr_peb_count)
                        ubi_err(ubi, "%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
+               return -ENOSPC;
        }
        ubi->rsvd_pebs += reserved_pebs;
        ubi->avail_pebs -= reserved_pebs;
index 275d9fb6fe5c541c7253ece5cacc7ae189110eba..eb4489f9082fe84345d2ec68b0f8723888e5c177 100644 (file)
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
                if (ubi->corr_peb_count)
                        ubi_err(ubi, "%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
+               err = -ENOSPC;
                goto out_free;
        }
        ubi->avail_pebs -= reserved_pebs;
index f8baa897d1a0e48b39f2d628ce36bfc2ef73ca08..1f7dd927cc5ea4777530a8ba23daae631f258bf9 100644 (file)
@@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                                reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
                        else
                                reg |= PORT_CONTROL_FRAME_MODE_DSA;
+                       reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+                               PORT_CONTROL_FORWARD_UNKNOWN_MC;
                }
 
                if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
index b7a0f7879de2d3ee4d2b419a3b26c5e7661224c6..9e59663a6eadb012de6f4a4474484800401fce3b 100644 (file)
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
 }
 
 /* Flush FLI data fifo. */
-static u32
+static int
 bfa_flash_fifo_flush(void __iomem *pci_bar)
 {
        u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
 }
 
 /* Read flash status. */
-static u32
+static int
 bfa_flash_status_read(void __iomem *pci_bar)
 {
        union bfa_flash_dev_status_reg  dev_status;
-       u32                             status;
+       int                             status;
        u32                     ret_status;
        int                             i;
 
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
 }
 
 /* Start flash read operation. */
-static u32
+static int
 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
                     char *buf)
 {
-       u32 status;
+       int status;
 
        /* len must be a multiple of 4 and must not exceed the fifo size */

        if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
                   u32 len)
 {
-       u32 n, status;
+       u32 n;
+       int status;
        u32 off, l, s, residue, fifo_sz;
 
        residue = len;
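Editor's note: the bfa_flash_* helpers return negative errno-style codes, so declaring the return type (and the local status holders) as u32 silently discards the sign. A userspace demo of the bug class, with a hypothetical reader standing in for bfa_flash_status_read():

#include <stdio.h>

static int read_status(int fail)   /* stand-in for bfa_flash_status_read() */
{
        return fail ? -5 /* -EIO */ : 0x10;
}

int main(void)
{
        /* a u32 holder converts -5 to 4294967291, so "status < 0"
         * can never fire and the error is lost */
        unsigned int u = read_status(1);
        int          s = read_status(1);

        printf("u32 holder: %u (error lost)\n", u);
        if (s < 0)
                printf("int holder: %d (error propagates)\n", s);
        return 0;
}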
index cc2d8b4b18e3e2a99ef303b76809545496089787..253f8ed0537a058778fd08b2c5a0747f53d7eca4 100644 (file)
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
        struct net_device *ndev;
        struct hip04_priv *priv;
        struct resource *res;
-       unsigned int irq;
+       int irq;
        int ret;
 
        ndev = alloc_etherdev(sizeof(struct hip04_priv));
index 28df37420da963d5d8f3b3234e4f584442537121..ac02c675c59c4167b4870c892c6afcceb13aec4c 100644 (file)
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
        u32 index;
 };
 
-#define EMAC_ETHTOOL_REGS_VER          0
-#define EMAC4_ETHTOOL_REGS_VER         1
-#define EMAC4SYNC_ETHTOOL_REGS_VER     2
+#define EMAC_ETHTOOL_REGS_VER          3
+#define EMAC4_ETHTOOL_REGS_VER         4
+#define EMAC4SYNC_ETHTOOL_REGS_VER     5
 
 #endif /* __IBM_NEWEMAC_CORE_H */
index 3e0d20037675e84164ae9da793ddc080737e1d81..62488a67149d1f0ac38279517427853ebbcec6fc 100644 (file)
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);
 
+       if (hw->aq.arq.count == 0) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQRX: Admin queue not initialized.\n");
+               ret_code = I40E_ERR_QUEUE_EMPTY;
+               goto clean_arq_element_err;
+       }
+
        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
        if (ntu == ntc) {
@@ -1007,6 +1014,8 @@ clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);
 
        if (i40e_is_nvm_update_op(&e->desc)) {
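Editor's note: the new guard tests arq.count under arq_mutex and bails through a dedicated label, so the early-error path can never leak the mutex. A generic sketch of the lock/validate/goto-unlock shape, using pthreads and placeholder names in place of the i40e types:

#include <errno.h>
#include <pthread.h>

struct queue {                  /* placeholder for the admin receive queue */
        pthread_mutex_t lock;
        unsigned int count;     /* 0 until the queue is initialized */
};

static int clean_element(struct queue *q)
{
        int ret = 0;

        pthread_mutex_lock(&q->lock);

        if (q->count == 0) {
                ret = -ENODEV;
                goto out;       /* leave through the unlock path */
        }

        /* ... consume one ring element under the lock ... */
out:
        pthread_mutex_unlock(&q->lock);
        return ret;
}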
index 851c1a159be8a1566d8a792c52bf4e83ea6877d7..2fdf978ae6a5d10a6751f3ec702ef3636fca543f 100644 (file)
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
                rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
-       rx_ctx.showiv = 1;
+       /* this controls whether VLAN is stripped from inner headers */
+       rx_ctx.showiv = 0;
 #ifdef I40E_FCOE
        rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
 #endif
index f08450b907745afefcdd6da766f7a32ae3aa57c2..929d47152bf271b1ddba3d8dc284d1941131261a 100644 (file)
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);
 
+       if (hw->aq.arq.count == 0) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQRX: Admin queue not initialized.\n");
+               ret_code = I40E_ERR_QUEUE_EMPTY;
+               goto clean_arq_element_err;
+       }
+
        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
        if (ntu == ntc) {
@@ -948,6 +955,8 @@ clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);
 
        return ret_code;
index bd9ea0d01aae4cba296d2eba5e9f7a865b5a0426..1d4e2e054647ae3da57bfad23f4460f622ca85b0 100644 (file)
@@ -1184,10 +1184,11 @@ out:
        if (prot == MLX4_PROT_ETH) {
                /* manage the steering entry for promisc mode */
                if (new_entry)
-                       new_steering_entry(dev, port, steer, index, qp->qpn);
+                       err = new_steering_entry(dev, port, steer,
+                                                index, qp->qpn);
                else
-                       existing_steering_entry(dev, port, steer,
-                                               index, qp->qpn);
+                       err = existing_steering_entry(dev, port, steer,
+                                                     index, qp->qpn);
        }
        if (err && link && index != -1) {
                if (index < dev->caps.num_mgms)
index aa0d5ffe92d8177234c1975d958a76751a9539c6..9335e5ae18ccee954b4cc08eff41a01871b41b2e 100644 (file)
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
 
        return err;
 }
-
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
-{
-       struct mlx5_cmd_query_special_contexts_mbox_in in;
-       struct mlx5_cmd_query_special_contexts_mbox_out out;
-       int err;
-
-       memset(&in, 0, sizeof(in));
-       memset(&out, 0, sizeof(out));
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-       if (err)
-               return err;
-
-       if (out.hdr.status)
-               err = mlx5_cmd_status_to_err(&out.hdr);
-
-       *rsvd_lkey = be32_to_cpu(out.resd_lkey);
-
-       return err;
-}
-EXPORT_SYMBOL(mlx5_core_query_special_context);
index 2b32e0c5a0b46bcdb4e50d1931c676f2f65503d0..b4f21232019a98c7e0afd9e5a43a5a160da765fe 100644 (file)
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
-       u16 rg_saw_cnt;
+       int rg_saw_cnt;
        u32 data;
        static const struct ephy_info e_info_8168h_1[] = {
                { 0x1e, 0x0800, 0x0001 },
index dd652f2ae03db964ed539c5d369092173ab9ab33..108a3118ace7fbd107a2916aa29066cc326c0b6c 100644 (file)
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi)
         * Unbound PCI devices are always put in D0, regardless of
         * runtime PM status.  During probe, the device is set to
         * active and the usage count is incremented.  If the driver
-        * supports runtime PM, it should call pm_runtime_put_noidle()
-        * in its probe routine and pm_runtime_get_noresume() in its
-        * remove routine.
+        * supports runtime PM, it should call pm_runtime_put_noidle(),
+        * or any other runtime PM helper function decrementing the usage
+        * count, in its probe routine and pm_runtime_get_noresume() in
+        * its remove routine.
         */
        pm_runtime_get_sync(dev);
        pci_dev->driver = pci_drv;
index d9de36ee165de119904936c7ba5bec20408ad447..04e2653bb8c02cd9aede61bca14c9638da62c432 100644 (file)
@@ -5,7 +5,7 @@
 menu "Performance monitor support"
 
 config ARM_PMU
-       depends on PERF_EVENTS && ARM
+       depends on PERF_EVENTS && (ARM || ARM64)
        bool "ARM PMU framework"
        default y
        help
index cbfc5990052b6b2733ae1c8a81467d3a0e9e70f4..126a48c6431e5a5d9798aed3472916b06ef476c8 100644 (file)
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
 static void scsi_mq_done(struct scsi_cmnd *cmd)
 {
        trace_scsi_dispatch_cmd_done(cmd);
-       blk_mq_complete_request(cmd->request);
+       blk_mq_complete_request(cmd->request, cmd->request->errors);
 }
 
 static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
index 7ff96270c933b1ad577ee3dbb9cedf92d8f07a89..e570ff084add5b0596cbac05d07bca560d350b01 100644 (file)
@@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
                switch_on_temp = 0;
 
        temperature_threshold = control_temp - switch_on_temp;
+       /*
+        * estimate_pid_constants() tries to find appropriate default
+        * values for thermal zones that don't provide them. If a
+        * system integrator has configured a thermal zone with two
+        * passive trip points at the same temperature, that person
+        * hasn't put any effort into setting up the thermal zone
+        * properly, so just give up.
+        */
+       if (!temperature_threshold)
+               return;
 
        if (!tz->tzp->k_po || force)
                tz->tzp->k_po = int_to_frac(sustainable_power) /
index c68edc16aa54c5e65347588e32c883ffdaf71f63..79e1aa1b0959f1ed8b2e2404fc3a044683c68f0d 100644 (file)
@@ -817,8 +817,9 @@ config ITCO_WDT
        tristate "Intel TCO Timer/Watchdog"
        depends on (X86 || IA64) && PCI
        select WATCHDOG_CORE
+       depends on I2C || I2C=n
        select LPC_ICH if !EXPERT
-       select I2C_I801 if !EXPERT
+       select I2C_I801 if !EXPERT && I2C
        ---help---
          Hardware driver for the intel TCO timer based watchdog devices.
          These drivers are included in the Intel 82801 I/O Controller
index 66c3e656a616619e02c8c523f19913e3274459ff..8a5ce5b5a0b6f9cc684382ddfaa6ed96b4dadb7d 100644 (file)
 #define PM_RSTC_WRCFG_FULL_RESET       0x00000020
 #define PM_RSTC_RESET                  0x00000102
 
+/*
+ * The Raspberry Pi firmware uses the RSTS register to know which partition
+ * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10.
+ * Partition 63 is a special partition used by the firmware to indicate halt.
+ */
+#define PM_RSTS_RASPBERRYPI_HALT       0x555
+
 #define SECS_TO_WDOG_TICKS(x) ((x) << 16)
 #define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
 
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void)
         * hard reset.
         */
        val = readl_relaxed(wdt->base + PM_RSTS);
-       val &= PM_RSTC_WRCFG_CLR;
-       val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+       val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
        writel_relaxed(val, wdt->base + PM_RSTS);
 
        /* Continue with normal reset mechanism */
index cc1bdfc2ff71c31b65dc455c382fb3455815fc2a..006e2348022cbc7015831819fa0f2ae0f689ebfc 100644 (file)
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = {
        },
        {},
 };
+MODULE_DEVICE_TABLE(of, gef_wdt_ids);
 
 static struct platform_driver gef_wdt_driver = {
        .driver = {
index 69013007dc4701826518c0babd6d94d258719892..098fa9c34d6d8232b86ed51c2e2f7bea9a50c982 100644 (file)
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = {
        { .compatible = "men,a021-wdt" },
        { },
 };
+MODULE_DEVICE_TABLE(of, a21_wdt_ids);
 
 static struct platform_driver a21_wdt_driver = {
        .probe = a21_wdt_probe,
index 2789da2c05156d02931779cd21322a24e5f133e2..60b0605bd7e60eb7ed1f8d67f4b01022a70997b3 100644 (file)
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = {
        { .compatible = "moxa,moxart-watchdog" },
        { },
 };
+MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
 
 static struct platform_driver moxart_wdt_driver = {
        .probe      = moxart_wdt_probe,
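Editor's note: all three watchdog hunks add the same one-liner. Without MODULE_DEVICE_TABLE(of, ...), the module carries no OF modalias strings, so udev never autoloads it when the matching device-tree node appears. A minimal sketch of the pattern with placeholder names (the probe body is stubbed out):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int demo_wdt_probe(struct platform_device *pdev)
{
        return 0;       /* a real driver registers a watchdog here */
}

static const struct of_device_id demo_wdt_ids[] = {
        { .compatible = "vendor,demo-wdt" },    /* placeholder string */
        { /* sentinel */ },
};
/* embeds "of:..." alias strings in the module so userspace can
 * modprobe it when the DT node is instantiated */
MODULE_DEVICE_TABLE(of, demo_wdt_ids);

static struct platform_driver demo_wdt_driver = {
        .probe = demo_wdt_probe,
        .driver = {
                .name = "demo-wdt",
                .of_match_table = demo_wdt_ids,
        },
};
module_platform_driver(demo_wdt_driver);

MODULE_LICENSE("GPL");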
index 7ae6df7ea1d2d04962ef4554a6a2fa1efb977006..bcfb14bfc1e49eb36d507c53ccd1c3f4f23135f9 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -569,8 +569,20 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
                goto fallback;
 
+       sector = bh.b_blocknr << (blkbits - 9);
+
        if (buffer_unwritten(&bh) || buffer_new(&bh)) {
                int i;
+
+               length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
+                                               bh.b_size);
+               if (length < 0) {
+                       result = VM_FAULT_SIGBUS;
+                       goto out;
+               }
+               if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
+                       goto fallback;
+
                for (i = 0; i < PTRS_PER_PMD; i++)
                        clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
                wmb_pmem();
@@ -623,7 +635,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                result = VM_FAULT_NOPAGE;
                spin_unlock(ptl);
        } else {
-               sector = bh.b_blocknr << (blkbits - 9);
                length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
                                                bh.b_size);
                if (length < 0) {
index 96f3448b6eb40c0682ec915a71d5030d5bdf4283..fd65b3f1923ccdb609ee29601604624abf32333d 100644 (file)
@@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
 {
        int err;
 
-       mutex_lock(&inode->i_mutex);
        err = security_inode_init_security(inode, dentry, qstr,
                                           &init_xattrs, 0);
-       mutex_unlock(&inode->i_mutex);
-
        if (err) {
                struct ubifs_info *c = dentry->i_sb->s_fs_info;
                ubifs_err(c, "cannot initialize security for inode %lu, error %d",
index 94f9ea8abcae35af8ca36560403fbd25facb7c65..011dde083f231e763e4c96fcc7fb3cb9b6ce23c7 100644 (file)
@@ -1,15 +1,10 @@
 #ifndef _ASM_WORD_AT_A_TIME_H
 #define _ASM_WORD_AT_A_TIME_H
 
-/*
- * This says "generic", but it's actually big-endian only.
- * Little-endian can use more efficient versions of these
- * interfaces, see for example
- *      arch/x86/include/asm/word-at-a-time.h
- * for those.
- */
-
 #include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
 
 struct word_at_a_time {
        const unsigned long high_bits, low_bits;
@@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 #define zero_bytemask(mask) (~1ul << __fls(mask))
 #endif
 
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+       return mask*0x0001020304050608ul >> 56;
+}
+
+#else  /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+       /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+       long a = (0x0ff0001+mask) >> 23;
+       /* Fix the 1 for 00 case */
+       return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+       return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
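Editor's note: the little-endian branch finds the first zero byte without a loop: has_zero() lights the top bit of each zero byte, create_zero_mask() turns everything below the first hit into ones, and the multiply in count_masked_bytes() collapses that into a byte index. A userspace walkthrough, assuming a 64-bit little-endian host, with the REPEAT_BYTE() constants written out by hand:

#include <stdio.h>
#include <string.h>

#define ONE_BITS  0x0101010101010101ul  /* REPEAT_BYTE(0x01) */
#define HIGH_BITS 0x8080808080808080ul  /* REPEAT_BYTE(0x80) */

static long count_masked_bytes(unsigned long mask)
{
        return mask * 0x0001020304050608ul >> 56;
}

int main(void)
{
        const char buf[8] = { 'a', 'b', '\0', 'z', 'z', 'z', 'z', 'z' };
        unsigned long w, bits;

        memcpy(&w, buf, sizeof(w));     /* load eight bytes at once */

        bits = ((w - ONE_BITS) & ~w) & HIGH_BITS;  /* has_zero() */
        bits = (bits - 1) & ~bits;                 /* create_zero_mask() */
        bits >>= 7;                                /* bytemask of the prefix */

        printf("first NUL at byte %ld\n", count_masked_bytes(bits)); /* 2 */
        return 0;
}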
index 2a747a91fdede982354438e1c48fc1a332d06885..3febb4b9fce9243793fbaf3e8dbb331fe6adf81d 100644 (file)
@@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif
index 499e9f625aeffb2f618458b45121a6a6286e6e3e..0212d139a480909a216da244ffd8aadf6effa630 100644 (file)
 #define MODE_I2C_READ  4
 #define MODE_I2C_STOP  8
 
+/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
+#define DP_MST_PHYSICAL_PORT_0 0
+#define DP_MST_LOGICAL_PORT_0 8
+
 #define DP_LINK_STATUS_SIZE       6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
index 86d0b25ed0547db2f63606af97d8bf9e966e39db..0f408b002d989ee8bca9489d6c1add673b315a51 100644 (file)
@@ -374,6 +374,7 @@ struct drm_dp_mst_topology_mgr;
 struct drm_dp_mst_topology_cbs {
        /* create a connector for a port */
        struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
+       void (*register_connector)(struct drm_connector *connector);
        void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_connector *connector);
        void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
index 7235c4851460e6dc79d6d95a53d368b8f06e3525..43856d19cf4d8abc8942849202485fe76244135d 100644 (file)
@@ -217,6 +217,7 @@ struct pci_dev;
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
+bool acpi_isa_irq_available(int irq);
 void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
index 37d1602c4f7aa08b464577c675910046a4db3dde..5e7d43ab61c000d894164e093132f607344e9cc0 100644 (file)
@@ -145,7 +145,6 @@ enum {
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_TAG_SHARED     = 1 << 1,
        BLK_MQ_F_SG_MERGE       = 1 << 2,
-       BLK_MQ_F_SYSFS_UP       = 1 << 3,
        BLK_MQ_F_DEFER_ISSUE    = 1 << 4,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,
@@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
 void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request(struct request *rq, int error);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
-               void *priv);
 void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);
index 99da9ebc73776af0a5efb69a73310f522b952b25..19c2e947d4d127364887a133d4b0d0ce92090e1c 100644 (file)
@@ -456,6 +456,8 @@ struct request_queue {
        struct blk_mq_tag_set   *tag_set;
        struct list_head        tag_set_list;
        struct bio_set          *bio_split;
+
+       bool                    mq_sysfs_init_done;
 };
 
 #define QUEUE_FLAG_QUEUED      1       /* uses generic tag queueing */
index 3920a19d819415bc3109a491171e32388f9bda18..92f7177db2ce869a29db8813911c3a8a0c2b86b2 100644 (file)
@@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
        return iova >> iova_shift(iovad);
 }
 
-int iommu_iova_cache_init(void);
-void iommu_iova_cache_destroy(void);
+int iova_cache_get(void);
+void iova_cache_put(void);
 
 struct iova *alloc_iova_mem(void);
 void free_iova_mem(struct iova *iova);
index ad800e62cb7a603fdd5f7fb1a752edb03417dbb4..6452ff4c463fd8715edc9a5043bc812521a18d6a 100644 (file)
@@ -242,7 +242,6 @@ struct mem_cgroup {
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu __percpu *stat;
-       spinlock_t pcp_counter_lock;
 
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
        struct cg_proto tcp_mem;
index 8eb3b19af2a4bc2ece866e8d07c6243115ce13d4..250b1ff8b48d43c0f9f2479e388b405d8238eb41 100644 (file)
@@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out {
        u8                      rsvd[8];
 };
 
-struct mlx5_cmd_query_special_contexts_mbox_in {
-       struct mlx5_inbox_hdr   hdr;
-       u8                      rsvd[8];
-};
-
-struct mlx5_cmd_query_special_contexts_mbox_out {
-       struct mlx5_outbox_hdr  hdr;
-       __be32                  dump_fill_mkey;
-       __be32                  resd_lkey;
-};
-
 struct mlx5_cmd_layout {
        u8              type;
        u8              rsvd0[3];
index 27b53f9a24ad85a4be3928a470ee4627a20859a6..8b6d6f2154a4eaab1cce3db487b92d8d1b36d4a9 100644 (file)
@@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
 int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey);
 
 struct mlx5_profile {
        u64     mask;
index 91c08f6f0dc96dbb7474d3349f62b5d3f723fe80..80001de019ba33d86b90b9922b39722270cb0449 100644 (file)
@@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
 #endif
 }
 
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+       return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+       page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+       return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
 /*
  * Some inline functions in vmstat.h depend on page_zone()
  */
index ff476515f7163ab1b0247cfd64c0a4f6a364b2f3..581abf84856691ad2e5ce39f77f9c9112170061c 100644 (file)
@@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array);
 
 #define _wait_rcu_gp(checktiny, ...) \
-do { \
-       call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
-       const int __n = ARRAY_SIZE(__crcu_array); \
-       struct rcu_synchronize __rs_array[__n]; \
-       \
-       __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
+do {                                                                   \
+       call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };               \
+       struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];    \
+       __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),              \
+                       __crcu_array, __rs_array);                      \
 } while (0)
 
 #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
index 2b0a30a6e31cf780fd8fcf11b7857b45b5c86bee..4398411236f16c3f87691162909dc6197fb62b08 100644 (file)
@@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
        else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-                skb_checksum_start_offset(skb) <= len)
+                skb_checksum_start_offset(skb) < 0)
                skb->ip_summed = CHECKSUM_NONE;
 }
 
index a8d90db9c4b058626b2d5ddcc84f3fa5d16561aa..9ef7795e65e40c5dfbee53726909fbcb2ce341b0 100644 (file)
@@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
 #ifndef __HAVE_ARCH_STRLCPY
 size_t strlcpy(char *, const char *, size_t);
 #endif
+#ifndef __HAVE_ARCH_STRSCPY
+ssize_t __must_check strscpy(char *, const char *, size_t);
+#endif
 #ifndef __HAVE_ARCH_STRCAT
 extern char * strcat(char *, const char *);
 #endif
index 4a167b30a12ff0d127cccab36c07669689223441..cb1b9bbda332116b6e2173b011ff9fd58f83f431 100644 (file)
@@ -63,7 +63,11 @@ struct unix_sock {
 #define UNIX_GC_MAYBE_CYCLE    1
        struct socket_wq        peer_wq;
 };
-#define unix_sk(__sk) ((struct unix_sock *)__sk)
+
+static inline struct unix_sock *unix_sk(struct sock *sk)
+{
+       return (struct unix_sock *)sk;
+}
 
 #define peer_wait peer_wq.wait
 
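Editor's note: replacing the unix_sk() cast macro with a static inline is purely about type safety; the function form rejects anything that is not a struct sock *, where the macro casts arbitrary pointers without complaint. A userspace illustration with stand-in types:

struct sock       { int family; };
struct unix_sock  { struct sock sk; int gc_flags; };

#define unix_sk_macro(__sk) ((struct unix_sock *)__sk)

static inline struct unix_sock *unix_sk(struct sock *sk)
{
        return (struct unix_sock *)sk;
}

int main(void)
{
        struct sock s = { 1 };
        int not_a_sock = 0;

        (void)unix_sk(&s);                 /* fine */
        (void)unix_sk_macro(&not_a_sock);  /* macro casts anything silently */
        /* unix_sk(&not_a_sock);              now a compile-time error */
        return 0;
}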
index df0e09bb7dd5a20f068b6b2a916d6a33a3ccf5ef..9057d7af3ae145ba711c837f4bcbe58e851f2320 100644 (file)
@@ -11,8 +11,6 @@
 
 #include <linux/types.h>
 
-#include <linux/compiler.h>
-
 #define UFFD_API ((__u64)0xAA)
 /*
  * After implementing the respective features it will become:
index 66c4f567eb7368d21ff11377f629c53cc169bc8b..1471db9a7e6112b3316ae887b50c6d8d1352f171 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
                return retval;
        }
 
-       /* ipc_addid() locks msq upon success. */
-       id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-       if (id < 0) {
-               ipc_rcu_putref(msq, msg_rcu_free);
-               return id;
-       }
-
        msq->q_stime = msq->q_rtime = 0;
        msq->q_ctime = get_seconds();
        msq->q_cbytes = msq->q_qnum = 0;
@@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
        INIT_LIST_HEAD(&msq->q_receivers);
        INIT_LIST_HEAD(&msq->q_senders);
 
+       /* ipc_addid() locks msq upon success. */
+       id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+       if (id < 0) {
+               ipc_rcu_putref(msq, msg_rcu_free);
+               return id;
+       }
+
        ipc_unlock_object(&msq->q_perm);
        rcu_read_unlock();
 
index 222131e8e38f334547004bf0830b26bf808cc6a2..41787276e14170af7de8261181721991fde528bf 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        if (IS_ERR(file))
                goto no_file;
 
-       id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-       if (id < 0) {
-               error = id;
-               goto no_id;
-       }
-
        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
@@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_nattch = 0;
        shp->shm_file = file;
        shp->shm_creator = current;
+
+       id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+       if (id < 0) {
+               error = id;
+               goto no_id;
+       }
+
        list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 
        /*
index be4230020a1f718c31b02012554600c710b928b9..0f401d94b7c657d5e7126fe78f149c94ffea8e24 100644 (file)
@@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
        rcu_read_lock();
        spin_lock(&new->lock);
 
+       current_euid_egid(&euid, &egid);
+       new->cuid = new->uid = euid;
+       new->gid = new->cgid = egid;
+
        id = idr_alloc(&ids->ipcs_idr, new,
                       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
                       GFP_NOWAIT);
@@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
 
        ids->in_use++;
 
-       current_euid_egid(&euid, &egid);
-       new->cuid = new->uid = euid;
-       new->gid = new->cgid = egid;
-
        if (next_id < 0) {
                new->seq = ids->seq++;
                if (ids->seq > IPCID_SEQ_MAX)
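Editor's note: the msg, shm, and util hunks all apply the same ordering rule: every field a concurrent reader might inspect (here the uid/gid credentials) must be written before idr_alloc() publishes the object into the id tree. A generic C11 sketch of publish-after-init with release ordering, using placeholder types rather than the ipc machinery:

#include <stdatomic.h>
#include <stddef.h>

struct ipc_obj {                        /* placeholder object */
        int uid, gid;
};

#define SLOTS 16
static struct ipc_obj *_Atomic registry[SLOTS];

static int publish(struct ipc_obj *o, int uid, int gid)
{
        o->uid = uid;                   /* initialize first ... */
        o->gid = gid;

        for (int id = 0; id < SLOTS; id++) {
                struct ipc_obj *expected = NULL;

                /* ... then make it reachable, with release ordering so
                 * readers that find the pointer also see the fields */
                if (atomic_compare_exchange_strong_explicit(
                                &registry[id], &expected, o,
                                memory_order_release, memory_order_relaxed))
                        return id;
        }
        return -1;                      /* table full */
}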
index f548f69c4299dd1ee44bfdc1f84d79d655d0d6d7..b11756f9b6dcfdf2673b2a396ac0e0de5c980101 100644 (file)
@@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event)
                                              PERF_EVENT_STATE_INACTIVE;
 }
 
-/*
- * Called at perf_event creation and when events are attached/detached from a
- * group.
- */
-static void perf_event__read_size(struct perf_event *event)
+static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
 {
        int entry = sizeof(u64); /* value */
        int size = 0;
@@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event)
                entry += sizeof(u64);
 
        if (event->attr.read_format & PERF_FORMAT_GROUP) {
-               nr += event->group_leader->nr_siblings;
+               nr += nr_siblings;
                size += sizeof(u64);
        }
 
@@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event)
        event->read_size = size;
 }
 
-static void perf_event__header_size(struct perf_event *event)
+static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
 {
        struct perf_sample_data *data;
-       u64 sample_type = event->attr.sample_type;
        u16 size = 0;
 
-       perf_event__read_size(event);
-
        if (sample_type & PERF_SAMPLE_IP)
                size += sizeof(data->ip);
 
@@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event)
        event->header_size = size;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__header_size(struct perf_event *event)
+{
+       __perf_event_read_size(event,
+                              event->group_leader->nr_siblings);
+       __perf_event_header_size(event, event->attr.sample_type);
+}
+
 static void perf_event__id_header_size(struct perf_event *event)
 {
        struct perf_sample_data *data;
@@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event)
        event->id_header_size = size;
 }
 
+static bool perf_event_validate_size(struct perf_event *event)
+{
+       /*
+        * The values computed here will be over-written when we actually
+        * attach the event.
+        */
+       __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+       __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+       perf_event__id_header_size(event);
+
+       /*
+        * Sum the lot; should not exceed the 64k limit we have on records.
+        * Conservative limit to allow for callchains and other variable fields.
+        */
+       if (event->read_size + event->header_size +
+           event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+               return false;
+
+       return true;
+}
+
 static void perf_group_attach(struct perf_event *event)
 {
        struct perf_event *group_leader = event->group_leader, *pos;
@@ -8297,13 +8322,35 @@ SYSCALL_DEFINE5(perf_event_open,
 
        if (move_group) {
                gctx = group_leader->ctx;
+               mutex_lock_double(&gctx->mutex, &ctx->mutex);
+       } else {
+               mutex_lock(&ctx->mutex);
+       }
 
+       if (!perf_event_validate_size(event)) {
+               err = -E2BIG;
+               goto err_locked;
+       }
+
+       /*
+        * Must be under the same ctx::mutex as perf_install_in_context(),
+        * because we need to serialize with concurrent event creation.
+        */
+       if (!exclusive_event_installable(event, ctx)) {
+               /* exclusive and group stuff are assumed mutually exclusive */
+               WARN_ON_ONCE(move_group);
+
+               err = -EBUSY;
+               goto err_locked;
+       }
+
+       WARN_ON_ONCE(ctx->parent_ctx);
+
+       if (move_group) {
                /*
                 * See perf_event_ctx_lock() for comments on the details
                 * of swizzling perf_event::ctx.
                 */
-               mutex_lock_double(&gctx->mutex, &ctx->mutex);
-
                perf_remove_from_context(group_leader, false);
 
                list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -8311,13 +8358,7 @@ SYSCALL_DEFINE5(perf_event_open,
                        perf_remove_from_context(sibling, false);
                        put_ctx(gctx);
                }
-       } else {
-               mutex_lock(&ctx->mutex);
-       }
 
-       WARN_ON_ONCE(ctx->parent_ctx);
-
-       if (move_group) {
                /*
                 * Wait for everybody to stop referencing the events through
                 * the old lists, before installing it on new lists.
@@ -8349,22 +8390,29 @@ SYSCALL_DEFINE5(perf_event_open,
                perf_event__state_init(group_leader);
                perf_install_in_context(ctx, group_leader, group_leader->cpu);
                get_ctx(ctx);
-       }
 
-       if (!exclusive_event_installable(event, ctx)) {
-               err = -EBUSY;
-               mutex_unlock(&ctx->mutex);
-               fput(event_file);
-               goto err_context;
+               /*
+                * Now that all events are installed in @ctx, nothing
+                * references @gctx anymore, so drop the last reference we have
+                * on it.
+                */
+               put_ctx(gctx);
        }
 
+       /*
+        * Precalculate sample_data sizes; do while holding ctx::mutex such
+        * that we're serialized against further additions and before
+        * perf_install_in_context() which is the point the event is active and
+        * can use these values.
+        */
+       perf_event__header_size(event);
+       perf_event__id_header_size(event);
+
        perf_install_in_context(ctx, event, event->cpu);
        perf_unpin_context(ctx);
 
-       if (move_group) {
+       if (move_group)
                mutex_unlock(&gctx->mutex);
-               put_ctx(gctx);
-       }
        mutex_unlock(&ctx->mutex);
 
        put_online_cpus();
@@ -8375,12 +8423,6 @@ SYSCALL_DEFINE5(perf_event_open,
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
 
-       /*
-        * Precalculate sample_data sizes
-        */
-       perf_event__header_size(event);
-       perf_event__id_header_size(event);
-
        /*
         * Drop the reference on the group_event after placing the
         * new event on the sibling_list. This ensures destruction
@@ -8391,6 +8433,12 @@ SYSCALL_DEFINE5(perf_event_open,
        fd_install(event_fd, event_file);
        return event_fd;
 
+err_locked:
+       if (move_group)
+               mutex_unlock(&gctx->mutex);
+       mutex_unlock(&ctx->mutex);
+/* err_file: */
+       fput(event_file);
 err_context:
        perf_unpin_context(ctx);
        put_ctx(ctx);
index e3a8c9577ba641c38747a0925a7896eca7ef4d79..a50ddc9417ff5fbdccf837f9f43b716200ceb013 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/mutex.h>
 
 #include "internals.h"
 
@@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 
 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 {
+       static DEFINE_MUTEX(register_lock);
        char name [MAX_NAMELEN];
 
-       if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
+       if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
                return;
 
+       /*
+        * irq directories are registered only when a handler is
+        * added, not when the descriptor is created, so multiple
+        * tasks might try to register at the same time.
+        */
+       mutex_lock(&register_lock);
+
+       if (desc->dir)
+               goto out_unlock;
+
        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);
 
        /* create /proc/irq/1234 */
        desc->dir = proc_mkdir(name, root_irq_dir);
        if (!desc->dir)
-               return;
+               goto out_unlock;
 
 #ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
@@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 
        proc_create_data("spurious", 0444, desc->dir,
                         &irq_spurious_proc_fops, (void *)(long)irq);
+
+out_unlock:
+       mutex_unlock(&register_lock);
 }
 
 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
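Editor's note: the /proc/irq/<n> directory is created when the first handler is added, not when the descriptor is created, so two tasks requesting the same irq can race into register_irq_proc(); the fix serializes the whole create path and re-tests desc->dir under the mutex. A pthread sketch of that test-under-lock shape:

#include <pthread.h>
#include <stdbool.h>

struct desc {
        bool dir;       /* has /proc/irq/<n> been created yet? */
};

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;

static void register_proc(struct desc *d)
{
        /* the authoritative desc->dir test happens under the mutex,
         * so two racing callers cannot both create the directory */
        pthread_mutex_lock(&register_lock);
        if (!d->dir) {
                /* ... proc_mkdir() and the per-irq entries go here ... */
                d->dir = true;
        }
        pthread_mutex_unlock(&register_lock);
}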
index 8acfbf773e0623f187b8c6677ed48d63b6bd7eef..4e49cc4c9952ca82eff8a2b5e5e61765d48ea96f 100644 (file)
@@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock);
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
                          struct lockdep_map *nest_lock, unsigned long ip,
-                         int references)
+                         int references, int pin_count)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
@@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->waittime_stamp = 0;
        hlock->holdtime_stamp = lockstat_clock();
 #endif
-       hlock->pin_count = 0;
+       hlock->pin_count = pin_count;
 
        if (check && !mark_irqflags(curr, hlock))
                return 0;
@@ -3343,7 +3343,7 @@ found_it:
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
                                hlock->nest_lock, hlock->acquire_ip,
-                               hlock->references))
+                               hlock->references, hlock->pin_count))
                        return 0;
        }
 
@@ -3433,7 +3433,7 @@ found_it:
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
                                hlock->nest_lock, hlock->acquire_ip,
-                               hlock->references))
+                               hlock->references, hlock->pin_count))
                        return 0;
        }
 
@@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        current->lockdep_recursion = 1;
        trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
        __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), nest_lock, ip, 0);
+                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
index 9f75f25cc5d92667c27d70dd1b1a6091b42fbceb..775d36cc00506620829bd2ca14b5da21e4de79e3 100644 (file)
@@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 static void __init
 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
+       static struct lock_class_key rcu_exp_sched_rdp_class;
        unsigned long flags;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rcu_get_root(rsp);
@@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        mutex_init(&rdp->exp_funnel_mutex);
        rcu_boot_init_nocb_percpu_data(rdp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       if (rsp == &rcu_sched_state)
+               lockdep_set_class_and_name(&rdp->exp_funnel_mutex,
+                                          &rcu_exp_sched_rdp_class,
+                                          "rcu_data_exp_sched");
 }
 
 /*
index 2f9c9288481779c309f31c19dc3b74831ef70b3d..615953141951747dba2715ac32fed23b5d256627 100644 (file)
@@ -4934,7 +4934,15 @@ void init_idle(struct task_struct *idle, int cpu)
        idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
-       do_set_cpus_allowed(idle, cpumask_of(cpu));
+#ifdef CONFIG_SMP
+       /*
+        * It's possible that init_idle() gets called multiple times on a task,
+        * in that case do_set_cpus_allowed() will not do the right thing.
+        *
+        * And since this is boot we can forgo the serialization.
+        */
+       set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif
        /*
         * We're having a chicken and egg problem, even though we are
         * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -4951,7 +4959,7 @@ void init_idle(struct task_struct *idle, int cpu)
 
        rq->curr = rq->idle = idle;
        idle->on_rq = TASK_ON_RQ_QUEUED;
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        idle->on_cpu = 1;
 #endif
        raw_spin_unlock(&rq->lock);
@@ -4966,7 +4974,7 @@ void init_idle(struct task_struct *idle, int cpu)
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
        vtime_init_idle(idle, cpu);
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
 }
index 841b72f720e88041a99ded8852381555c307fb43..3a38775b50c2243ea83341a9034a0eb4e3a502a3 100644 (file)
@@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data)
                        continue;
 
                /* Check the deviation from the watchdog clocksource. */
-               if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
+               if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
                                cs->name);
                        pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
index 13d1e84ddb80e983a325011fb8dd8b6ff9ce0600..8dbb7b1eab508712da538d0ef7cadc0e24b6c723 100644 (file)
 #include <linux/bug.h>
 #include <linux/errno.h>
 
+#include <asm/byteorder.h>
+#include <asm/word-at-a-time.h>
+#include <asm/page.h>
+
 #ifndef __HAVE_ARCH_STRNCASECMP
 /**
  * strncasecmp - Case insensitive, length-limited string comparison
@@ -146,6 +150,90 @@ size_t strlcpy(char *dest, const char *src, size_t size)
 EXPORT_SYMBOL(strlcpy);
 #endif
 
+#ifndef __HAVE_ARCH_STRSCPY
+/**
+ * strscpy - Copy a C-string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @count: Size of destination buffer
+ *
+ * Copy the string, or as much of it as fits, into the dest buffer.
+ * The routine returns the number of characters copied (not including
+ * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough.
+ * The behavior is undefined if the string buffers overlap.
+ * The destination buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * Preferred to strlcpy() since the API doesn't require reading memory
+ * from the src string beyond the specified "count" bytes, and since
+ * the return value is easier to error-check than strlcpy()'s.
+ * In addition, the implementation is robust to the string changing out
+ * from underneath it, unlike the current strlcpy() implementation.
+ *
+ * Preferred to strncpy() since it always returns a valid string, and
+ * doesn't unnecessarily force the tail of the destination buffer to be
+ * zeroed.  If the zeroing is desired, it's likely cleaner to use strscpy()
+ * with an overflow test, then just memset() the tail of the dest buffer.
+ */
+ssize_t strscpy(char *dest, const char *src, size_t count)
+{
+       const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+       size_t max = count;
+       long res = 0;
+
+       if (count == 0)
+               return -E2BIG;
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       /*
+        * If src is unaligned, don't cross a page boundary,
+        * since we don't know if the next page is mapped.
+        */
+       if ((long)src & (sizeof(long) - 1)) {
+               size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1));
+               if (limit < max)
+                       max = limit;
+       }
+#else
+       /* If src or dest is unaligned, don't do word-at-a-time. */
+       if (((long) dest | (long) src) & (sizeof(long) - 1))
+               max = 0;
+#endif
+
+       while (max >= sizeof(unsigned long)) {
+               unsigned long c, data;
+
+               c = *(unsigned long *)(src+res);
+               *(unsigned long *)(dest+res) = c;
+               if (has_zero(c, &data, &constants)) {
+                       data = prep_zero_mask(c, data, &constants);
+                       data = create_zero_mask(data);
+                       return res + find_zero(data);
+               }
+               res += sizeof(unsigned long);
+               count -= sizeof(unsigned long);
+               max -= sizeof(unsigned long);
+       }
+
+       while (count) {
+               char c;
+
+               c = src[res];
+               dest[res] = c;
+               if (!c)
+                       return res;
+               res++;
+               count--;
+       }
+
+       /* Hit buffer length without finding a NUL; force NUL-termination. */
+       if (res)
+               dest[res-1] = '\0';
+
+       return -E2BIG;
+}
+EXPORT_SYMBOL(strscpy);
+#endif
+
 #ifndef __HAVE_ARCH_STRCAT
 /**
  * strcat - Append one %NUL-terminated string to another
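Editor's note: for callers, the practical difference shows up in the return-value check: strscpy() yields the copied length or -E2BIG, so truncation is a single signed comparison rather than strlcpy()'s "return >= size" idiom. A userspace sketch with a byte-at-a-time stand-in implementing the same contract (the word-at-a-time fast path is omitted):

#include <errno.h>
#include <stdio.h>
#include <stddef.h>

/* simplified stand-in with the same contract as strscpy() */
static long my_strscpy(char *dest, const char *src, size_t count)
{
        size_t res = 0;

        if (count == 0)
                return -E2BIG;
        while (res < count) {
                if ((dest[res] = src[res]) == '\0')
                        return (long)res;
                res++;
        }
        dest[count - 1] = '\0';         /* force NUL-termination */
        return -E2BIG;
}

int main(void)
{
        char buf[8];
        long n = my_strscpy(buf, "truncate me", sizeof(buf));

        if (n < 0)                      /* one signed test catches overflow */
                printf("truncated, got \"%s\"\n", buf);   /* "truncat" */
        else
                printf("copied %ld bytes\n", n);
        return 0;
}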
index 71a8998cd03a6b8b0d2dbfe36a24ce70766047e8..312a716fa14c2ef0d2780832bc378c05a3d08d16 100644 (file)
@@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (dma < page->dma)
                        continue;
-               if (dma < (page->dma + pool->allocation))
+               if ((dma - page->dma) < pool->allocation)
                        return page;
        }
        return NULL;
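Editor's note: the rewritten comparison is the overflow-safe form of an interval test. When a pool page sits near the top of the dma_addr_t space, page->dma + pool->allocation wraps and the old check rejects blocks that really belong to that page (the earlier "dma < page->dma" test has already filtered lower addresses). A 32-bit demo with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t base = 0xFFFFF000u;  /* pool page near the top of the space */
        uint32_t size = 0x2000u;      /* base + size wraps around to 0x1000 */
        uint32_t dma  = 0xFFFFF800u;  /* genuinely inside the page */

        printf("old test: %d\n", dma < base + size);  /* 0: valid block missed */
        printf("new test: %d\n", dma - base < size);  /* 1: found */
        return 0;
}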
index 999fb0aef8f16f9a126579e54fca79ad8e4f6487..9cc773483624e4cbb1592ddde74f9c8faa21ef87 100644 (file)
@@ -3201,6 +3201,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                if (iter_vma == vma)
                        continue;
 
+               /*
+                * Shared VMAs have their own reserves and do not affect
+                * MAP_PRIVATE accounting but it is possible that a shared
+                * VMA is using the same page so check and skip such VMAs.
+                */
+               if (iter_vma->vm_flags & VM_MAYSHARE)
+                       continue;
+
                /*
                 * Unmap the page from other VMAs without their own reserves.
                 * They get marked to be SIGKILLed if they fault in these
index 6ddaeba34e097a7553d33b8add26e26a27d3c81a..1fedbde68f595c2b83d5aa84962a667c1ada120a 100644 (file)
@@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
 }
 
 /*
+ * Return page count for a single (non-recursive) @memcg.
+ *
  * Implementation Note: reading percpu statistics for memcg.
  *
  * Both of vmstat[] and percpu_counter has threshold and do periodic
  * synchronization to implement "quick" read. There are trade-off between
  * reading cost and precision of value. Then, we may have a chance to implement
- * a periodic synchronizion of counter in memcg's counter.
+ * a periodic synchronization of counter in memcg's counter.
  *
  * But this _read() function is used for user interface now. The user accounts
  * memory usage by memory cgroup and he _always_ requires exact value because
@@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
  *
  * If there are kernel internal actions which can make use of some not-exact
  * value, and reading all cpu value can be performance bottleneck in some
- * common workload, threashold and synchonization as vmstat[] should be
+ * common workload, threshold and synchronization as vmstat[] should be
  * implemented.
  */
-static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
-                                enum mem_cgroup_stat_index idx)
+static unsigned long
+mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
 {
        long val = 0;
        int cpu;
 
+       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_possible_cpu(cpu)
                val += per_cpu(memcg->stat->count[idx], cpu);
+       /*
+        * Summing races with updates, so val may be negative.  Avoid exposing
+        * transient negative values.
+        */
+       if (val < 0)
+               val = 0;
        return val;
 }
 
@@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                        if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                                continue;
-                       pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
+                       pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                                K(mem_cgroup_read_stat(iter, i)));
                }
 
@@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg,
                               enum mem_cgroup_stat_index idx)
 {
        struct mem_cgroup *iter;
-       long val = 0;
+       unsigned long val = 0;
 
-       /* Per-cpu values can be negative, use a signed accumulator */
        for_each_mem_cgroup_tree(iter, memcg)
                val += mem_cgroup_read_stat(iter, idx);
 
-       if (val < 0) /* race ? */
-               val = 0;
        return val;
 }
 
@@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
-               seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
+               seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
                           mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
        }
 
@@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
                           (u64)memsw * PAGE_SIZE);
 
        for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-               long long val = 0;
+               unsigned long long val = 0;
 
                if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                        continue;
                for_each_mem_cgroup_tree(mi, memcg)
                        val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
-               seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
+               seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
        }
 
        for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
@@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (memcg_wb_domain_init(memcg, GFP_KERNEL))
                goto out_free_stat;
 
-       spin_lock_init(&memcg->pcp_counter_lock);
        return memcg;
 
 out_free_stat:
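
The change to mem_cgroup_read_stat() above keeps the signed accumulation (per-cpu deltas can be negative) but clamps the transiently negative sum before it is returned as an unsigned count, since the callers now print it with %lu/%llu. A user-space sketch, with hypothetical per-cpu values, of why the clamp matters:

    #include <stdio.h>

    /* Hypothetical per-cpu deltas: one CPU's increment may be summed
     * before the matching decrement on another CPU, or vice versa. */
    static long percpu_count[4] = { -2, 1, 0, 0 };

    static unsigned long read_stat(void)
    {
            long val = 0;
            int cpu;

            for (cpu = 0; cpu < 4; cpu++)
                    val += percpu_count[cpu];
            /* Summing races with updates, so val may be negative;
             * avoid returning a huge bogus unsigned value. */
            if (val < 0)
                    val = 0;
            return val;
    }

    int main(void)
    {
            printf("%lu\n", read_stat());   /* 0, not ULONG_MAX-1 on a 64-bit box */
            return 0;
    }
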
index 7452a00bbb50c134b529c1d024dfc53fcfca093b..842ecd7aaf7fa6ac1371f6137dc155c91851505c 100644 (file)
@@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        if (PageSwapBacked(page))
                SetPageSwapBacked(newpage);
 
+       /*
+        * migrate_page_copy(), which is called indirectly below, copies
+        * PG_dirty and thus needs newpage's memcg set to transfer memcg
+        * dirty page accounting.  So perform the memcg migration in two steps:
+        * 1. set newpage->mem_cgroup (here)
+        * 2. clear page->mem_cgroup (below)
+        */
+       set_page_memcg(newpage, page_memcg(page));
+
        mapping = page_mapping(page);
        if (!mapping)
                rc = migrate_page(mapping, newpage, page, mode);
@@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                rc = fallback_migrate_page(mapping, newpage, page, mode);
 
        if (rc != MIGRATEPAGE_SUCCESS) {
+               set_page_memcg(newpage, NULL);
                newpage->mapping = NULL;
        } else {
-               mem_cgroup_migrate(page, newpage, false);
+               set_page_memcg(page, NULL);
                if (page_was_mapped)
                        remove_migration_ptes(page, newpage);
                page->mapping = NULL;
index c77ebe6cc87cd3066f24fd9e3682699448689fa8..4fcc5dd8d5a6c2776ac2f88d9011ac23c78fb769 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
                        size += BYTES_PER_WORD;
        }
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-       if (size >= kmalloc_size(INDEX_NODE + 1)
-           && cachep->object_size > cache_line_size()
-           && ALIGN(size, cachep->align) < PAGE_SIZE) {
+       /*
+        * To activate debug pagealloc, off-slab management is a necessary
+        * requirement.  In the early phase of initialization, small-sized
+        * slabs don't get initialized, so off-slab is not possible then.
+        * We therefore also check size >= 256, which guarantees that all
+        * the necessary small-sized slabs have been initialized by this
+        * point in the slab initialization sequence.
+        */
+       if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+               size >= 256 && cachep->object_size > cache_line_size() &&
+               ALIGN(size, cachep->align) < PAGE_SIZE) {
                cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
                size = PAGE_SIZE;
        }
index 805a95a481076dcc229c881322c2e6ef308692bd..830f8a7c1cb173caf3df0c9044bb81f9363338d8 100644 (file)
@@ -31,7 +31,6 @@
 static const char fmt_hex[] = "%#x\n";
 static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
-static const char fmt_udec[] = "%u\n";
 static const char fmt_ulong[] = "%lu\n";
 static const char fmt_u64[] = "%llu\n";
 
@@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev,
        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;
                if (!__ethtool_get_settings(netdev, &cmd))
-                       ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
+                       ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
index dad4dd37e2aaad17b9493cb67796ed9650515a84..fab4599ba8b261dc43977af8349a336edc4d2799 100644 (file)
@@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags);
  */
 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
 {
+       unsigned char *data = skb->data;
+
        BUG_ON(len > skb->len);
-       skb->len -= len;
-       BUG_ON(skb->len < skb->data_len);
-       skb_postpull_rcsum(skb, skb->data, len);
-       return skb->data += len;
+       __skb_pull(skb, len);
+       skb_postpull_rcsum(skb, data, len);
+       return skb->data;
 }
 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
 
index cce97385f7436445f22c17605b5ee4da48c80cac..7d91f4612ac07406cfffd90defb6aa1d7436cb36 100644 (file)
@@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
 static int dsa_slave_port_attr_set(struct net_device *dev,
                                   struct switchdev_attr *attr)
 {
-       int ret = 0;
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       int ret;
 
        switch (attr->id) {
        case SWITCHDEV_ATTR_PORT_STP_STATE:
-               if (attr->trans == SWITCHDEV_TRANS_COMMIT)
-                       ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+               if (attr->trans == SWITCHDEV_TRANS_PREPARE)
+                       ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
+               else
+                       ret = ds->drv->port_stp_update(ds, p->port,
+                                                      attr->u.stp_state);
                break;
        default:
                ret = -EOPNOTSUPP;
index 6fcbd215cdbc501fe6541054208d68d965b5286e..690bcbc59f26d1add82e9ebaa82e2c072b84ca47 100644 (file)
@@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
        fl4.flowi4_tun_key.tun_id = 0;
+       fl4.flowi4_flags = 0;
 
        no_addr = idev->ifa_list == NULL;
 
index c6ad99ad0ffb713500646af1bfc40bbbb12880f4..c81deb85acb4da84480f2f54555f4d7cdcbbb209 100644 (file)
@@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        fl4.flowi4_mark = skb->mark;
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       fl4.flowi4_flags = 0;
        fl4.daddr = daddr;
        fl4.saddr = saddr;
        err = fib_lookup(net, &fl4, &res, 0);
index f204089e854cfbf80229cd82480aaaf57249adef..cb32ce250db0c0fd8cdc19a066f3956a893f2fba 100644 (file)
@@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
 
        fl6->flowi6_iif = LOOPBACK_IFINDEX;
 
-       if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
+       if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
+           fl6->flowi6_oif)
                flags |= RT6_LOOKUP_F_IFACE;
 
        if (!ipv6_addr_any(&fl6->saddr))
index f6b090df3930d32dc1fda0c8069ad1f7b3246d41..afca2eb4dfa777c75288dfb6fce9636b309a2ebc 100644 (file)
@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
        tunnel = container_of(work, struct l2tp_tunnel, del_work);
        sk = l2tp_tunnel_sock_lookup(tunnel);
        if (!sk)
-               return;
+               goto out;
 
        sock = sk->sk_socket;
 
@@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
        }
 
        l2tp_tunnel_sock_put(sk);
+out:
+       l2tp_tunnel_dec_refcount(tunnel);
 }
 
 /* Create a socket for the tunnel, if one isn't set up by
@@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+       l2tp_tunnel_inc_refcount(tunnel);
        l2tp_tunnel_closeall(tunnel);
-       return (false == queue_work(l2tp_wq, &tunnel->del_work));
+       if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+               l2tp_tunnel_dec_refcount(tunnel);
+               return 1;
+       }
+       return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
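
The two l2tp hunks above implement a keep-alive pattern: take a tunnel reference before queueing the deletion work, drop it immediately if queue_work() reports the work already pending, and otherwise let the worker drop it on every exit path (hence the new "out:" label). A compact user-space sketch of the same ordering, with hypothetical names rather than the l2tp API:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical refcounted object standing in for the tunnel. */
    struct obj { atomic_int refs; };

    static void obj_get(struct obj *o) { atomic_fetch_add(&o->refs, 1); }

    static void obj_put(struct obj *o)
    {
            if (atomic_fetch_sub(&o->refs, 1) == 1)
                    printf("last reference dropped: free object\n");
    }

    /* Stand-in for queue_work(); false means the work was already queued. */
    static bool queue_work_stub(void) { return false; }

    /* Mirrors the patched l2tp_tunnel_delete(): pin the object before
     * handing it to asynchronous teardown; unpin on the failure path.
     * On success, the worker's final obj_put() balances the obj_get(). */
    static int obj_delete(struct obj *o)
    {
            obj_get(o);
            if (!queue_work_stub()) {
                    obj_put(o);
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            struct obj o;

            atomic_init(&o.refs, 1);
            obj_delete(&o);
            obj_put(&o);    /* caller's own reference */
            return 0;
    }
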
 
index 197c3f59ecbf1d7975a987e57c13023ac9e2b357..b00f1f9611d64a7f46fdd37460d9c5ec9711f37f 100644 (file)
@@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc,
  *   within this document.
  *
  * Our basic strategy is to round-robin transports in priorities
- * according to sctp_state_prio_map[] e.g., if no such
+ * according to sctp_trans_score(), e.g., if no such
  * transport with state SCTP_ACTIVE exists, round-robin through
  * SCTP_UNKNOWN, etc. You get the picture.
  */
-static const u8 sctp_trans_state_to_prio_map[] = {
-       [SCTP_ACTIVE]   = 3,    /* best case */
-       [SCTP_UNKNOWN]  = 2,
-       [SCTP_PF]       = 1,
-       [SCTP_INACTIVE] = 0,    /* worst case */
-};
-
 static u8 sctp_trans_score(const struct sctp_transport *trans)
 {
-       return sctp_trans_state_to_prio_map[trans->state];
+       switch (trans->state) {
+       case SCTP_ACTIVE:
+               return 3;       /* best case */
+       case SCTP_UNKNOWN:
+               return 2;
+       case SCTP_PF:
+               return 1;
+       default: /* case SCTP_INACTIVE */
+               return 0;       /* worst case */
+       }
 }
 
 static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
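
Replacing the state-indexed table with a switch means an unexpected state value falls into the default (worst-case) bucket instead of indexing past the end of the array. A tiny standalone illustration, with hypothetical state names:

    #include <stdio.h>

    enum state { ACTIVE, UNKNOWN, PF, INACTIVE };

    /* Table form: any state outside [0, 3] reads out of bounds. */
    static const unsigned char prio_map[] = { 3, 2, 1, 0 };

    /* Switch form: unknown values safely score as the worst case. */
    static unsigned char score(enum state s)
    {
            switch (s) {
            case ACTIVE:    return 3;
            case UNKNOWN:   return 2;
            case PF:        return 1;
            default:        return 0;
            }
    }

    int main(void)
    {
            (void)prio_map;
            printf("%u\n", score((enum state)42));  /* 0, not garbage */
            return 0;
    }
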
index 35df1266bf073aa9a7a4145da787ff42e95a7ee1..6098d4c42fa91287d3cde36ac05d860f76d4fe32 100644 (file)
@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
        int error;
        struct sctp_transport *transport = (struct sctp_transport *) peer;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
        /* Check whether a task is in the sock.  */
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                           transport, GFP_ATOMIC);
 
        if (error)
-               asoc->base.sk->sk_err = -error;
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_transport_put(transport);
 }
 
@@ -285,11 +286,12 @@ out_unlock:
 static void sctp_generate_timeout_event(struct sctp_association *asoc,
                                        sctp_event_timeout_t timeout_type)
 {
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
        int error = 0;
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy: timer %d\n", __func__,
                         timeout_type);
 
@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
                           (void *)timeout_type, GFP_ATOMIC);
 
        if (error)
-               asoc->base.sk->sk_err = -error;
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_association_put(asoc);
 }
 
@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
        int error = 0;
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
                           asoc->state, asoc->ep, asoc,
                           transport, GFP_ATOMIC);
 
-        if (error)
-                asoc->base.sk->sk_err = -error;
+       if (error)
+               sk->sk_err = -error;
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_transport_put(transport);
 }
 
@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
 {
        struct sctp_transport *transport = (struct sctp_transport *) data;
        struct sctp_association *asoc = transport->asoc;
-       struct net *net = sock_net(asoc->base.sk);
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
 
-       bh_lock_sock(asoc->base.sk);
-       if (sock_owned_by_user(asoc->base.sk)) {
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
                pr_debug("%s: sock is busy\n", __func__);
 
                /* Try again later.  */
@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
                   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
 
 out_unlock:
-       bh_unlock_sock(asoc->base.sk);
+       bh_unlock_sock(sk);
        sctp_association_put(asoc);
 }
 
index cb25c89da6239154475d6c31736e328d13f19134..f1e8dafbd5079b3406a769ba4854ecba229edca6 100644 (file)
@@ -39,25 +39,6 @@ static int
 fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
            struct rpcrdma_create_data_internal *cdata)
 {
-       struct ib_device_attr *devattr = &ia->ri_devattr;
-       struct ib_mr *mr;
-
-       /* Obtain an lkey to use for the regbufs, which are
-        * protected from remote access.
-        */
-       if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
-               ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-       } else {
-               mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE);
-               if (IS_ERR(mr)) {
-                       pr_err("%s: ib_get_dma_mr for failed with %lX\n",
-                              __func__, PTR_ERR(mr));
-                       return -ENOMEM;
-               }
-               ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
-               ia->ri_dma_mr = mr;
-       }
-
        return 0;
 }
 
index d6653f5d0830378cd08531afb61c0b766ae8b6b9..5318951b3b531ca322f1a0c3639a9079d3599555 100644 (file)
@@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
        struct ib_device_attr *devattr = &ia->ri_devattr;
        int depth, delta;
 
-       /* Obtain an lkey to use for the regbufs, which are
-        * protected from remote access.
-        */
-       ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-
        ia->ri_max_frmr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              devattr->max_fast_reg_page_list_len);
index 72cf8b15bbb4e331d49f937c58abd85f7dd70862..617b76f22154c41b41cdccd329cbbe9f00a3fabe 100644 (file)
@@ -23,7 +23,6 @@ static int
 physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
                 struct rpcrdma_create_data_internal *cdata)
 {
-       struct ib_device_attr *devattr = &ia->ri_devattr;
        struct ib_mr *mr;
 
        /* Obtain an rkey to use for RPC data payloads.
@@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
                       __func__, PTR_ERR(mr));
                return -ENOMEM;
        }
-       ia->ri_dma_mr = mr;
-
-       /* Obtain an lkey to use for regbufs.
-        */
-       if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
-               ia->ri_dma_lkey = ia->ri_device->local_dma_lkey;
-       else
-               ia->ri_dma_lkey = ia->ri_dma_mr->lkey;
 
+       ia->ri_dma_mr = mr;
        return 0;
 }
 
index 682996779970c6ccae749c9de566f06a9b205c80..eb081ad05e33bb65a89b4afb499177dff4d2de89 100644 (file)
@@ -1252,7 +1252,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
                goto out_free;
 
        iov->length = size;
-       iov->lkey = ia->ri_dma_lkey;
+       iov->lkey = ia->ri_pd->local_dma_lkey;
        rb->rg_size = size;
        rb->rg_owner = NULL;
        return rb;
index 02512221b8bc885dde93b987c561b344f6e96722..c09414e6f91b0428bd7cc5fd6f2b1c67f8830b23 100644 (file)
@@ -65,7 +65,6 @@ struct rpcrdma_ia {
        struct rdma_cm_id       *ri_id;
        struct ib_pd            *ri_pd;
        struct ib_mr            *ri_dma_mr;
-       u32                     ri_dma_lkey;
        struct completion       ri_done;
        int                     ri_async_rc;
        unsigned int            ri_max_frmr_depth;
index 03ee4d359f6a4922397a1a8a36c015a06aae1cac..ef31b40ad55000a5fd029d7479b546483cc782b3 100644 (file)
@@ -2179,8 +2179,21 @@ unlock:
                        if (UNIXCB(skb).fp)
                                scm.fp = scm_fp_dup(UNIXCB(skb).fp);
 
-                       sk_peek_offset_fwd(sk, chunk);
+                       if (skip) {
+                               sk_peek_offset_fwd(sk, chunk);
+                               skip -= chunk;
+                       }
 
+                       if (UNIXCB(skb).fp)
+                               break;
+
+                       last = skb;
+                       last_len = skb->len;
+                       unix_state_lock(sk);
+                       skb = skb_peek_next(skb, &sk->sk_receive_queue);
+                       if (skb)
+                               goto again;
+                       unix_state_unlock(sk);
                        break;
                }
        } while (size);
index 9119ac6a82702972d5973c0da0a1e9399d665b8a..c285a3b8a9f1a5d88e4fc229368ac6ad25473f29 100644 (file)
@@ -1,13 +1,13 @@
 /*
  * Here's a sample kernel module showing the use of jprobes to dump
- * the arguments of do_fork().
+ * the arguments of _do_fork().
  *
  * For more information on theory of operation of jprobes, see
  * Documentation/kprobes.txt
  *
  * Build and insert the kernel module as done in the kprobe example.
  * You will see the trace data in /var/log/messages and on the
- * console whenever do_fork() is invoked to create a new process.
+ * console whenever _do_fork() is invoked to create a new process.
  * (Some messages may be suppressed if syslogd is configured to
  * eliminate duplicate messages.)
  */
 #include <linux/kprobes.h>
 
 /*
- * Jumper probe for do_fork.
+ * Jumper probe for _do_fork.
  * Mirror principle enables access to arguments of the probed routine
  * from the probe handler.
  */
 
-/* Proxy routine having the same arguments as actual do_fork() routine */
-static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
+/* Proxy routine having the same arguments as actual _do_fork() routine */
+static long j_do_fork(unsigned long clone_flags, unsigned long stack_start,
              unsigned long stack_size, int __user *parent_tidptr,
              int __user *child_tidptr)
 {
@@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 }
 
 static struct jprobe my_jprobe = {
-       .entry                  = jdo_fork,
+       .entry                  = j_do_fork,
        .kp = {
-               .symbol_name    = "do_fork",
+               .symbol_name    = "_do_fork",
        },
 };
 
index 366db1a9fb65b5662ffdb14564750c60b6f012c7..727eb21c9c5624f2998f321d59db1017339c69a3 100644 (file)
@@ -1,13 +1,13 @@
 /*
 * NOTE: This example works on x86 and powerpc.
  * Here's a sample kernel module showing the use of kprobes to dump a
- * stack trace and selected registers when do_fork() is called.
+ * stack trace and selected registers when _do_fork() is called.
  *
  * For more information on theory of operation of kprobes, see
  * Documentation/kprobes.txt
  *
  * You will see the trace data in /var/log/messages and on the console
- * whenever do_fork() is invoked to create a new process.
+ * whenever _do_fork() is invoked to create a new process.
  */
 
 #include <linux/kernel.h>
@@ -16,7 +16,7 @@
 
 /* For each probe you need to allocate a kprobe structure */
 static struct kprobe kp = {
-       .symbol_name    = "do_fork",
+       .symbol_name    = "_do_fork",
 };
 
 /* kprobe pre_handler: called just before the probed instruction is executed */
index 1041b6731598137d22752334054cd9054fcdabb6..ebb1d1aed54782f2e4a0126e1de886c63767d056 100644 (file)
@@ -7,7 +7,7 @@
  *
  * usage: insmod kretprobe_example.ko func=<func_name>
  *
- * If no func_name is specified, do_fork is instrumented
+ * If no func_name is specified, _do_fork is instrumented
  *
  * For more information on theory of operation of kretprobes, see
  * Documentation/kprobes.txt
@@ -25,7 +25,7 @@
 #include <linux/limits.h>
 #include <linux/sched.h>
 
-static char func_name[NAME_MAX] = "do_fork";
+static char func_name[NAME_MAX] = "_do_fork";
 module_param_string(func, func_name, NAME_MAX, S_IRUGO);
 MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the"
                        " function's execution time");
index 3f874d24234f6b4c2e40ac6d2bf0390326b8a8bf..37323b0df374b1a89fb256a4077165bc05376421 100644 (file)
@@ -5,10 +5,12 @@ else
        call_threshold := 0
 endif
 
+KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+
 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
 CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
-               -fasan-shadow-offset=$(CONFIG_KASAN_SHADOW_OFFSET) \
+               -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET) \
                --param asan-stack=1 --param asan-globals=1 \
                --param asan-instrumentation-with-call-threshold=$(call_threshold))
 
index 6ce5945a0b892e79f2e98e645615cbc37b79f4d1..b071bf476fea7ede6aab95730f5844ac1bf5caa5 100644 (file)
 #include <stdint.h>
 #include <stdbool.h>
 #include <string.h>
-#include <getopt.h>
 #include <err.h>
-#include <arpa/inet.h>
 #include <openssl/bio.h>
-#include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/pkcs7.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
index c3899ca4811cc2e0add94f8f31e31b93be432fff..250a7a6450331ae0805aaf3100da46af66dd4827 100755 (executable)
 #include <getopt.h>
 #include <err.h>
 #include <arpa/inet.h>
+#include <openssl/opensslv.h>
 #include <openssl/bio.h>
 #include <openssl/evp.h>
 #include <openssl/pem.h>
-#include <openssl/cms.h>
 #include <openssl/err.h>
 #include <openssl/engine.h>
 
+/*
+ * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
+ * assume that it's not available and its header file is missing and that we
+ * should use PKCS#7 instead.  Switching to the older PKCS#7 format restricts
+ * the options we have for specifying the X.509 certificate we want.
+ *
+ * Further, older versions of OpenSSL don't support manually adding signers to
+ * the PKCS#7 message, so we have to accept that we get a certificate included in
+ * the signature message.  Nor do such older versions of OpenSSL support
+ * signing with anything other than SHA1 - so we're stuck with that if such is
+ * the case.
+ */
+#if OPENSSL_VERSION_NUMBER < 0x10000000L
+#define USE_PKCS7
+#endif
+#ifndef USE_PKCS7
+#include <openssl/cms.h>
+#else
+#include <openssl/pkcs7.h>
+#endif
+
 struct module_signature {
        uint8_t         algo;           /* Public-key crypto algorithm [0] */
        uint8_t         hash;           /* Digest algorithm [0] */
@@ -110,30 +131,42 @@ int main(int argc, char **argv)
        struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 };
        char *hash_algo = NULL;
        char *private_key_name, *x509_name, *module_name, *dest_name;
-       bool save_cms = false, replace_orig;
+       bool save_sig = false, replace_orig;
        bool sign_only = false;
        unsigned char buf[4096];
-       unsigned long module_size, cms_size;
-       unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR;
+       unsigned long module_size, sig_size;
+       unsigned int use_signed_attrs;
        const EVP_MD *digest_algo;
        EVP_PKEY *private_key;
+#ifndef USE_PKCS7
        CMS_ContentInfo *cms;
+       unsigned int use_keyid = 0;
+#else
+       PKCS7 *pkcs7;
+#endif
        X509 *x509;
        BIO *b, *bd = NULL, *bm;
        int opt, n;
-
        OpenSSL_add_all_algorithms();
        ERR_load_crypto_strings();
        ERR_clear_error();
 
        key_pass = getenv("KBUILD_SIGN_PIN");
 
+#ifndef USE_PKCS7
+       use_signed_attrs = CMS_NOATTR;
+#else
+       use_signed_attrs = PKCS7_NOATTR;
+#endif
+
        do {
                opt = getopt(argc, argv, "dpk");
                switch (opt) {
-               case 'p': save_cms = true; break;
-               case 'd': sign_only = true; save_cms = true; break;
+               case 'p': save_sig = true; break;
+               case 'd': sign_only = true; save_sig = true; break;
+#ifndef USE_PKCS7
                case 'k': use_keyid = CMS_USE_KEYID; break;
+#endif
                case -1: break;
                default: format();
                }
@@ -157,6 +190,14 @@ int main(int argc, char **argv)
                replace_orig = true;
        }
 
+#ifdef USE_PKCS7
+       if (strcmp(hash_algo, "sha1") != 0) {
+               fprintf(stderr, "sign-file: %s only supports SHA1 signing\n",
+                       OPENSSL_VERSION_TEXT);
+               exit(3);
+       }
+#endif
+
        /* Read the private key and the X.509 cert the PKCS#7 message
         * will point to.
         */
@@ -213,7 +254,8 @@ int main(int argc, char **argv)
        bm = BIO_new_file(module_name, "rb");
        ERR(!bm, "%s", module_name);
 
-       /* Load the CMS message from the digest buffer. */
+#ifndef USE_PKCS7
+       /* Load the signature message from the digest buffer. */
        cms = CMS_sign(NULL, NULL, NULL, NULL,
                       CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM);
        ERR(!cms, "CMS_sign");
@@ -221,17 +263,31 @@ int main(int argc, char **argv)
        ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo,
                             CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP |
                             use_keyid | use_signed_attrs),
-           "CMS_sign_add_signer");
+           "CMS_add1_signer");
        ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0,
            "CMS_final");
 
-       if (save_cms) {
-               char *cms_name;
+#else
+       pkcs7 = PKCS7_sign(x509, private_key, NULL, bm,
+                          PKCS7_NOCERTS | PKCS7_BINARY |
+                          PKCS7_DETACHED | use_signed_attrs);
+       ERR(!pkcs7, "PKCS7_sign");
+#endif
 
-               ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf");
-               b = BIO_new_file(cms_name, "wb");
-               ERR(!b, "%s", cms_name);
-               ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name);
+       if (save_sig) {
+               char *sig_file_name;
+
+               ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0,
+                   "asprintf");
+               b = BIO_new_file(sig_file_name, "wb");
+               ERR(!b, "%s", sig_file_name);
+#ifndef USE_PKCS7
+               ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0,
+                   "%s", sig_file_name);
+#else
+               ERR(i2d_PKCS7_bio(b, pkcs7) < 0,
+                       "%s", sig_file_name);
+#endif
                BIO_free(b);
        }
 
@@ -247,9 +303,13 @@ int main(int argc, char **argv)
        ERR(n < 0, "%s", module_name);
        module_size = BIO_number_written(bd);
 
+#ifndef USE_PKCS7
        ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name);
-       cms_size = BIO_number_written(bd) - module_size;
-       sig_info.sig_len = htonl(cms_size);
+#else
+       ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name);
+#endif
+       sig_size = BIO_number_written(bd) - module_size;
+       sig_info.sig_len = htonl(sig_size);
        ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name);
        ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name);
 
index c7952375ac5325cfb4c403fa1020671b5f31a150..39eac1fd5706c6370df8b18fe5ac42e85e1b2ed3 100644 (file)
@@ -134,6 +134,10 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                kdebug("- %u", key->serial);
                key_check(key);
 
+               /* Throw away the key data */
+               if (key->type->destroy)
+                       key->type->destroy(key);
+
                security_key_free(key);
 
                /* deal with the user's key tracking and quota */
@@ -148,10 +152,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               /* now throw away the key memory */
-               if (key->type->destroy)
-                       key->type->destroy(key);
-
                key_user_put(key->user);
 
                kfree(key->description);
index 2975632d51e2341e7e1a60286e0fa822cbec0279..c8fe6d17711915e50978b3b2cbba6804af5cf9fe 100644 (file)
@@ -41,6 +41,7 @@ FEATURE_TESTS ?=                      \
        libelf-getphdrnum               \
        libelf-mmap                     \
        libnuma                         \
+       numa_num_possible_cpus          \
        libperl                         \
        libpython                       \
        libpython-version               \
@@ -51,7 +52,8 @@ FEATURE_TESTS ?=                      \
        timerfd                         \
        libdw-dwarf-unwind              \
        zlib                            \
-       lzma
+       lzma                            \
+       get_cpuid
 
 FEATURE_DISPLAY ?=                     \
        dwarf                           \
@@ -61,13 +63,15 @@ FEATURE_DISPLAY ?=                  \
        libbfd                          \
        libelf                          \
        libnuma                         \
+       numa_num_possible_cpus          \
        libperl                         \
        libpython                       \
        libslang                        \
        libunwind                       \
        libdw-dwarf-unwind              \
        zlib                            \
-       lzma
+       lzma                            \
+       get_cpuid
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
index 74ca42093d70d72fac5a4b1776237be9873f480a..e43a2971bf5669a8cae7722e8d5058fb9ada56ea 100644 (file)
@@ -19,6 +19,7 @@ FILES=                                        \
        test-libelf-getphdrnum.bin      \
        test-libelf-mmap.bin            \
        test-libnuma.bin                \
+       test-numa_num_possible_cpus.bin \
        test-libperl.bin                \
        test-libpython.bin              \
        test-libpython-version.bin      \
@@ -34,7 +35,8 @@ FILES=                                        \
        test-compile-x32.bin            \
        test-zlib.bin                   \
        test-lzma.bin                   \
-       test-bpf.bin
+       test-bpf.bin                    \
+       test-get_cpuid.bin
 
 CC := $(CROSS_COMPILE)gcc -MD
 PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin:
 test-libnuma.bin:
        $(BUILD) -lnuma
 
+test-numa_num_possible_cpus.bin:
+       $(BUILD) -lnuma
+
 test-libunwind.bin:
        $(BUILD) -lelf
 
@@ -162,6 +167,9 @@ test-zlib.bin:
 test-lzma.bin:
        $(BUILD) -llzma
 
+test-get_cpuid.bin:
+       $(BUILD)
+
 test-bpf.bin:
        $(BUILD)
 
index 84689a67814a9a622bd3f12b69fe5a8f5733a499..33cf6f20bd4ec6812ac6b53cfe1b301053d8c940 100644 (file)
 # include "test-libnuma.c"
 #undef main
 
+#define main main_test_numa_num_possible_cpus
+# include "test-numa_num_possible_cpus.c"
+#undef main
+
 #define main main_test_timerfd
 # include "test-timerfd.c"
 #undef main
 # include "test-lzma.c"
 #undef main
 
+#define main main_test_get_cpuid
+# include "test-get_cpuid.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -136,6 +144,7 @@ int main(int argc, char *argv[])
        main_test_libbfd();
        main_test_backtrace();
        main_test_libnuma();
+       main_test_numa_num_possible_cpus();
        main_test_timerfd();
        main_test_stackprotector_all();
        main_test_libdw_dwarf_unwind();
@@ -143,6 +152,7 @@ int main(int argc, char *argv[])
        main_test_zlib();
        main_test_pthread_attr_setaffinity_np();
        main_test_lzma();
+       main_test_get_cpuid();
 
        return 0;
 }
diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c
new file mode 100644 (file)
index 0000000..d7a2c40
--- /dev/null
@@ -0,0 +1,7 @@
+#include <cpuid.h>
+
+int main(void)
+{
+       unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
+       return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
+}
diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c
new file mode 100644 (file)
index 0000000..2606e94
--- /dev/null
@@ -0,0 +1,6 @@
+#include <numa.h>
+
+int main(void)
+{
+       return numa_num_possible_cpus();
+}
index 4d885934b9190e9dc995d6f5fb80b3a90ef218a3..cf42b090477b9795ac1f48dff6c1c7b93bed7f65 100644 (file)
@@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
        struct format_field *field;
        struct printk_map *printk;
        long long val, fval;
-       unsigned long addr;
+       unsigned long long addr;
        char *str;
        unsigned char *hex;
        int print;
@@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                 */
                if (!(field->flags & FIELD_IS_ARRAY) &&
                    field->size == pevent->long_size) {
-                       addr = *(unsigned long *)(data + field->offset);
+
+                       /* Handle heterogeneous recording and processing
+                        * architectures
+                        *
+                        * CASE I:
+                        * Traces recorded on 32-bit devices (32-bit
+                        * addressing) and processed on 64-bit devices:
+                        * In this case, only 32 bits should be read.
+                        *
+                        * CASE II:
+                        * Traces recorded on 64-bit devices and processed
+                        * on 32-bit devices:
+                        * In this case, 64 bits must be read.
+                        */
+                       addr = (pevent->long_size == 8) ?
+                               *(unsigned long long *)(data + field->offset) :
+                               (unsigned long long)*(unsigned int *)(data + field->offset);
+
                        /* Check if it matches a print format */
                        printk = find_printk(pevent, addr);
                        if (printk)
                                trace_seq_puts(s, printk->printk);
                        else
-                               trace_seq_printf(s, "%lx", addr);
+                               trace_seq_printf(s, "%llx", addr);
                        break;
                }
                str = malloc(len + 1);
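
The hunk above reads a pointer-sized trace field according to the long_size recorded in the trace rather than the host's own long size. A standalone sketch of the same two-case read (the names and record values here are hypothetical, not the pevent API):

    #include <stdio.h>

    /* Read a pointer-sized field as it was recorded, not as the host
     * would lay it out: 4 bytes for a 32-bit trace, 8 for a 64-bit one. */
    static unsigned long long read_recorded_long(const void *p, int long_size)
    {
            if (long_size == 8)
                    return *(const unsigned long long *)p;
            return (unsigned long long)*(const unsigned int *)p;
    }

    int main(void)
    {
            unsigned int rec32 = 0xc0de1234u;               /* 32-bit record */
            unsigned long long rec64 = 0xffffffff81000000ull;

            printf("%llx\n", read_recorded_long(&rec32, 4));
            printf("%llx\n", read_recorded_long(&rec64, 8));
            return 0;
    }
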
index 4a0501d7a3b412337960b07a695a772d2b7c1344..c94c9de3173ee187f87be72c0ffa128885115c89 100644 (file)
@@ -364,21 +364,6 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc
 
                CYC packets are not requested by default.
 
-no_force_psb   This is a driver option and is not in the IA32_RTIT_CTL MSR.
-
-               It stops the driver resetting the byte count to zero whenever
-               enabling the trace (for example on context switches) which in
-               turn results in no PSB being forced.  However some processors
-               will produce a PSB anyway.
-
-               In any case, there is still a PSB when the trace is enabled for
-               the first time.
-
-               no_force_psb can be used to slightly decrease the trace size but
-               may make it harder for the decoder to recover from errors.
-
-               no_force_psb is not selected by default.
-
 
 new snapshot option
 -------------------
index 827557fc751123bf030363d355b5b801911d6cb1..38a08539f4bfc0405000dfd1ce543053409241a8 100644 (file)
@@ -573,9 +573,14 @@ ifndef NO_LIBNUMA
     msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev);
     NO_LIBNUMA := 1
   else
-    CFLAGS += -DHAVE_LIBNUMA_SUPPORT
-    EXTLIBS += -lnuma
-    $(call detected,CONFIG_NUMA)
+    ifeq ($(feature-numa_num_possible_cpus), 0)
+      msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8);
+      NO_LIBNUMA := 1
+    else
+      CFLAGS += -DHAVE_LIBNUMA_SUPPORT
+      EXTLIBS += -lnuma
+      $(call detected,CONFIG_NUMA)
+    endif
   endif
 endif
 
@@ -621,8 +626,13 @@ ifdef LIBBABELTRACE
 endif
 
 ifndef NO_AUXTRACE
-  $(call detected,CONFIG_AUXTRACE)
-  CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  ifeq ($(feature-get_cpuid), 0)
+    msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc);
+    NO_AUXTRACE := 1
+  else
+    $(call detected,CONFIG_AUXTRACE)
+    CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+  endif
 endif
 
 # Among the variables below, these:
index eb5f18b754028e863e3b72970842cc33d2fb997d..c6f9af78f6f5f9651c6339bd3140221b9b951b6d 100644 (file)
@@ -270,12 +270,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
        int ret = 0;
 
        if (module) {
-               list_for_each_entry(dso, &host_machine->dsos.head, node) {
-                       if (!dso->kernel)
-                               continue;
-                       if (strncmp(dso->short_name + 1, module,
-                                   dso->short_name_len - 2) == 0)
-                               goto found;
+               char module_name[128];
+
+               snprintf(module_name, sizeof(module_name), "[%s]", module);
+               map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name);
+               if (map) {
+                       dso = map->dso;
+                       goto found;
                }
                pr_debug("Failed to find module %s.\n", module);
                return -ENOENT;
index 8a4537ee9bc374166c31d05f6e48b6ca6943a4b3..fc3f7c922f99abf57246e0446b154299a1e6085d 100644 (file)
@@ -1580,7 +1580,10 @@ static int __perf_session__process_events(struct perf_session *session,
        file_offset = page_offset;
        head = data_offset - page_offset;
 
-       if (data_size && (data_offset + data_size < file_size))
+       if (data_size == 0)
+               goto out;
+
+       if (data_offset + data_size < file_size)
                file_size = data_offset + data_size;
 
        ui_progress__init(&prog, file_size, "Processing events...");
index 415c359de4654be8f68a30effa530dd06696b52b..2d065d065b676232eecbe8ea94348f4cf3a77f33 100644 (file)
@@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter)
                memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
 }
 
-static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
+static int check_per_pkg(struct perf_evsel *counter,
+                        struct perf_counts_values *vals, int cpu, bool *skip)
 {
        unsigned long *mask = counter->per_pkg_mask;
        struct cpu_map *cpus = perf_evsel__cpus(counter);
@@ -218,6 +219,17 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
                counter->per_pkg_mask = mask;
        }
 
+       /*
+        * we do not consider an event that has not run as a good
+        * instance to mark a package as used (skip=1). Otherwise
+        * we may run into a situation where the first CPU in a package
+        * is not running anything, yet the second is, and this function
+        * would mark the package as used after the first CPU and would
+        * not read the values from the second CPU.
+        */
+       if (!(vals->run && vals->ena))
+               return 0;
+
        s = cpu_map__get_socket(cpus, cpu);
        if (s < 0)
                return -1;
@@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
        static struct perf_counts_values zero;
        bool skip = false;
 
-       if (check_per_pkg(evsel, cpu, &skip)) {
+       if (check_per_pkg(evsel, count, cpu, &skip)) {
                pr_err("failed to read per-pkg counter\n");
                return -1;
        }
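
check_per_pkg() above now refuses to mark a package as counted unless the instance actually ran, so a later, running instance on the same package is still read. A self-contained sketch of that guard, with hypothetical values:

    #include <stdbool.h>
    #include <stdio.h>

    struct vals { unsigned long run, ena; };

    static bool seen[64];   /* one flag per socket */

    /* Mirrors the patched check_per_pkg(): an instance that never ran
     * must not claim its package, or a later running instance on the
     * same package would be skipped. */
    static bool should_skip(const struct vals *v, int socket)
    {
            if (!(v->run && v->ena))
                    return false;   /* didn't run: don't claim the package */
            if (seen[socket])
                    return true;    /* package already counted */
            seen[socket] = true;
            return false;
    }

    int main(void)
    {
            struct vals idle = { 0, 0 }, busy = { 100, 100 };

            /* CPU0 (socket 0) idle, CPU1 (socket 0) busy: */
            printf("%d\n", should_skip(&idle, 0));  /* 0: read, package unclaimed */
            printf("%d\n", should_skip(&busy, 0));  /* 0: first running instance */
            printf("%d\n", should_skip(&busy, 0));  /* 1: duplicate for package */
            return 0;
    }
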
index 53bb5f59ec589c22f7b1cd211cb132a3ca98d702..475d88d0a1c9a772323b3218cfcf5f5900c0e809 100644 (file)
@@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v,
 #endif
 
 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
-int elf_getphdrnum(Elf *elf, size_t *dst)
+static int elf_getphdrnum(Elf *elf, size_t *dst)
 {
        GElf_Ehdr gehdr;
        GElf_Ehdr *ehdr;
@@ -1271,8 +1271,6 @@ out_close:
 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
                       bool temp)
 {
-       GElf_Ehdr *ehdr;
-
        kcore->elfclass = elfclass;
 
        if (temp)
@@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
        if (!gelf_newehdr(kcore->elf, elfclass))
                goto out_end;
 
-       ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
-       if (!ehdr)
-               goto out_end;
+       memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
 
        return 0;
 
@@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
                           u64 addr, u64 len)
 {
-       GElf_Phdr gphdr;
-       GElf_Phdr *phdr;
-
-       phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
-       if (!phdr)
-               return -1;
-
-       phdr->p_type    = PT_LOAD;
-       phdr->p_flags   = PF_R | PF_W | PF_X;
-       phdr->p_offset  = offset;
-       phdr->p_vaddr   = addr;
-       phdr->p_paddr   = 0;
-       phdr->p_filesz  = len;
-       phdr->p_memsz   = len;
-       phdr->p_align   = page_size;
-
-       if (!gelf_update_phdr(kcore->elf, idx, phdr))
+       GElf_Phdr phdr = {
+               .p_type         = PT_LOAD,
+               .p_flags        = PF_R | PF_W | PF_X,
+               .p_offset       = offset,
+               .p_vaddr        = addr,
+               .p_paddr        = 0,
+               .p_filesz       = len,
+               .p_memsz        = len,
+               .p_align        = page_size,
+       };
+
+       if (!gelf_update_phdr(kcore->elf, idx, &phdr))
                return -1;
 
        return 0;
index 7acafb3c5592d60501561986b1812fb6b121f271..c2cd9bf2348b5eb8e68603c32b7a3bf220c41aad 100644 (file)
@@ -709,7 +709,7 @@ bool find_process(const char *name)
 
        dir = opendir(procfs__mountpoint());
        if (!dir)
-               return -1;
+               return false;
 
        /* Walk through the directory. */
        while (ret && (d = readdir(dir)) != NULL) {
index 9655cb49c7cb8eb6427b078e0472451b777b9910..bde0ef1a63df4876d5149c85f4083eb92561dbb4 100644 (file)
@@ -71,8 +71,11 @@ unsigned int extra_msr_offset32;
 unsigned int extra_msr_offset64;
 unsigned int extra_delta_offset32;
 unsigned int extra_delta_offset64;
+unsigned int aperf_mperf_multiplier = 1;
 int do_smi;
 double bclk;
+double base_hz;
+double tsc_tweak = 1.0;
 unsigned int show_pkg;
 unsigned int show_core;
 unsigned int show_cpu;
@@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        /* %Busy */
        if (has_aperf) {
                if (!skip_c0)
-                       outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
+                       outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak);
                else
                        outp += sprintf(outp, "********");
        }
@@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        /* Bzy_MHz */
        if (has_aperf)
                outp += sprintf(outp, "%8.0f",
-                       1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
+                       1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float);
 
        /* TSC_MHz */
        outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
@@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        return -3;
                if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf))
                        return -4;
+               t->aperf = t->aperf * aperf_mperf_multiplier;
+               t->mperf = t->mperf * aperf_mperf_multiplier;
        }
 
        if (do_smi) {
@@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV,
 int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
 
+
+static void
+calculate_tsc_tweak()
+{
+       unsigned long long msr;
+       unsigned int base_ratio;
+
+       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+       base_ratio = (msr >> 8) & 0xFF;
+       base_hz = base_ratio * bclk * 1000000;
+       tsc_tweak = base_hz / tsc_hz;
+}
+
 static void
 dump_nhm_platform_info(void)
 {
@@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model)
 
        switch (model) {
        case 0x3A:      /* IVB */
-       case 0x3E:      /* IVB Xeon */
-
        case 0x3C:      /* HSW */
        case 0x3F:      /* HSX */
        case 0x45:      /* HSW */
@@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model)
        return 0;
 }
 
+unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model)
+{
+       if (is_knl(family, model))
+               return 1024;
+       return 1;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
 
@@ -2744,6 +2767,9 @@ void process_cpuid()
                }
        }
 
+       if (has_aperf)
+               aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model);
+
        do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model);
        do_snb_cstates = has_snb_msrs(family, model);
        do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2);
@@ -2762,6 +2788,9 @@ void process_cpuid()
        if (debug)
                dump_cstate_pstate_config_info();
 
+       if (has_skl_msrs(family, model))
+               calculate_tsc_tweak();
+
        return;
 }
 
@@ -3090,7 +3119,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(stderr, "turbostat version 4.7 17-June, 2015"
+       fprintf(stderr, "turbostat version 4.8 26-Sep, 2015"
                " - Len Brown <lenb@kernel.org>\n");
 }