Merge tag 'spi-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
author     Linus Torvalds <torvalds@linux-foundation.org>    Tue, 1 Apr 2014 20:23:53 +0000 (13:23 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>    Tue, 1 Apr 2014 20:23:53 +0000 (13:23 -0700)
Pull spi updates from Mark Brown:
 "A busy release for both cleanups and new drivers this time along with
  further factoring out of replicated code into the core:

   - Provide support in the core for DMA mapping transfers - essentially
     all drivers weren't implementing this properly, now there's no
     excuse.
   - Dual and quad mode support for spidev.
   - Fix handling of cs_change in the generic implementation.
   - Remove the S3C_DMA code from the s3c64xx driver now that all the
     platforms using it have been converted to dmaengine.
   - Lots of improvements to the Renesas SPI controllers.
   - Drivers for Allwinner A10 and A31, Qualcomm QUP and Xilinx xtfpga.
   - Removal of the bitrotted ti-ssp driver"
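
The core DMA mapping called out in the first bullet works by having the controller
driver publish a can_dma() callback plus dma_tx/dma_rx channels; the core then maps
each transfer's buffers into xfer->tx_sg/xfer->rx_sg before transfer_one() runs.
A minimal sketch of a hypothetical driver opting in (the foo_spi_* names are
illustrative only, not a driver from this merge; only the TX half is shown, RX is
symmetrical via dma_rx/rx_sg):

    #include <linux/spi/spi.h>
    #include <linux/dmaengine.h>

    /* Only ask the core to DMA-map transfers that are worth the setup cost. */
    static bool foo_spi_can_dma(struct spi_master *master, struct spi_device *spi,
                                struct spi_transfer *xfer)
    {
            return xfer->len > 32;          /* threshold is driver policy */
    }

    static int foo_spi_transfer_one(struct spi_master *master, struct spi_device *spi,
                                    struct spi_transfer *xfer)
    {
            if (master->can_dma && master->can_dma(master, spi, xfer) && xfer->tx_buf) {
                    /* The core has already mapped tx_buf into xfer->tx_sg. */
                    struct dma_async_tx_descriptor *desc;

                    desc = dmaengine_prep_slave_sg(master->dma_tx, xfer->tx_sg.sgl,
                                                   xfer->tx_sg.nents, DMA_MEM_TO_DEV,
                                                   DMA_PREP_INTERRUPT);
                    if (!desc)
                            return -EIO;
                    dmaengine_submit(desc);
                    dma_async_issue_pending(master->dma_tx);
                    /* Positive return: transfer still in flight. */
                    return 1;
            }

            /* ... PIO fallback elided ... */
            return 0;
    }

    static void foo_spi_setup_master(struct spi_master *master)
    {
            /* dma_tx/dma_rx would come from dma_request_slave_channel() in probe(). */
            master->can_dma = foo_spi_can_dma;
            master->transfer_one = foo_spi_transfer_one;
    }

Returning 1 from transfer_one() tells the core the transfer is still in progress, so
the driver is expected to complete it later with spi_finalize_current_transfer() from
its DMA completion callback.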

* tag 'spi-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (199 commits)
  spi: Fix handling of cs_change in core implementation
  spi: bitbang: Make spi_bitbang_stop() return void
  spi: mpc52xx: Convert to use bits_per_word_mask
  spi: omap-100k: Fix memory leak
  spi: dw: Don't call kfree for memory allocated by devm_kzalloc
  spi: fsl-dspi: Fix memory leak
  spi: omap-uwire: add missing iounmap
  spi: clps711x: Convert to use master->max_speed_hz
  spi: clps711x: Enable driver compilation with COMPILE_TEST
  spi: omap-uwire: Remove full duplex check
  spi: Do not require a completion
  spi: topcliff-pch: Transform noisy message to dev_vdbg
  spi: coldfire-qspi: Simplify the code to set register bits for transfer speed
  spi: bcm63xx: Remove unused define for PFX
  spi: efm32: use $vendor,$device scheme for compatible string
  spi: clps711x: Remove <mach/hardware.h> dependency
  spi: topcliff-pch: Properly unregister platform devices on probe() error paths
  spi: fsl-espi: Remove unused bits_per_word variable in fsl_espi_bufs
  spi: altera: Remove the code to get unused platform_data
  spi: fsl-lib: Fix memory leak of pinfo
  ...

1329 files changed:
Documentation/ABI/testing/sysfs-devices-power
Documentation/ABI/testing/sysfs-power
Documentation/RCU/RTFP.txt
Documentation/RCU/checklist.txt
Documentation/arm64/memory.txt
Documentation/cpu-freq/core.txt
Documentation/cpu-freq/cpu-drivers.txt
Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
Documentation/devicetree/bindings/arm/marvell,dove.txt [new file with mode: 0644]
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/ata/apm-xgene.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt
Documentation/devicetree/bindings/interrupt-controller/allwinner,sun67i-sc-nmi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/s2mpa01.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/s2mps11.txt
Documentation/devicetree/bindings/mfd/tps65910.txt
Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/marvell,dove-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/marvell,kirkwood-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/marvell,mvebu-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
Documentation/devicetree/bindings/regulator/gpio-regulator.txt
Documentation/devicetree/bindings/regulator/pfuze100.txt
Documentation/devicetree/bindings/regulator/s5m8767-regulator.txt
Documentation/devicetree/bindings/regulator/ti-abb-regulator.txt
Documentation/devicetree/bindings/timer/allwinner,sun4i-timer.txt
Documentation/devicetree/bindings/timer/ti,keystone-timer.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
Documentation/kernel-per-CPU-kthreads.txt
Documentation/memory-barriers.txt
Documentation/power/pm_qos_interface.txt
Documentation/sysctl/kernel.txt
Documentation/trace/events-power.txt
Documentation/x86/boot.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/Kbuild
arch/alpha/include/asm/cputime.h [deleted file]
arch/arc/include/asm/Kbuild
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/zynq-7000.dtsi
arch/arm/include/asm/Kbuild
arch/arm/include/asm/topology.h
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_twd.c
arch/arm/mach-davinci/da850.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-imx/pm-imx6q.c
arch/arm/mach-mmp/pm-mmp2.c
arch/arm/mach-mmp/pm-pxa910.c
arch/arm/mach-omap1/ams-delta-fiq.c
arch/arm/mach-pxa/viper.c
arch/arm/mach-shmobile/Kconfig
arch/arm/mach-spear/spear1310.c
arch/arm/mach-spear/spear1340.c
arch/arm/mach-u300/Makefile
arch/arm/mach-zynq/Kconfig
arch/arm/mach-zynq/common.c
arch/arm64/Kconfig
arch/arm64/boot/dts/apm-storm.dtsi
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/compat.h
arch/arm64/include/asm/cpufeature.h [new file with mode: 0644]
arch/arm64/include/asm/debug-monitors.h
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/hwcap.h
arch/arm64/include/asm/io.h
arch/arm64/include/asm/irqflags.h
arch/arm64/include/asm/kgdb.h [new file with mode: 0644]
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/psci.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/tlb.h
arch/arm64/include/asm/topology.h [new file with mode: 0644]
arch/arm64/include/asm/uaccess.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/uapi/asm/Kbuild
arch/arm64/include/uapi/asm/perf_regs.h [new file with mode: 0644]
arch/arm64/kernel/Makefile
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/head.S
arch/arm64/kernel/kgdb.c [new file with mode: 0644]
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/perf_regs.c [new file with mode: 0644]
arch/arm64/kernel/process.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/smp_spin_table.c
arch/arm64/kernel/topology.c [new file with mode: 0644]
arch/arm64/kernel/vdso.c
arch/arm64/kvm/hyp-init.S
arch/arm64/mm/cache.S
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/init.c
arch/arm64/mm/proc.S
arch/avr32/include/asm/Kbuild
arch/avr32/include/asm/bugs.h
arch/avr32/include/asm/processor.h
arch/avr32/kernel/cpu.c
arch/avr32/mm/cache.c
arch/blackfin/include/asm/Kbuild
arch/blackfin/include/asm/irq.h
arch/c6x/include/asm/Kbuild
arch/cris/include/asm/Kbuild
arch/cris/include/asm/cputime.h [deleted file]
arch/frv/include/asm/Kbuild
arch/frv/include/asm/cputime.h [deleted file]
arch/hexagon/include/asm/Kbuild
arch/ia64/configs/generic_defconfig
arch/ia64/configs/tiger_defconfig
arch/ia64/configs/zx1_defconfig
arch/ia64/hp/common/sba_iommu.c
arch/ia64/include/asm/Kbuild
arch/ia64/include/asm/topology.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/efi.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/mca.c
arch/ia64/kernel/msi_ia64.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/time.c
arch/ia64/sn/kernel/irq.c
arch/ia64/sn/kernel/msi_sn.c
arch/m32r/include/asm/Kbuild
arch/m32r/include/asm/cputime.h [deleted file]
arch/m68k/Kconfig
arch/m68k/amiga/cia.c
arch/m68k/atari/ataints.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/kernel/head.S
arch/m68k/kernel/ints.c
arch/metag/include/asm/Kbuild
arch/microblaze/include/asm/Kbuild
arch/microblaze/include/asm/cputime.h [deleted file]
arch/mips/include/asm/Kbuild
arch/mips/include/asm/topology.h
arch/mips/include/asm/unistd.h
arch/mips/kernel/smtc.c
arch/mips/sgi-ip22/ip22-int.c
arch/mips/sgi-ip22/ip22-time.c
arch/mips/sibyte/bcm1480/irq.c
arch/mips/sibyte/bcm1480/smp.c
arch/mips/sibyte/sb1250/irq.c
arch/mips/sibyte/sb1250/smp.c
arch/mn10300/include/asm/Kbuild
arch/mn10300/include/asm/cputime.h [deleted file]
arch/mn10300/kernel/cevt-mn10300.c
arch/mn10300/kernel/mn10300-serial.c
arch/mn10300/kernel/mn10300-watchdog.c
arch/mn10300/kernel/smp.c
arch/mn10300/unit-asb2364/irq-fpga.c
arch/openrisc/include/asm/Kbuild
arch/parisc/include/asm/Kbuild
arch/parisc/kernel/irq.c
arch/powerpc/include/asm/Kbuild
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/irq.c
arch/powerpc/oprofile/op_model_cell.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/ehv_pic.c
arch/s390/Kconfig
arch/s390/appldata/appldata_os.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/hypfs/hypfs_vm.c
arch/s390/include/asm/Kbuild
arch/s390/include/asm/airq.h
arch/s390/include/asm/bitops.h
arch/s390/include/asm/ccwdev.h
arch/s390/include/asm/checksum.h
arch/s390/include/asm/compat.h
arch/s390/include/asm/futex.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/ptrace.h
arch/s390/include/asm/sclp.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/thread_info.h
arch/s390/include/asm/uaccess.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/kernel/Makefile
arch/s390/kernel/compat_exec_domain.c [deleted file]
arch/s390/kernel/compat_linux.c
arch/s390/kernel/compat_linux.h
arch/s390/kernel/compat_signal.c
arch/s390/kernel/compat_wrapper.S [deleted file]
arch/s390/kernel/compat_wrapper.c [new file with mode: 0644]
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry.h
arch/s390/kernel/entry64.S
arch/s390/kernel/irq.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/topology.c
arch/s390/kvm/diag.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/lib/Makefile
arch/s390/lib/find.c
arch/s390/lib/uaccess.h
arch/s390/lib/uaccess_mvcos.c
arch/s390/lib/uaccess_pt.c
arch/s390/mm/maccess.c
arch/s390/mm/pgtable.c
arch/s390/pci/pci_debug.c
arch/score/include/asm/Kbuild
arch/score/include/asm/cputime.h [deleted file]
arch/sh/Kconfig
arch/sh/include/asm/Kbuild
arch/sh/kernel/idle.c
arch/sh/kernel/irq.c
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/smp_64.h
arch/sparc/include/asm/topology_64.h
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/time_64.c
arch/tile/include/asm/Kbuild
arch/um/include/asm/Kbuild
arch/unicore32/include/asm/Kbuild
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/boot/compressed/efi_stub_64.S
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/cpucheck.c
arch/x86/boot/header.S
arch/x86/boot/tools/build.c
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/include/asm/Kbuild
arch/x86/include/asm/apic.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cputime.h [deleted file]
arch/x86/include/asm/efi.h
arch/x86/include/asm/floppy.h
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/nmi.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/topology.h
arch/x86/include/asm/unistd.h
arch/x86/include/asm/xsave.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_flat_64.c
arch/x86/kernel/apic/apic_noop.c
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/es7000_32.c
arch/x86/kernel/apic/numaq_32.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/summit_32.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/crash.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/hpet.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/module.c
arch/x86/kernel/nmi.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/time.c
arch/x86/kernel/tsc.c
arch/x86/kvm/cpuid.c
arch/x86/lib/hash.c
arch/x86/lib/memcpy_32.c
arch/x86/lib/msr.c
arch/x86/mm/dump_pagetables.c
arch/x86/mm/fault.c
arch/x86/mm/pageattr.c
arch/x86/mm/srat.c
arch/x86/platform/efi/Makefile
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/efi/efi_thunk_64.S [new file with mode: 0644]
arch/x86/platform/ts5500/ts5500.c
arch/x86/vdso/Makefile
arch/x86/xen/mmu.c
arch/x86/xen/spinlock.c
arch/xtensa/include/asm/Kbuild
arch/xtensa/kernel/irq.c
drivers/acpi/Kconfig
drivers/acpi/ac.c
drivers/acpi/acpi_cmos_rtc.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/accommon.h
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acdispat.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/achware.h
drivers/acpi/acpica/acinterp.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acopcode.h
drivers/acpi/acpica/acparser.h
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/acresrc.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/amlcode.h
drivers/acpi/acpica/amlresrc.h
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsmthdat.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/dsutils.c
drivers/acpi/acpica/dswexec.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/dswscope.c
drivers/acpi/acpica/dswstate.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evglock.c
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evhandler.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/evsci.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/acpica/evxfregn.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exconvrt.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmisc.c
drivers/acpi/acpica/exmutex.c
drivers/acpi/acpica/exnames.c
drivers/acpi/acpica/exoparg1.c
drivers/acpi/acpica/exoparg2.c
drivers/acpi/acpica/exoparg3.c
drivers/acpi/acpica/exoparg6.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/exresnte.c
drivers/acpi/acpica/exresolv.c
drivers/acpi/acpica/exresop.c
drivers/acpi/acpica/exstore.c
drivers/acpi/acpica/exstoren.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/acpica/exsystem.c
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/hwacpi.c
drivers/acpi/acpica/hwesleep.c
drivers/acpi/acpica/hwgpe.c
drivers/acpi/acpica/hwpci.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/hwvalid.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/hwxfsleep.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsalloc.c
drivers/acpi/acpica/nsarguments.c
drivers/acpi/acpica/nsconvert.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsdumpdv.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/nsload.c
drivers/acpi/acpica/nsnames.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/acpica/nsprepkg.c
drivers/acpi/acpica/nsrepair.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nssearch.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nswalk.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/nsxfobj.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/psobject.c
drivers/acpi/acpica/psopcode.c
drivers/acpi/acpica/psopinfo.c
drivers/acpi/acpica/psparse.c
drivers/acpi/acpica/psscope.c
drivers/acpi/acpica/pstree.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/pswalk.c
drivers/acpi/acpica/psxface.c
drivers/acpi/acpica/rsaddr.c
drivers/acpi/acpica/rscalc.c
drivers/acpi/acpica/rscreate.c
drivers/acpi/acpica/rsdump.c
drivers/acpi/acpica/rsdumpinfo.c
drivers/acpi/acpica/rsinfo.c
drivers/acpi/acpica/rsio.c
drivers/acpi/acpica/rsirq.c
drivers/acpi/acpica/rslist.c
drivers/acpi/acpica/rsmemory.c
drivers/acpi/acpica/rsmisc.c
drivers/acpi/acpica/rsserial.c
drivers/acpi/acpica/rsutils.c
drivers/acpi/acpica/rsxface.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/acpica/tbxfload.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utaddress.c
drivers/acpi/acpica/utalloc.c
drivers/acpi/acpica/utbuffer.c
drivers/acpi/acpica/utcache.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/uterror.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utexcep.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utlock.c
drivers/acpi/acpica/utmath.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utmutex.c
drivers/acpi/acpica/utobject.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utownerid.c
drivers/acpi/acpica/utpredef.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/acpica/utstate.c
drivers/acpi/acpica/utstring.c
drivers/acpi/acpica/uttrack.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxferror.c
drivers/acpi/acpica/utxfinit.c
drivers/acpi/acpica/utxfmutex.c
drivers/acpi/apei/Kconfig
drivers/acpi/battery.c
drivers/acpi/battery.h [new file with mode: 0644]
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/container.c
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/fan.c
drivers/acpi/glue.c
drivers/acpi/internal.h
drivers/acpi/osl.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/acpi/pci_root.c
drivers/acpi/power.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/acpi/tables.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/acard-ahci.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_da850.c [new file with mode: 0644]
drivers/ata/ahci_imx.c
drivers/ata/ahci_platform.c
drivers/ata/ahci_st.c [new file with mode: 0644]
drivers/ata/ahci_sunxi.c [new file with mode: 0644]
drivers/ata/ahci_xgene.c [new file with mode: 0644]
drivers/ata/ata_generic.c
drivers/ata/libahci.c
drivers/ata/libahci_platform.c [new file with mode: 0644]
drivers/ata/libata-acpi.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-zpodd.c
drivers/ata/pata_acpi.c
drivers/ata/pata_amd.c
drivers/ata/pata_arasan_cf.c
drivers/ata/pata_artop.c
drivers/ata/pata_at91.c
drivers/ata/pata_atiixp.c
drivers/ata/pata_atp867x.c
drivers/ata/pata_cmd640.c
drivers/ata/pata_cmd64x.c
drivers/ata/pata_cs5520.c
drivers/ata/pata_cs5530.c
drivers/ata/pata_cs5535.c
drivers/ata/pata_cs5536.c
drivers/ata/pata_cypress.c
drivers/ata/pata_efar.c
drivers/ata/pata_ep93xx.c
drivers/ata/pata_hpt366.c
drivers/ata/pata_hpt37x.c
drivers/ata/pata_hpt3x2n.c
drivers/ata/pata_hpt3x3.c
drivers/ata/pata_imx.c
drivers/ata/pata_it8213.c
drivers/ata/pata_it821x.c
drivers/ata/pata_jmicron.c
drivers/ata/pata_legacy.c
drivers/ata/pata_marvell.c
drivers/ata/pata_mpiix.c
drivers/ata/pata_netcell.c
drivers/ata/pata_ninja32.c
drivers/ata/pata_ns87410.c
drivers/ata/pata_ns87415.c
drivers/ata/pata_oldpiix.c
drivers/ata/pata_opti.c
drivers/ata/pata_optidma.c
drivers/ata/pata_pcmcia.c
drivers/ata/pata_pdc2027x.c
drivers/ata/pata_pdc202xx_old.c
drivers/ata/pata_piccolo.c
drivers/ata/pata_platform.c
drivers/ata/pata_pxa.c
drivers/ata/pata_radisys.c
drivers/ata/pata_rdc.c
drivers/ata/pata_rz1000.c
drivers/ata/pata_sc1200.c
drivers/ata/pata_scc.c
drivers/ata/pata_sch.c
drivers/ata/pata_serverworks.c
drivers/ata/pata_sil680.c
drivers/ata/pata_sis.c
drivers/ata/pata_sl82c105.c
drivers/ata/pata_triflex.c
drivers/ata/pata_via.c
drivers/ata/pdc_adma.c
drivers/ata/sata_dwc_460ex.c
drivers/ata/sata_highbank.c
drivers/ata/sata_nv.c
drivers/ata/sata_promise.c
drivers/ata/sata_qstor.c
drivers/ata/sata_sil.c
drivers/ata/sata_sis.c
drivers/ata/sata_svw.c
drivers/ata/sata_sx4.c
drivers/ata/sata_uli.c
drivers/ata/sata_via.c
drivers/ata/sata_vsc.c
drivers/base/devres.c
drivers/base/power/domain.c
drivers/base/power/main.c
drivers/base/power/power.h
drivers/base/power/qos.c
drivers/base/power/runtime.c
drivers/base/power/sysfs.c
drivers/base/regmap/internal.h
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap-debugfs.c
drivers/base/regmap/regmap-irq.c
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap.c
drivers/block/floppy.c
drivers/block/nvme-core.c
drivers/block/rbd.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/cadence_ttc_timer.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/sun4i_timer.c
drivers/clocksource/time-armada-370-xp.c
drivers/clocksource/time-orion.c
drivers/clocksource/timer-keystone.c [new file with mode: 0644]
drivers/clocksource/timer-u300.c [moved from arch/arm/mach-u300/timer.c with 99% similarity]
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/blackfin-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/cris-artpec3-cpufreq.c
drivers/cpufreq/cris-etraxfs-cpufreq.c
drivers/cpufreq/davinci-cpufreq.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/elanfreq.c
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/pxa3xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpufreq/sc520_freq.c
drivers/cpufreq/sh-cpufreq.c
drivers/cpufreq/sparc-us2e-cpufreq.c
drivers/cpufreq/sparc-us3-cpufreq.c
drivers/cpufreq/spear-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpufreq/speedstep-ich.c
drivers/cpufreq/speedstep-smi.c
drivers/cpufreq/tegra-cpufreq.c
drivers/cpuidle/cpuidle-powernv.c
drivers/cpuidle/cpuidle-pseries.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/driver.c
drivers/cpuidle/governors/menu.c
drivers/devfreq/devfreq.c
drivers/firmware/dcdbas.c
drivers/firmware/efi/efi-stub-helper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/efivars.c
drivers/gpu/drm/drm_cache.c
drivers/gpu/drm/gma500/Kconfig
drivers/gpu/drm/gma500/mmu.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/udl/udl_gem.c
drivers/hv/vmbus_drv.c
drivers/i2c/busses/i2c-cpm.c
drivers/input/evdev.c
drivers/input/keyboard/adp5588-keys.c
drivers/input/misc/da9052_onkey.c
drivers/input/mouse/cypress_ps2.c
drivers/input/mouse/synaptics.c
drivers/input/mousedev.c
drivers/input/touchscreen/st1232.c
drivers/irqchip/Makefile
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-bcm2835.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mmp.c
drivers/irqchip/irq-moxart.c
drivers/irqchip/irq-orion.c
drivers/irqchip/irq-sirfsoc.c
drivers/irqchip/irq-sun4i.c
drivers/irqchip/irq-sunxi-nmi.c [new file with mode: 0644]
drivers/irqchip/irq-vic.c
drivers/irqchip/irq-vt8500.c
drivers/irqchip/irq-xtensa-mx.c
drivers/irqchip/irq-zevio.c
drivers/irqchip/irqchip.c
drivers/mfd/arizona-core.c
drivers/mfd/sec-core.c
drivers/mfd/sec-irq.c
drivers/mfd/wm5102-tables.c
drivers/mmc/host/dw_mmc.c
drivers/mtd/nand/sh_flctl.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ifb.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/of/base.c
drivers/pci/host/pcie-designware.c
drivers/pci/hotplug/acpiphp.h
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci-driver.c
drivers/pcmcia/sa11xx_base.c
drivers/pinctrl/Kconfig
drivers/pinctrl/devicetree.c
drivers/pinctrl/mvebu/Kconfig
drivers/pinctrl/mvebu/Makefile
drivers/pinctrl/mvebu/pinctrl-armada-370.c
drivers/pinctrl/mvebu/pinctrl-armada-375.c [new file with mode: 0644]
drivers/pinctrl/mvebu/pinctrl-armada-38x.c [new file with mode: 0644]
drivers/pinctrl/mvebu/pinctrl-armada-xp.c
drivers/pinctrl/mvebu/pinctrl-dove.c
drivers/pinctrl/mvebu/pinctrl-kirkwood.c
drivers/pinctrl/mvebu/pinctrl-mvebu.c
drivers/pinctrl/mvebu/pinctrl-mvebu.h
drivers/pinctrl/pinctrl-adi2-bf54x.c
drivers/pinctrl/pinctrl-adi2-bf60x.c
drivers/pinctrl/pinctrl-adi2.c
drivers/pinctrl/pinctrl-adi2.h
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinctrl-baytrail.c
drivers/pinctrl/pinctrl-exynos.c
drivers/pinctrl/pinctrl-imx.c
drivers/pinctrl/pinctrl-msm.c
drivers/pinctrl/pinctrl-msm.h
drivers/pinctrl/pinctrl-msm8x74.c
drivers/pinctrl/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-samsung.c
drivers/pinctrl/pinctrl-samsung.h
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/pinctrl-sunxi-pins.h
drivers/pinctrl/pinctrl-tegra.c
drivers/pinctrl/pinctrl-tegra.h
drivers/pinctrl/pinctrl-tegra114.c
drivers/pinctrl/pinctrl-tegra124.c
drivers/pinctrl/pinctrl-tegra20.c
drivers/pinctrl/pinctrl-tegra30.c
drivers/pinctrl/sh-pfc/pfc-r8a7790.c
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/pinctrl/sirf/pinctrl-atlas6.c
drivers/pinctrl/sirf/pinctrl-prima2.c
drivers/pinctrl/sirf/pinctrl-sirf.c
drivers/platform/x86/Kconfig
drivers/platform/x86/fujitsu-laptop.c
drivers/pnp/pnpbios/bioscalls.c
drivers/powercap/intel_rapl.c
drivers/ps3/ps3-vuart.c
drivers/regulator/88pm800.c
drivers/regulator/88pm8607.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/aat2870-regulator.c
drivers/regulator/act8865-regulator.c
drivers/regulator/anatop-regulator.c
drivers/regulator/arizona-ldo1.c
drivers/regulator/arizona-micsupp.c
drivers/regulator/as3711-regulator.c
drivers/regulator/as3722-regulator.c
drivers/regulator/bcm590xx-regulator.c [new file with mode: 0644]
drivers/regulator/core.c
drivers/regulator/da9052-regulator.c
drivers/regulator/da9055-regulator.c
drivers/regulator/da9063-regulator.c
drivers/regulator/da9210-regulator.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/dbx500-prcmu.c
drivers/regulator/dummy.c
drivers/regulator/fan53555.c
drivers/regulator/fixed.c
drivers/regulator/gpio-regulator.c
drivers/regulator/helpers.c
drivers/regulator/lp3971.c
drivers/regulator/lp872x.c
drivers/regulator/max14577.c
drivers/regulator/max1586.c
drivers/regulator/max77686.c
drivers/regulator/max77693.c
drivers/regulator/max8649.c
drivers/regulator/max8660.c
drivers/regulator/max8907-regulator.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8952.c
drivers/regulator/max8973-regulator.c
drivers/regulator/max8997.c
drivers/regulator/max8998.c
drivers/regulator/mc13xxx-regulator-core.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/rc5t583-regulator.c
drivers/regulator/s2mpa01.c [new file with mode: 0644]
drivers/regulator/s2mps11.c
drivers/regulator/s5m8767.c
drivers/regulator/st-pwm.c [new file with mode: 0644]
drivers/regulator/ti-abb-regulator.c
drivers/regulator/tps51632-regulator.c
drivers/regulator/tps62360-regulator.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/tps65090-regulator.c
drivers/regulator/tps65217-regulator.c
drivers/regulator/tps65218-regulator.c [new file with mode: 0644]
drivers/regulator/tps6524x-regulator.c
drivers/regulator/tps6586x-regulator.c
drivers/regulator/tps65910-regulator.c
drivers/regulator/tps80031-regulator.c
drivers/regulator/wm831x-dcdc.c
drivers/regulator/wm831x-isink.c
drivers/regulator/wm831x-ldo.c
drivers/regulator/wm8350-regulator.c
drivers/regulator/wm8994-regulator.c
drivers/s390/char/con3215.c
drivers/s390/char/con3270.c
drivers/s390/char/raw3270.c
drivers/s390/char/raw3270.h
drivers/s390/char/sclp_early.c
drivers/s390/cio/airq.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/cio.c
drivers/s390/cio/device.c
drivers/s390/net/qeth_core_main.c
drivers/scsi/atari_scsi.c
drivers/scsi/libsas/sas_ata.c
drivers/staging/fwserial/fwserial.c
drivers/staging/fwserial/fwserial.h
drivers/tty/serial/sh-sci.c
drivers/tty/tty_ldsem.c
drivers/usb/core/hub.c
drivers/vhost/net.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/output.c [deleted file]
drivers/xen/balloon.c
drivers/xen/events/events_2l.c
drivers/xen/events/events_base.c
drivers/xen/events/events_fifo.c
drivers/xen/xen-acpi-cpuhotplug.c
drivers/xen/xen-acpi-memhotplug.c
drivers/xen/xen-acpi-pad.c
fs/afs/internal.h
fs/afs/rxrpc.c
fs/anon_inodes.c
fs/compat.c
fs/compat_binfmt_elf.c
fs/compat_ioctl.c
fs/dcache.c
fs/efivarfs/file.c
fs/exec.c
fs/ext4/inode.c
fs/file.c
fs/mount.h
fs/namei.c
fs/namespace.c
fs/nfsd/vfs.c
fs/ocfs2/stackglue.c
fs/pnode.c
fs/pnode.h
fs/proc/stat.c
fs/proc/uptime.c
fs/read_write.c
fs/timerfd.c
include/acpi/acbuffer.h
include/acpi/acconfig.h
include/acpi/acexcep.h
include/acpi/acnames.h
include/acpi/acoutput.h
include/acpi/acpi.h
include/acpi/acpi_bus.h
include/acpi/acpi_drivers.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/acrestyp.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/acpi/actypes.h
include/acpi/platform/acenv.h
include/acpi/platform/acgcc.h
include/acpi/platform/aclinux.h
include/asm-generic/bitops/const_hweight.h
include/asm-generic/cputime_jiffies.h
include/asm-generic/cputime_nsecs.h
include/asm-generic/mcs_spinlock.h [new file with mode: 0644]
include/asm-generic/pgtable.h
include/asm-generic/rwsem.h
include/linux/acpi.h
include/linux/ahci_platform.h
include/linux/bitops.h
include/linux/clockchips.h
include/linux/compat.h
include/linux/cpufreq.h
include/linux/cputime.h [new file with mode: 0644]
include/linux/device.h
include/linux/efi.h
include/linux/futex.h
include/linux/hardirq.h
include/linux/hrtimer.h
include/linux/init.h
include/linux/interrupt.h
include/linux/io.h
include/linux/irq.h
include/linux/irq_work.h
include/linux/kernel.h
include/linux/kernel_stat.h
include/linux/kexec.h
include/linux/libata.h
include/linux/linkage.h
include/linux/lockdep.h
include/linux/mfd/samsung/core.h
include/linux/mfd/samsung/irq.h
include/linux/mfd/samsung/rtc.h
include/linux/mfd/samsung/s2mpa01.h [new file with mode: 0644]
include/linux/mfd/samsung/s2mps14.h [new file with mode: 0644]
include/linux/mfd/samsung/s5m8767.h
include/linux/mm.h
include/linux/mutex.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/nvme.h
include/linux/of.h
include/linux/pci-acpi.h
include/linux/pci_ids.h
include/linux/pm.h
include/linux/pm_qos.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/regmap.h
include/linux/regulator/driver.h
include/linux/regulator/pfuze100.h
include/linux/sched.h
include/linux/sched/prio.h [new file with mode: 0644]
include/linux/sched/rt.h
include/linux/skbuff.h
include/linux/srcu.h
include/linux/syscalls.h
include/linux/torture.h [new file with mode: 0644]
include/linux/usb/usbnet.h
include/linux/video_output.h [deleted file]
include/linux/workqueue.h
include/net/if_inet6.h
include/scsi/libsas.h
include/trace/events/power.h
include/uapi/asm-generic/unistd.h
init/Kconfig
ipc/compat.c
ipc/compat_mq.c
kernel/Makefile
kernel/audit.c
kernel/compat.c
kernel/cpu/Makefile [deleted file]
kernel/debug/debug_core.c
kernel/events/core.c
kernel/extable.c
kernel/fork.c
kernel/futex.c
kernel/hrtimer.c
kernel/irq/chip.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/proc.c
kernel/irq_work.c
kernel/kexec.c
kernel/ksysfs.c
kernel/locking/Makefile
kernel/locking/lockdep.c
kernel/locking/locktorture.c [new file with mode: 0644]
kernel/locking/mcs_spinlock.c [new file with mode: 0644]
kernel/locking/mcs_spinlock.h [new file with mode: 0644]
kernel/locking/mutex-debug.c
kernel/locking/mutex.c
kernel/locking/rtmutex.c
kernel/locking/rwsem-xadd.c
kernel/module.c
kernel/notifier.c
kernel/panic.c
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/power.h
kernel/power/qos.c
kernel/power/snapshot.c
kernel/power/suspend.c
kernel/power/wakelock.c
kernel/ptrace.c
kernel/rcu/Makefile
kernel/rcu/rcu.h
kernel/rcu/rcutorture.c [moved from kernel/rcu/torture.c with 58% similarity]
kernel/rcu/srcu.c
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/rcu/update.c
kernel/sched/Makefile
kernel/sched/auto_group.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c [moved from kernel/cpu/idle.c with 95% similarity]
kernel/sched/idle_task.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stop_task.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl.c
kernel/time/Kconfig
kernel/time/Makefile
kernel/time/clockevents.c
kernel/time/ntp.c
kernel/time/tick-broadcast-hrtimer.c [new file with mode: 0644]
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/timekeeping.c
kernel/time/timekeeping_debug.c
kernel/timer.c
kernel/torture.c [new file with mode: 0644]
kernel/trace/ring_buffer_benchmark.c
kernel/trace/trace.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_irqsoff.c
kernel/workqueue.c
lib/Kconfig.debug
lib/random32.c
lib/string.c
mm/mempolicy.c
mm/mmu_context.c
mm/percpu.c
mm/process_vm_access.c
mm/rmap.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/bridge/br_device.c
net/bridge/br_input.c
net/bridge/br_vlan.c
net/compat.c
net/core/dev.c
net/core/skbuff.c
net/ipv4/gre_demux.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/tcp_ipv4.c
net/ipv6/addrconf.c
net/l2tp/l2tp_core.c
net/netfilter/nfnetlink_queue_core.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/unix/af_unix.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/gcc-ld [new file with mode: 0644]
scripts/ld-version.sh [new file with mode: 0755]
scripts/mod/modpost.c
scripts/mod/modpost.h
security/keys/compat.c
tools/include/linux/hash.h [new file with mode: 0644]
tools/lib/api/Makefile
tools/lib/api/fs/fs.c [moved from tools/perf/util/fs.c with 91% similarity]
tools/lib/api/fs/fs.h [moved from tools/perf/util/include/linux/magic.h with 50% similarity]
tools/perf/Documentation/perf-mem.txt
tools/perf/Documentation/perf-probe.txt
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/arch/arm/Makefile
tools/perf/arch/arm/util/unwind-libunwind.c [moved from tools/perf/arch/arm/util/unwind.c with 95% similarity]
tools/perf/arch/x86/Makefile
tools/perf/arch/x86/include/perf_regs.h
tools/perf/arch/x86/tests/dwarf-unwind.c [new file with mode: 0644]
tools/perf/arch/x86/tests/regs_load.S [new file with mode: 0644]
tools/perf/arch/x86/util/unwind-libdw.c [new file with mode: 0644]
tools/perf/arch/x86/util/unwind-libunwind.c [moved from tools/perf/arch/x86/util/unwind.c with 95% similarity]
tools/perf/bench/bench.h
tools/perf/bench/futex-hash.c [new file with mode: 0644]
tools/perf/bench/futex-requeue.c [new file with mode: 0644]
tools/perf/bench/futex-wake.c [new file with mode: 0644]
tools/perf/bench/futex.h [new file with mode: 0644]
tools/perf/builtin-bench.c
tools/perf/builtin-diff.c
tools/perf/builtin-inject.c
tools/perf/builtin-kvm.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-timechart.c
tools/perf/builtin-top.c
tools/perf/config/Makefile
tools/perf/config/feature-checks/Makefile
tools/perf/config/feature-checks/test-all.c
tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c [new file with mode: 0644]
tools/perf/design.txt
tools/perf/perf-completion.sh
tools/perf/perf.h
tools/perf/tests/builtin-test.c
tools/perf/tests/dwarf-unwind.c [new file with mode: 0644]
tools/perf/tests/hists_link.c
tools/perf/tests/make
tools/perf/tests/parse-events.c
tools/perf/tests/sample-parsing.c
tools/perf/tests/tests.h
tools/perf/ui/browsers/hists.c
tools/perf/ui/gtk/hists.c
tools/perf/ui/hist.c
tools/perf/ui/stdio/hist.c
tools/perf/util/annotate.c
tools/perf/util/cpumap.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/fs.h [deleted file]
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/hash.h [deleted file]
tools/perf/util/include/linux/kernel.h
tools/perf/util/include/linux/list.h
tools/perf/util/include/linux/prefetch.h [deleted file]
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.h
tools/perf/util/parse-options.c
tools/perf/util/parse-options.h
tools/perf/util/perf_regs.c [new file with mode: 0644]
tools/perf/util/perf_regs.h
tools/perf/util/pmu.c
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/python-ext-sources
tools/perf/util/record.c
tools/perf/util/session.c
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/trace-event-parse.c
tools/perf/util/unwind-libdw.c [new file with mode: 0644]
tools/perf/util/unwind-libdw.h [new file with mode: 0644]
tools/perf/util/unwind-libunwind.c [moved from tools/perf/util/unwind.c with 92% similarity]
tools/perf/util/unwind.h
tools/perf/util/util.c
tools/testing/selftests/rcutorture/bin/functions.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh [moved from tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh with 79% similarity]
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/configs/lock/BUSTED [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/CFLIST [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/CFcommon [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/LOCK01 [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcu/BUSTED [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcu/CFLIST [moved from tools/testing/selftests/rcutorture/configs/CFLIST with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/CFcommon [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcu/SRCU-N [moved from tools/testing/selftests/rcutorture/configs/SRCU-N with 75% similarity]
tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot [moved from tools/testing/selftests/rcutorture/configs/SRCU-N.boot with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/SRCU-P [moved from tools/testing/selftests/rcutorture/configs/SRCU-P with 86% similarity]
tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot [moved from tools/testing/selftests/rcutorture/configs/SRCU-P.boot with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TINY01 [moved from tools/testing/selftests/rcutorture/configs/TINY01 with 92% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TINY02 [moved from tools/testing/selftests/rcutorture/configs/TINY02 with 92% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE01 [moved from tools/testing/selftests/rcutorture/configs/TREE01 with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot [moved from tools/testing/selftests/rcutorture/configs/TREE01.boot with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE02 [moved from tools/testing/selftests/rcutorture/configs/TREE02 with 92% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE03 [moved from tools/testing/selftests/rcutorture/configs/TREE03 with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE04 [moved from tools/testing/selftests/rcutorture/configs/TREE04 with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot [moved from tools/testing/selftests/rcutorture/configs/TREE04.boot with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE05 [moved from tools/testing/selftests/rcutorture/configs/TREE05 with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE05.boot [moved from tools/testing/selftests/rcutorture/configs/TREE05.boot with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE06 [moved from tools/testing/selftests/rcutorture/configs/TREE06 with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE07 [moved from tools/testing/selftests/rcutorture/configs/TREE07 with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE08 [moved from tools/testing/selftests/rcutorture/configs/TREE08 with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE08-T [moved from tools/testing/selftests/rcutorture/configs/TREE08-T with 96% similarity]
tools/testing/selftests/rcutorture/configs/rcu/TREE09 [moved from tools/testing/selftests/rcutorture/configs/TREE09 with 95% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/CFLIST [moved from tools/testing/selftests/rcutorture/configs/v0.0/CFLIST with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/N1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v0.0/N1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/N2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v0.0/N2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/N3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v0.0/N3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/N4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v0.0/N4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/N5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v0.0/N5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT1-nh [moved from tools/testing/selftests/rcutorture/configs/v0.0/NT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/NT3-NH [moved from tools/testing/selftests/rcutorture/configs/v0.0/NT3-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/P1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v0.0/P1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/P2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v0.0/P2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/P3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v0.0/P3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/P4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v0.0/P4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v0.0/P5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT1-nh [moved from tools/testing/selftests/rcutorture/configs/v0.0/PT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/PT2-NH [moved from tools/testing/selftests/rcutorture/configs/v0.0/PT2-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh [moved from tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh with 70% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/CFLIST [moved from tools/testing/selftests/rcutorture/configs/v3.12/CFLIST with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.12/N1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/N2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/N3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.12/N4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/N5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N6---t-nh-SD-smp-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/N6---t-nh-SD-smp-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N7-4-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.12/N7-4-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/N8-2-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.12/N8-2-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT1-nh [moved from tools/testing/selftests/rcutorture/configs/v3.12/NT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/NT3-NH [moved from tools/testing/selftests/rcutorture/configs/v3.12/NT3-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.12/P1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/P2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/P3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.12/P4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/P5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P6---t-nh-SD-smp-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/P6---t-nh-SD-smp-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-all [moved from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-all with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-HP-none [moved from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-HP-none with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P7-4-T-NH-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.12/P7-4-T-NH-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT1-nh [moved from tools/testing/selftests/rcutorture/configs/v3.12/PT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.12/PT2-NH [moved from tools/testing/selftests/rcutorture/configs/v3.12/PT2-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/CFLIST [moved from tools/testing/selftests/rcutorture/configs/v3.3/CFLIST with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/N1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.3/N1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/N2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.3/N2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/N3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.3/N3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/N4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.3/N4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/N5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.3/N5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT1-nh [moved from tools/testing/selftests/rcutorture/configs/v3.3/NT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/NT3-NH [moved from tools/testing/selftests/rcutorture/configs/v3.3/NT3-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/P1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.3/P1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/P2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.3/P2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/P3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.3/P3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/P4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.3/P4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.3/P5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT1-nh [moved from tools/testing/selftests/rcutorture/configs/v3.3/PT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/PT2-NH [moved from tools/testing/selftests/rcutorture/configs/v3.3/PT2-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh [moved from tools/testing/selftests/rcutorture/configs/ver_functions.sh with 72% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/CFLIST [moved from tools/testing/selftests/rcutorture/configs/v3.5/CFLIST with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/N1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.5/N1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/N2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.5/N2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/N3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.5/N3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/N4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.5/N4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/N5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.5/N5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT1-nh [moved from tools/testing/selftests/rcutorture/configs/v3.5/NT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/NT3-NH [moved from tools/testing/selftests/rcutorture/configs/v3.5/NT3-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/P1-S-T-NH-SD-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.5/P1-S-T-NH-SD-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/P2-2-t-nh-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.5/P2-2-t-nh-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/P3-3-T-nh-SD-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.5/P3-3-T-nh-SD-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/P4-A-t-NH-sd-SMP-HP [moved from tools/testing/selftests/rcutorture/configs/v3.5/P4-A-t-NH-sd-SMP-HP with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp [moved from tools/testing/selftests/rcutorture/configs/v3.5/P5-U-T-NH-sd-SMP-hp with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT1-nh [moved from tools/testing/selftests/rcutorture/configs/v3.5/PT1-nh with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/PT2-NH [moved from tools/testing/selftests/rcutorture/configs/v3.5/PT2-NH with 100% similarity]
tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh [moved from tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh with 69% similarity]
tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh [moved from tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh with 66% similarity]
tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt [moved from tools/testing/selftests/rcutorture/doc/TREE_RCU-Kconfig.txt with 100% similarity]
virt/kvm/kvm_main.c

index efe449bdf811db7a1c9f74f38a0371d2c0dd692e..7dbf96b724edb7c2035e358c5736a1bea03c6bb5 100644 (file)
@@ -187,7 +187,7 @@ Description:
                Not all drivers support this attribute.  If it isn't supported,
                attempts to read or write it will yield I/O errors.
 
-What:          /sys/devices/.../power/pm_qos_latency_us
+What:          /sys/devices/.../power/pm_qos_resume_latency_us
 Date:          March 2012
 Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
@@ -205,6 +205,31 @@ Description:
                This attribute has no effect on system-wide suspend/resume and
                hibernation.
 
+What:          /sys/devices/.../power/pm_qos_latency_tolerance_us
+Date:          January 2014
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
+Description:
+               The /sys/devices/.../power/pm_qos_latency_tolerance_us attribute
+               contains the PM QoS active state latency tolerance limit for the
+               given device in microseconds.  That is the maximum memory access
+               latency the device can suffer without any visible adverse
+               effects on user space functionality.  If that value is the
+               string "any", the latency does not matter to user space at all,
+               but hardware should not be allowed to set the latency tolerance
+               for the device automatically.
+
+               Reading "auto" from this file means that the maximum memory
+               access latency for the device may be determined automatically
+               by the hardware as needed.  Writing "auto" to it allows the
+               hardware to be switched to this mode if there are no other
+               latency tolerance requirements from the kernel side.
+
+               This attribute is only present if the feature controlled by it
+               is supported by the hardware.
+
+               This attribute has no effect on runtime suspend and resume of
+               devices and on system-wide suspend/resume and hibernation.
+
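A minimal user-space sketch of how this attribute might be exercised with
plain file I/O (the device path below is hypothetical, chosen only for
illustration; the attribute is present only on devices whose hardware
supports the feature):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical device path -- substitute a real device. */
        const char *attr =
                "/sys/devices/platform/example-dev/power/pm_qos_latency_tolerance_us";
        char buf[32];
        ssize_t n;
        int fd;

        fd = open(attr, O_RDWR);
        if (fd < 0) {
                perror("open"); /* attribute absent: feature unsupported */
                return 1;
        }

        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("current tolerance: %s", buf); /* "auto", "any", or a number */
        }

        /* Request a 100 us active-state latency tolerance for this device. */
        lseek(fd, 0, SEEK_SET);
        if (write(fd, "100", strlen("100")) < 0)
                perror("write");

        close(fd);
        return 0;
}
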
 What:          /sys/devices/.../power/pm_qos_no_power_off
 Date:          September 2012
 Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
index 205a7387844106804de496ec9b3a8c372b725977..64c9276e94218ce2379172afdd98c795bf1fab11 100644 (file)
@@ -12,8 +12,9 @@ Contact:      Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/state file controls the system power state.
                Reading from this file returns what states are supported,
-               which is hard-coded to 'standby' (Power-On Suspend), 'mem'
-               (Suspend-to-RAM), and 'disk' (Suspend-to-Disk).
+               which is hard-coded to 'freeze' (Low-Power Idle), 'standby'
+               (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+               (Suspend-to-Disk).
 
                Writing to this file one of these strings causes the system to
                transition into that state. Please see the file
index 273e654d7d086531ae958e16af780d7a0d1404d1..2f0fcb2112d271b01ba74b16a4c65c82caebb009 100644 (file)
@@ -31,6 +31,14 @@ has lapsed, so this approach may be used in non-GPL software, if desired.
 (In contrast, implementation of RCU is permitted only in software licensed
 under either GPL or LGPL.  Sorry!!!)
 
+In 1987, Rashid et al. described lazy TLB-flush [RichardRashid87a].
+At first glance, this has nothing to do with RCU, but nevertheless
+this paper helped inspire the update-side batching used in the later
+RCU implementation in DYNIX/ptx.  In 1988, Barbara Liskov published
+a description of Argus that noted that use of out-of-date values can
+be tolerated in some situations.  Thus, this paper provides some early
+theoretical justification for use of stale data.
+
 In 1990, Pugh [Pugh90] noted that explicitly tracking which threads
 were reading a given data structure permitted deferred free to operate
 in the presence of non-terminating threads.  However, this explicit
@@ -41,11 +49,11 @@ providing a fine-grained locking design, however, it would be interesting
 to see how much of the performance advantage reported in 1990 remains
 today.
 
-At about this same time, Adams [Adams91] described ``chaotic relaxation'',
-where the normal barriers between successive iterations of convergent
-numerical algorithms are relaxed, so that iteration $n$ might use
-data from iteration $n-1$ or even $n-2$.  This introduces error,
-which typically slows convergence and thus increases the number of
+At about this same time, Andrews [Andrews91textbook] described ``chaotic
+relaxation'', where the normal barriers between successive iterations
+of convergent numerical algorithms are relaxed, so that iteration $n$
+might use data from iteration $n-1$ or even $n-2$.  This introduces
+error, which typically slows convergence and thus increases the number of
 iterations required.  However, this increase is sometimes more than made
 up for by a reduction in the number of expensive barrier operations,
 which are otherwise required to synchronize the threads at the end
@@ -55,7 +63,8 @@ is thus inapplicable to most data structures in operating-system kernels.
 
 In 1992, Henry (now Alexia) Massalin completed a dissertation advising
 parallel programmers to defer processing when feasible to simplify
-synchronization.  RCU makes extremely heavy use of this advice.
+synchronization [HMassalinPhD].  RCU makes extremely heavy use of
+this advice.
 
 In 1993, Jacobson [Jacobson93] verbally described what is perhaps the
 simplest deferred-free technique: simply waiting a fixed amount of time
@@ -90,27 +99,29 @@ mechanism, which is quite similar to RCU [Gamsa99].  These operating
 systems made pervasive use of RCU in place of "existence locks", which
 greatly simplifies locking hierarchies and helps avoid deadlocks.
 
-2001 saw the first RCU presentation involving Linux [McKenney01a]
-at OLS.  The resulting abundance of RCU patches was presented the
-following year [McKenney02a], and use of RCU in dcache was first
-described that same year [Linder02a].
+The year 2000 saw an email exchange that would likely have
+led to yet another independent invention of something like RCU
+[RustyRussell2000a,RustyRussell2000b].  Instead, 2001 saw the first
+RCU presentation involving Linux [McKenney01a] at OLS.  The resulting
+abundance of RCU patches was presented the following year [McKenney02a],
+and use of RCU in dcache was first described that same year [Linder02a].
 
 Also in 2002, Michael [Michael02b,Michael02a] presented "hazard-pointer"
 techniques that defer the destruction of data structures to simplify
 non-blocking synchronization (wait-free synchronization, lock-free
 synchronization, and obstruction-free synchronization are all examples of
-non-blocking synchronization).  In particular, this technique eliminates
-locking, reduces contention, reduces memory latency for readers, and
-parallelizes pipeline stalls and memory latency for writers.  However,
-these techniques still impose significant read-side overhead in the
-form of memory barriers.  Researchers at Sun worked along similar lines
-in the same timeframe [HerlihyLM02].  These techniques can be thought
-of as inside-out reference counts, where the count is represented by the
-number of hazard pointers referencing a given data structure rather than
-the more conventional counter field within the data structure itself.
-The key advantage of inside-out reference counts is that they can be
-stored in immortal variables, thus allowing races between access and
-deletion to be avoided.
+non-blocking synchronization).  The corresponding journal article appeared
+in 2004 [MagedMichael04a].  This technique eliminates locking, reduces
+contention, reduces memory latency for readers, and parallelizes pipeline
+stalls and memory latency for writers.  However, these techniques still
+impose significant read-side overhead in the form of memory barriers.
+Researchers at Sun worked along similar lines in the same timeframe
+[HerlihyLM02].  These techniques can be thought of as inside-out reference
+counts, where the count is represented by the number of hazard pointers
+referencing a given data structure rather than the more conventional
+counter field within the data structure itself.  The key advantage
+of inside-out reference counts is that they can be stored in immortal
+variables, thus allowing races between access and deletion to be avoided.
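
The hazard-pointer idea sketched above can be illustrated in a few lines of
C11.  This is only an illustrative sketch, not code from any of the cited
papers; the names (hp_slot, hp_acquire, and so on) are invented, and only a
single reader with a single hazard-pointer slot is shown:

#include <stdatomic.h>
#include <stddef.h>

struct node { int data; };

_Atomic(struct node *) shared;   /* pointer readers want to dereference */
_Atomic(struct node *) hp_slot;  /* one hazard pointer for one reader   */

/* Reader: publish the pointer in the hazard slot, then re-check that the
 * shared pointer still references it; retry if an updater raced with us.
 * The seq_cst atomics supply the read-side memory barriers mentioned
 * above. */
static struct node *hp_acquire(void)
{
        struct node *p;

        do {
                p = atomic_load(&shared);
                atomic_store(&hp_slot, p);      /* "inside-out" reference */
        } while (p != atomic_load(&shared));
        return p;
}

static void hp_release(void)
{
        atomic_store(&hp_slot, NULL);
}

/* Updater: after unlinking an old node from 'shared', it may free that node
 * only once no hazard slot references it (a scan over all slots in the
 * multi-reader case). */
static int hp_may_free(struct node *old)
{
        return atomic_load(&hp_slot) != old;
}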
 
 By the same token, RCU can be thought of as a "bulk reference count",
 where some form of reference counter covers all reference by a given CPU
@@ -123,8 +134,10 @@ can be thought of in other terms as well.
 
 In 2003, the K42 group described how RCU could be used to create
 hot-pluggable implementations of operating-system functions [Appavoo03a].
-Later that year saw a paper describing an RCU implementation of System
-V IPC [Arcangeli03], and an introduction to RCU in Linux Journal
+Later that year saw a paper describing an RCU implementation
+of System V IPC [Arcangeli03] (following up on a suggestion by
+Hugh Dickins [Dickins02a] and an implementation by Mingming Cao
+[MingmingCao2002IPCRCU]), and an introduction to RCU in Linux Journal
 [McKenney03a].
 
 2004 has seen a Linux-Journal article on use of RCU in dcache
@@ -383,6 +396,21 @@ for Programming Languages and Operating Systems}"
 }
 }
 
+@phdthesis{HMassalinPhD
+,author="H. Massalin"
+,title="Synthesis: An Efficient Implementation of Fundamental Operating
+System Services"
+,school="Columbia University"
+,address="New York, NY"
+,year="1992"
+,annotation={
+       Mondo optimizing compiler.
+       Wait-free stuff.
+       Good advice: defer work to avoid synchronization.  See page 90
+               (PDF page 106), Section 5.4, fourth bullet point.
+}
+}
+
 @unpublished{Jacobson93
 ,author="Van Jacobson"
 ,title="Avoid Read-Side Locking Via Delayed Free"
@@ -671,6 +699,20 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni"
 [Viewed October 18, 2004]"
 }
 
+@conference{Michael02b
+,author="Maged M. Michael"
+,title="High Performance Dynamic Lock-Free Hash Tables and List-Based Sets"
+,Year="2002"
+,Month="August"
+,booktitle="{Proceedings of the 14\textsuperscript{th} Annual ACM
+Symposium on Parallel
+Algorithms and Architecture}"
+,pages="73-82"
+,annotation={
+Like the title says...
+}
+}
+
 @Conference{Linder02a
 ,Author="Hanna Linder and Dipankar Sarma and Maneesh Soni"
 ,Title="Scalability of the Directory Entry Cache"
@@ -727,6 +769,24 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
 }
 }
 
+@conference{Michael02a
+,author="Maged M. Michael"
+,title="Safe Memory Reclamation for Dynamic Lock-Free Objects Using Atomic
+Reads and Writes"
+,Year="2002"
+,Month="August"
+,booktitle="{Proceedings of the 21\textsuperscript{st} Annual ACM
+Symposium on Principles of Distributed Computing}"
+,pages="21-30"
+,annotation={
+       Each thread keeps an array of pointers to items that it is
+       currently referencing.  Sort of an inside-out garbage collection
+       mechanism, but one that requires the accessing code to explicitly
+       state its needs.  Also requires read-side memory barriers on
+       most architectures.
+}
+}
+
 @unpublished{Dickins02a
 ,author="Hugh Dickins"
 ,title="Use RCU for System-V IPC"
@@ -735,6 +795,17 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
 ,note="private communication"
 }
 
+@InProceedings{HerlihyLM02
+,author={Maurice Herlihy and Victor Luchangco and Mark Moir}
+,title="The Repeat Offender Problem: A Mechanism for Supporting Dynamic-Sized,
+Lock-Free Data Structures"
+,booktitle={Proceedings of 16\textsuperscript{th} International
+Symposium on Distributed Computing}
+,year=2002
+,month="October"
+,pages="339-353"
+}
+
 @unpublished{Sarma02b
 ,Author="Dipankar Sarma"
 ,Title="Some dcache\_rcu benchmark numbers"
@@ -749,6 +820,19 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
 }
 }
 
+@unpublished{MingmingCao2002IPCRCU
+,Author="Mingming Cao"
+,Title="[PATCH]updated ipc lock patch"
+,month="October"
+,year="2002"
+,note="Available:
+\url{https://lkml.org/lkml/2002/10/24/262}
+[Viewed February 15, 2014]"
+,annotation={
+       Mingming Cao's patch to introduce RCU to SysV IPC.
+}
+}
+
 @unpublished{LinusTorvalds2003a
 ,Author="Linus Torvalds"
 ,Title="Re: {[PATCH]} small fixes in brlock.h"
@@ -982,6 +1066,23 @@ Realtime Applications"
 }
 }
 
+@article{MagedMichael04a
+,author="Maged M. Michael"
+,title="Hazard Pointers: Safe Memory Reclamation for Lock-Free Objects"
+,Year="2004"
+,Month="June"
+,journal="IEEE Transactions on Parallel and Distributed Systems"
+,volume="15"
+,number="6"
+,pages="491-504"
+,url="Available:
+\url{http://www.research.ibm.com/people/m/michael/ieeetpds-2004.pdf}
+[Viewed March 1, 2005]"
+,annotation={
+       New canonical hazard-pointer citation.
+}
+}
+
 @phdthesis{PaulEdwardMcKenneyPhD
 ,author="Paul E. McKenney"
 ,title="Exploiting Deferred Destruction:
index 91266193b8f49e840709db77d75f8357428b2806..9d10d1db16a53ffc464c484cc7fe1b7ef2a1e864 100644 (file)
@@ -256,10 +256,10 @@ over a rather long period of time, but improvements are always welcome!
                variations on this theme.
 
        b.      Limiting update rate.  For example, if updates occur only
-               once per hour, then no explicit rate limiting is required,
-               unless your system is already badly broken.  The dcache
-               subsystem takes this approach -- updates are guarded
-               by a global lock, limiting their rate.
+               once per hour, then no explicit rate limiting is
+               required, unless your system is already badly broken.
+               Older versions of the dcache subsystem take this approach,
+               guarding updates with a global lock, limiting their rate.
 
        c.      Trusted update -- if updates can only be done manually by
                superuser or some other trusted user, then it might not
@@ -268,7 +268,8 @@ over a rather long period of time, but improvements are always welcome!
                the machine.
 
        d.      Use call_rcu_bh() rather than call_rcu(), in order to take
-               advantage of call_rcu_bh()'s faster grace periods.
+               advantage of call_rcu_bh()'s faster grace periods.  (This
+               is only a partial solution, though.)
 
        e.      Periodically invoke synchronize_rcu(), permitting a limited
                number of updates per grace period.
@@ -276,6 +277,13 @@ over a rather long period of time, but improvements are always welcome!
        The same cautions apply to call_rcu_bh(), call_rcu_sched(),
        call_srcu(), and kfree_rcu().
 
+       Note that although these primitives do take action to avoid memory
+       exhaustion when any given CPU has too many callbacks, a determined
+       user could still exhaust memory.  This is especially the case
+       if a system with a large number of CPUs has been configured to
+       offload all of its RCU callbacks onto a single CPU, or if the
+       system has relatively little free memory.
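
	As a concrete illustration of items b and e -- this is only a
	sketch, not code from the kernel tree; struct foo, foo_update(),
	and the batch threshold of 16 are invented for the example -- an
	updater might bound the number of outstanding callbacks as follows:

	#include <linux/atomic.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct foo {
		int data;
		struct rcu_head rcu;
	};

	static struct foo __rcu *gp;
	static DEFINE_SPINLOCK(foo_lock);
	static atomic_t foo_updates_pending = ATOMIC_INIT(0);

	static void foo_update(int new_data)
	{
		struct foo *newp, *oldp;

		newp = kmalloc(sizeof(*newp), GFP_KERNEL);
		if (!newp)
			return;
		newp->data = new_data;

		spin_lock(&foo_lock);
		oldp = rcu_dereference_protected(gp,
						 lockdep_is_held(&foo_lock));
		rcu_assign_pointer(gp, newp);
		spin_unlock(&foo_lock);

		if (oldp)
			kfree_rcu(oldp, rcu);	/* deferred free of old version */

		/* Item e: every 16 updates, wait for a grace period so the
		 * number of outstanding callbacks stays bounded. */
		if (atomic_inc_return(&foo_updates_pending) >= 16) {
			atomic_set(&foo_updates_pending, 0);
			synchronize_rcu();
		}
	}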
+
 9.     All RCU list-traversal primitives, which include
        rcu_dereference(), list_for_each_entry_rcu(), and
        list_for_each_safe_rcu(), must be either within an RCU read-side
index 5e054bfe4dde8e30e178c073d19c8c2b520a7e0f..85e24c4f215c45136eb3393763dc802b007ac408 100644 (file)
@@ -35,11 +35,13 @@ ffffffbc00000000    ffffffbdffffffff           8GB          vmemmap
 
 ffffffbe00000000       ffffffbffbbfffff          ~8GB          [guard, future vmmemap]
 
-ffffffbffbc00000       ffffffbffbdfffff           2MB          earlyprintk device
+ffffffbffa000000       ffffffbffaffffff          16MB          PCI I/O space
+
+ffffffbffb000000       ffffffbffbbfffff          12MB          [guard]
 
-ffffffbffbe00000       ffffffbffbe0ffff          64KB          PCI I/O space
+ffffffbffbc00000       ffffffbffbdfffff           2MB          earlyprintk device
 
-ffffffbffbe10000       ffffffbcffffffff          ~2MB          [guard]
+ffffffbffbe00000       ffffffbffbffffff           2MB          [guard]
 
 ffffffbffc000000       ffffffbfffffffff          64MB          modules
 
@@ -60,11 +62,13 @@ fffffdfc00000000    fffffdfdffffffff           8GB          vmemmap
 
 fffffdfe00000000       fffffdfffbbfffff          ~8GB          [guard, future vmmemap]
 
-fffffdfffbc00000       fffffdfffbdfffff           2MB          earlyprintk device
+fffffdfffa000000       fffffdfffaffffff          16MB          PCI I/O space
+
+fffffdfffb000000       fffffdfffbbfffff          12MB          [guard]
 
-fffffdfffbe00000       fffffdfffbe0ffff          64KB          PCI I/O space
+fffffdfffbc00000       fffffdfffbdfffff           2MB          earlyprintk device
 
-fffffdfffbe10000       fffffdfffbffffff          ~2MB          [guard]
+fffffdfffbe00000       fffffdfffbffffff           2MB          [guard]
 
 fffffdfffc000000       fffffdffffffffff          64MB          modules
 
index ce0666e5103682a766caff09a4af75513c38f77f..0060d76b445f3e42d89bf0a47d15440c2cd440e7 100644 (file)
@@ -92,7 +92,3 @@ values:
 cpu    - number of the affected CPU
 old    - old frequency
 new    - new frequency
-
-If the cpufreq core detects the frequency has changed while the system
-was suspended, these notifiers are called with CPUFREQ_RESUMECHANGE as
-second argument.
index 8b1a4451422e747a2278c0ba48ed4ca58e131ba3..48da5fdcb9f11b5671859a63e61ce2c9f40bab03 100644 (file)
@@ -61,7 +61,13 @@ target_index         -       See below on the differences.
 
 And optionally
 
-cpufreq_driver.exit -          A pointer to a per-CPU cleanup function.
+cpufreq_driver.exit -          A pointer to a per-CPU cleanup
+                               function called during the CPU_POST_DEAD
+                               phase of the CPU hotplug process.
+
+cpufreq_driver.stop_cpu -      A pointer to a per-CPU stop function
+                               called during the CPU_DOWN_PREPARE phase
+                               of the CPU hotplug process.
 
 cpufreq_driver.resume -                A pointer to a per-CPU resume function
                                which is called with interrupts disabled
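
A sketch of how a driver might provide these two optional callbacks (the
my_* names are invented; the prototypes are assumed to be
int (*exit)(struct cpufreq_policy *) and
void (*stop_cpu)(struct cpufreq_policy *)):

#include <linux/cpufreq.h>

static int my_cpufreq_exit(struct cpufreq_policy *policy)
{
        /* Runs in the CPU_POST_DEAD phase: the CPU is already offline,
         * so only per-policy bookkeeping should be torn down here. */
        return 0;
}

static void my_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
        /* Runs in the CPU_DOWN_PREPARE phase, while the CPU is still
         * online: a reasonable place to drop to a safe frequency. */
}

static struct cpufreq_driver my_cpufreq_driver = {
        .name           = "my-cpufreq",
        .exit           = my_cpufreq_exit,
        .stop_cpu       = my_cpufreq_stop_cpu,
        /* .init, .verify, .target_index, etc. omitted from this sketch */
};
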
index d74091a8a3bfd243490d83faedd452ec9a2c426c..5fc03134a9996473e4fe3ce1cd895b49da5b96f1 100644 (file)
@@ -1,4 +1,4 @@
-Marvell Armada 370 and Armada XP Interrupt Controller
+Marvell Armada 370, 375, 38x, XP Interrupt Controller
 -----------------------------------------------------
 
 Required properties:
@@ -16,7 +16,13 @@ Required properties:
   automatically map to the interrupt controller registers of the
   current CPU)
 
+Optional properties:
 
+- interrupts: If defined, then it indicates that this MPIC is
+  connected as a slave to another interrupt controller. This is
+  typically the case on Armada 375 and Armada 38x, where the MPIC is
+  connected as a slave to the Cortex-A9 GIC. The provided interrupt
+  indicates to which GIC interrupt the MPIC output is connected.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/arm/marvell,dove.txt b/Documentation/devicetree/bindings/arm/marvell,dove.txt
new file mode 100644 (file)
index 0000000..aaaf64c
--- /dev/null
@@ -0,0 +1,22 @@
+Marvell Dove Platforms Device Tree Bindings
+-----------------------------------------------
+
+Boards with a Marvell Dove SoC shall have the following properties:
+
+Required root node property:
+- compatible: must contain "marvell,dove";
+
+* Global Configuration registers
+
+The Global Configuration registers of the Dove SoC are exposed through a
+shared syscon node.
+
+Required properties:
+- compatible: must contain "marvell,dove-global-config" and "syscon".
+- reg: base address and size of the Global Configuration registers.
+
+Example:
+
+gconf: global-config@e802c {
+       compatible = "marvell,dove-global-config", "syscon";
+       reg = <0xe802c 0x14>;
+};
index 89de1564950ce64cf2bdb1e087da04e2043a0db6..48b285ffa3a650e7d0adb028e7f577617b2dff9c 100644 (file)
@@ -4,17 +4,33 @@ SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
 Required properties:
-- compatible        : compatible list, contains "snps,spear-ahci"
+- compatible        : compatible list, one of "snps,spear-ahci",
+                      "snps,exynos5440-ahci", "ibm,476gtr-ahci",
+                      "allwinner,sun4i-a10-ahci", "fsl,imx53-ahci",
+                      "fsl,imx6q-ahci" or "snps,dwc-ahci"
 - interrupts        : <interrupt mapping for SATA IRQ>
 - reg               : <registers mapping>
 
 Optional properties:
 - dma-coherent      : Present if dma operations are coherent
+- clocks            : a list of phandle + clock specifier pairs
+- target-supply     : regulator for SATA target power
 
-Example:
+"fsl,imx53-ahci", "fsl,imx6q-ahci" required properties:
+- clocks            : must contain the sata, sata_ref and ahb clocks
+- clock-names       : must contain "ahb" for the ahb clock
+
+Examples:
         sata@ffe08000 {
                compatible = "snps,spear-ahci";
                reg = <0xffe08000 0x1000>;
                interrupts = <115>;
-
         };
+
+       ahci: sata@01c18000 {
+               compatible = "allwinner,sun4i-a10-ahci";
+               reg = <0x01c18000 0x1000>;
+               interrupts = <56>;
+               clocks = <&pll6 0>, <&ahb_gates 25>;
+               target-supply = <&reg_ahci_5v>;
+       };
diff --git a/Documentation/devicetree/bindings/ata/apm-xgene.txt b/Documentation/devicetree/bindings/ata/apm-xgene.txt
new file mode 100644 (file)
index 0000000..7bcfbf5
--- /dev/null
@@ -0,0 +1,76 @@
+* APM X-Gene 6.0 Gb/s SATA host controller nodes
+
+SATA host controller nodes are defined to describe on-chip Serial ATA
+controllers. Each SATA controller (pair of ports) has its own node.
+
+Required properties:
+- compatible           : Shall contain:
+  * "apm,xgene-ahci"
+- reg                  : First memory resource shall be the AHCI memory
+                         resource.
+                         Second memory resource shall be the host controller
+                         core memory resource.
+                         Third memory resource shall be the host controller
+                         diagnostic memory resource.
+                         Fourth memory resource shall be the host controller
+                         AXI memory resource.
+                         Fifth optional memory resource shall be the host
+                         controller MUX memory resource if required.
+- interrupts           : Interrupt-specifier for SATA host controller IRQ.
+- clocks               : Reference to the clock entry.
+- phys                 : A list of phandles + phy-specifiers, one for each
+                         entry in phy-names.
+- phy-names            : Should contain:
+  * "sata-phy" for the SATA 6.0Gbps PHY
+
+Optional properties:
+- status               : Shall be "ok" if enabled or "disabled" if disabled.
+                         Default is "ok".
+
+Example:
+               sataclk: sataclk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <1>;
+                       clock-frequency = <100000000>;
+                       clock-output-names = "sataclk";
+               };
+
+               phy2: phy@1f22a000 {
+                       compatible = "apm,xgene-phy";
+                       reg = <0x0 0x1f22a000 0x0 0x100>;
+                       #phy-cells = <1>;
+               };
+
+               phy3: phy@1f23a000 {
+                       compatible = "apm,xgene-phy";
+                       reg = <0x0 0x1f23a000 0x0 0x100>;
+                       #phy-cells = <1>;
+               };
+
+               sata2: sata@1a400000 {
+                       compatible = "apm,xgene-ahci";
+                       reg = <0x0 0x1a400000 0x0 0x1000>,
+                             <0x0 0x1f220000 0x0 0x1000>,
+                             <0x0 0x1f22d000 0x0 0x1000>,
+                             <0x0 0x1f22e000 0x0 0x1000>,
+                             <0x0 0x1f227000 0x0 0x1000>;
+                       interrupts = <0x0 0x87 0x4>;
+                       status = "ok";
+                       clocks = <&sataclk 0>;
+                       phys = <&phy2 0>;
+                       phy-names = "sata-phy";
+               };
+
+               sata3: sata@1a800000 {
+                       compatible = "apm,xgene-ahci-pcie";
+                       reg = <0x0 0x1a800000 0x0 0x1000>,
+                             <0x0 0x1f230000 0x0 0x1000>,
+                             <0x0 0x1f23d000 0x0 0x1000>,
+                             <0x0 0x1f23e000 0x0 0x1000>,
+                             <0x0 0x1f237000 0x0 0x1000>;
+                       interrupts = <0x0 0x88 0x4>;
+                       status = "ok";
+                       clocks = <&sataclk 0>;
+                       phys = <&phy3 0>;
+                       phy-names = "sata-phy";
+               };
index 32cec4b26cd08f69e5380c05234b4c0fb50bc9e4..b290ca150d30eabb5bd25ac64ff8efb682caab47 100644 (file)
@@ -2,7 +2,7 @@ Allwinner Sunxi Interrupt Controller
 
 Required properties:
 
-- compatible : should be "allwinner,sun4i-ic"
+- compatible : should be "allwinner,sun4i-a10-ic"
 - reg : Specifies base physical address and size of the registers.
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Specifies the number of cells needed to encode an
@@ -11,7 +11,7 @@ Required properties:
 Example:
 
 intc: interrupt-controller {
-       compatible = "allwinner,sun4i-ic";
+       compatible = "allwinner,sun4i-a10-ic";
        reg = <0x01c20400 0x400>;
        interrupt-controller;
        #interrupt-cells = <1>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun67i-sc-nmi.txt b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun67i-sc-nmi.txt
new file mode 100644 (file)
index 0000000..d1c5cda
--- /dev/null
@@ -0,0 +1,27 @@
+Allwinner Sunxi NMI Controller
+==============================
+
+Required properties:
+
+- compatible : should be "allwinner,sun7i-a20-sc-nmi" or
+  "allwinner,sun6i-a31-sc-nmi"
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+  interrupt source. The value shall be 2. The first cell is the IRQ number, the
+  second cell the trigger type as defined in interrupt.txt in this directory.
+- interrupt-parent: Specifies the parent interrupt controller.
+- interrupts: Specifies the interrupt line (NMI) which is handled by
+  the interrupt controller in the parent controller's notation. This value
+  shall be the NMI.
+
+Example:
+
+sc-nmi-intc@01c00030 {
+       compatible = "allwinner,sun7i-a20-sc-nmi";
+       interrupt-controller;
+       #interrupt-cells = <2>;
+       reg = <0x01c00030 0x0c>;
+       interrupt-parent = <&gic>;
+       interrupts = <0 0 4>;
+};
diff --git a/Documentation/devicetree/bindings/mfd/s2mpa01.txt b/Documentation/devicetree/bindings/mfd/s2mpa01.txt
new file mode 100644 (file)
index 0000000..c13d3d8
--- /dev/null
@@ -0,0 +1,90 @@
+
+* Samsung S2MPA01 Voltage and Current Regulator
+
+The Samsung S2MPA01 is a multi-function device which includes high-efficiency
+buck converters (including a dual-phase buck converter), various LDOs, and an
+RTC. It is interfaced to the host controller using an I2C interface.
+Each sub-block is addressed by the host system using different I2C slave
+addresses.
+
+Required properties:
+- compatible: Should be "samsung,s2mpa01-pmic".
+- reg: Specifies the I2C slave address of the PMIC block. It should be 0x66.
+
+Optional properties:
+- interrupt-parent: Specifies the phandle of the interrupt controller to which
+  the interrupts from s2mpa01 are delivered.
+- interrupts: An interrupt specifier for the sole interrupt generated by the
+  device.
+
+Optional nodes:
+- regulators: The regulators of s2mpa01 that have to be instantiated should be
+  included in a sub-node named 'regulators'. Regulator nodes and constraints
+  included in this sub-node use the standard regulator bindings which are
+  documented elsewhere.
+
+Properties for BUCK regulator nodes:
+- regulator-ramp-delay: ramp delay in uV/us. May be 6250, 12500
+  (default), 25000, or 50000. May be 0 to disable the ramp delay on
+  BUCK{1,2,3,4}.
+
+  In the absence of the regulator-ramp-delay property, the default ramp
+  delay will be used.
+
+  NOTE: Some BUCKs share the ramp rate setting, i.e. the same ramp value will
+  be set for a particular group of BUCKs, so provide the same
+  regulator-ramp-delay=<value> for all of them.
+
+  The following BUCKs share ramp settings:
+  * 1 and 6
+  * 2 and 4
+  * 8, 9, and 10
+
+The following are the names of the regulators that the s2mpa01 PMIC block
+supports. Note: The 'n' in LDOn and BUCKn represents the LDO or BUCK number
+as per the datasheet of s2mpa01.
+
+       - LDOn
+                 - valid values for n are 1 to 26
+                 - Example: LDO1, LDO2, LDO26
+       - BUCKn
+                 - valid values for n are 1 to 10.
+                 - Example: BUCK1, BUCK2, BUCK9
+
+Example:
+
+       s2mpa01_pmic@66 {
+               compatible = "samsung,s2mpa01-pmic";
+               reg = <0x66>;
+
+               regulators {
+                       ldo1_reg: LDO1 {
+                               regulator-name = "VDD_ALIVE";
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <1000000>;
+                       };
+
+                       ldo2_reg: LDO2 {
+                               regulator-name = "VDDQ_MMC2";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-always-on;
+                       };
+
+                       buck1_reg: BUCK1 {
+                               regulator-name = "vdd_mif";
+                               regulator-min-microvolt = <950000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                       };
+
+                       buck2_reg: BUCK2 {
+                               regulator-name = "vdd_arm";
+                               regulator-min-microvolt = <950000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-always-on;
+                               regulator-boot-on;
+                               regulator-ramp-delay = <50000>;
+                       };
+               };
+       };
index 15ee89c3cc7b3406451c91038c3c4cb312ec5488..f69bec294f0200d55806109b0c70c94f7ae7c732 100644 (file)
@@ -1,5 +1,5 @@
 
-* Samsung S2MPS11 Voltage and Current Regulator
+* Samsung S2MPS11 and S2MPS14 Voltage and Current Regulator
 
 The Samsung S2MPS11 is a multi-function device which includes voltage and
 current regulators, RTC, charger controller and other sub-blocks. It is
@@ -7,7 +7,7 @@ interfaced to the host controller using an I2C interface. Each sub-block is
 addressed by the host system using different I2C slave addresses.
 
 Required properties:
-- compatible: Should be "samsung,s2mps11-pmic".
+- compatible: Should be "samsung,s2mps11-pmic" or "samsung,s2mps14-pmic".
 - reg: Specifies the I2C slave address of the pmic block. It should be 0x66.
 
 Optional properties:
@@ -59,10 +59,14 @@ supports. Note: The 'n' in LDOn and BUCKn represents the LDO or BUCK number
 as per the datasheet of s2mps11.
 
        - LDOn
-                 - valid values for n are 1 to 38
+                 - valid values for n are:
+                       - S2MPS11: 1 to 38
+                       - S2MPS14: 1 to 25
                  - Example: LDO1, LDO2, LDO28
        - BUCKn
-                 - valid values for n are 1 to 10.
+                 - valid values for n are:
+                       - S2MPS11: 1 to 10
+                       - S2MPS14: 1 to 5
                  - Example: BUCK1, BUCK2, BUCK9
 
 Example:
index b4bd98af1cc7979ec1f6c7c6645173cd5c8b2d35..38833e63a59f901f79578f3ec7bba01227ec770d 100644 (file)
@@ -11,7 +11,7 @@ Required properties:
 - #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
   The first cell is the IRQ number.
   The second cell is the flags, encoded as the trigger masks from
-  Documentation/devicetree/bindings/interrupts.txt
+  Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
 - regulators: This is the list of child nodes that specify the regulator
   initialization data for defined regulators. Not all regulators for the given
   device need to be present. The definition for each of these nodes is defined
index 01ef408e205f18ce5ac3c16b44e559f01f877f08..adda2a8d1d5298dcf3b35634d26a3022fc686a14 100644 (file)
@@ -5,6 +5,7 @@ part and usage.
 
 Required properties:
 - compatible: "marvell,88f6710-pinctrl"
+- reg: register specifier of MPP registers
 
 Available mpp pins/groups and functions:
 Note: brackets (x) are not part of the mpp name for marvell,function and given
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
new file mode 100644 (file)
index 0000000..7de0cda
--- /dev/null
@@ -0,0 +1,82 @@
+* Marvell Armada 375 SoC pinctrl driver for mpp
+
+Please refer to marvell,mvebu-pinctrl.txt in this directory for common binding
+part and usage.
+
+Required properties:
+- compatible: "marvell,88f6720-pinctrl"
+- reg: register specifier of MPP registers
+
+Available mpp pins/groups and functions:
+Note: brackets (x) are not part of the mpp name for marvell,function and given
+only for more detailed description in this document.
+
+name          pins     functions
+================================================================================
+mpp0          0        gpio, dev(ad2), spi0(cs1), spi1(cs1)
+mpp1          1        gpio, dev(ad3), spi0(mosi), spi1(mosi)
+mpp2          2        gpio, dev(ad4), ptp(eventreq), led(c0), audio(sdi)
+mpp3          3        gpio, dev(ad5), ptp(triggen), led(p3), audio(mclk)
+mpp4          4        gpio, dev(ad6), spi0(miso), spi1(miso)
+mpp5          5        gpio, dev(ad7), spi0(cs2), spi1(cs2)
+mpp6          6        gpio, dev(ad0), led(p1), audio(rclk)
+mpp7          7        gpio, dev(ad1), ptp(clk), led(p2), audio(extclk)
+mpp8          8        gpio, dev (bootcs), spi0(cs0), spi1(cs0)
+mpp9          9        gpio, nf(wen), spi0(sck), spi1(sck)
+mpp10        10        gpio, nf(ren), dram(vttctrl), led(c1)
+mpp11        11        gpio, dev(a0), led(c2), audio(sdo)
+mpp12        12        gpio, dev(a1), audio(bclk)
+mpp13        13        gpio, dev(readyn), pcie0(rstoutn), pcie1(rstoutn)
+mpp14        14        gpio, i2c0(sda), uart1(txd)
+mpp15        15        gpio, i2c0(sck), uart1(rxd)
+mpp16        16        gpio, uart0(txd)
+mpp17        17        gpio, uart0(rxd)
+mpp18        18        gpio, tdm(intn)
+mpp19        19        gpio, tdm(rstn)
+mpp20        20        gpio, tdm(pclk)
+mpp21        21        gpio, tdm(fsync)
+mpp22        22        gpio, tdm(drx)
+mpp23        23        gpio, tdm(dtx)
+mpp24        24        gpio, led(p0), ge1(rxd0), sd(cmd), uart0(rts)
+mpp25        25        gpio, led(p2), ge1(rxd1), sd(d0), uart0(cts)
+mpp26        26        gpio, pcie0(clkreq), ge1(rxd2), sd(d2), uart1(rts)
+mpp27        27        gpio, pcie1(clkreq), ge1(rxd3), sd(d1), uart1(cts)
+mpp28        28        gpio, led(p3), ge1(txctl), sd(clk)
+mpp29        29        gpio, pcie1(clkreq), ge1(rxclk), sd(d3)
+mpp30        30        gpio, ge1(txd0), spi1(cs0)
+mpp31        31        gpio, ge1(txd1), spi1(mosi)
+mpp32        32        gpio, ge1(txd2), spi1(sck), ptp(triggen)
+mpp33        33        gpio, ge1(txd3), spi1(miso)
+mpp34        34        gpio, ge1(txclkout), spi1(sck)
+mpp35        35        gpio, ge1(rxctl), spi1(cs1), spi0(cs2)
+mpp36        36        gpio, pcie0(clkreq)
+mpp37        37        gpio, pcie0(clkreq), tdm(intn), ge(mdc)
+mpp38        38        gpio, pcie1(clkreq), ge(mdio)
+mpp39        39        gpio, ref(clkout)
+mpp40        40        gpio, uart1(txd)
+mpp41        41        gpio, uart1(rxd)
+mpp42        42        gpio, spi1(cs2), led(c0)
+mpp43        43        gpio, sata0(prsnt), dram(vttctrl)
+mpp44        44        gpio, sata0(prsnt)
+mpp45        45        gpio, spi0(cs2), pcie0(rstoutn)
+mpp46        46        gpio, led(p0), ge0(txd0), ge1(txd0)
+mpp47        47        gpio, led(p1), ge0(txd1), ge1(txd1)
+mpp48        48        gpio, led(p2), ge0(txd2), ge1(txd2)
+mpp49        49        gpio, led(p3), ge0(txd3), ge1(txd3)
+mpp50        50        gpio, led(c0), ge0(rxd0), ge1(rxd0)
+mpp51        51        gpio, led(c1), ge0(rxd1), ge1(rxd1)
+mpp52        52        gpio, led(c2), ge0(rxd2), ge1(rxd2)
+mpp53        53        gpio, pcie1(rstoutn), ge0(rxd3), ge1(rxd3)
+mpp54        54        gpio, pcie0(rstoutn), ge0(rxctl), ge1(rxctl)
+mpp55        55        gpio, ge0(rxclk), ge1(rxclk)
+mpp56        56        gpio, ge0(txclkout), ge1(txclkout)
+mpp57        57        gpio, ge0(txctl), ge1(txctl)
+mpp58        58        gpio, led(c0)
+mpp59        59        gpio, led(c1)
+mpp60        60        gpio, uart1(txd), led(c2)
+mpp61        61        gpio, i2c1(sda), uart1(rxd), spi1(cs2), led(p0)
+mpp62        62        gpio, i2c1(sck), led(p1)
+mpp63        63        gpio, ptp(triggen), led(p2)
+mpp64        64        gpio, dram(vttctrl), led(p3)
+mpp65        65        gpio, sata1(prsnt)
+mpp66        66        gpio, ptp(eventreq), spi1(cs3)
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
new file mode 100644 (file)
index 0000000..b17c968
--- /dev/null
@@ -0,0 +1,80 @@
+* Marvell Armada 380/385 SoC pinctrl driver for mpp
+
+Please refer to marvell,mvebu-pinctrl.txt in this directory for common binding
+part and usage.
+
+Required properties:
+- compatible: "marvell,88f6810-pinctrl", "marvell,88f6820-pinctrl" or
+  "marvell,88f6828-pinctrl" depending on the specific variant of the
+  SoC being used.
+- reg: register specifier of MPP registers
+
+Available mpp pins/groups and functions:
+Note: brackets (x) are not part of the mpp name for marvell,function and given
+only for more detailed description in this document.
+
+name          pins     functions
+================================================================================
+mpp0          0        gpio, ua0(rxd)
+mpp1          1        gpio, ua0(txd)
+mpp2          2        gpio, i2c0(sck)
+mpp3          3        gpio, i2c0(sda)
+mpp4          4        gpio, ge(mdc), ua1(txd), ua0(rts)
+mpp5          5        gpio, ge(mdio), ua1(rxd), ua0(cts)
+mpp6          6        gpio, ge0(txclkout), ge0(crs), dev(cs3)
+mpp7          7        gpio, ge0(txd0), dev(ad9)
+mpp8          8        gpio, ge0(txd1), dev(ad10)
+mpp9          9        gpio, ge0(txd2), dev(ad11)
+mpp10         10       gpio, ge0(txd3), dev(ad12)
+mpp11         11       gpio, ge0(txctl), dev(ad13)
+mpp12         12       gpio, ge0(rxd0), pcie0(rstout), pcie1(rstout) [1], spi0(cs1), dev(ad14)
+mpp13         13       gpio, ge0(rxd1), pcie0(clkreq), pcie1(clkreq) [1], spi0(cs2), dev(ad15)
+mpp14         14       gpio, ge0(rxd2), ptp(clk), m(vtt_ctrl), spi0(cs3), dev(wen1)
+mpp15         15       gpio, ge0(rxd3), ge(mdc slave), pcie0(rstout), spi0(mosi), pcie1(rstout) [1]
+mpp16         16       gpio, ge0(rxctl), ge(mdio slave), m(decc_err), spi0(miso), pcie0(clkreq)
+mpp17         17       gpio, ge0(rxclk), ptp(clk), ua1(rxd), spi0(sck), sata1(prsnt)
+mpp18         18       gpio, ge0(rxerr), ptp(trig_gen), ua1(txd), spi0(cs0), pcie1(rstout) [1]
+mpp19         19       gpio, ge0(col), ptp(event_req), pcie0(clkreq), sata1(prsnt), ua0(cts)
+mpp20         20       gpio, ge0(txclk), ptp(clk), pcie1(rstout) [1], sata0(prsnt), ua0(rts)
+mpp21         21       gpio, spi0(cs1), ge1(rxd0), sata0(prsnt), sd0(cmd), dev(bootcs)
+mpp22         22       gpio, spi0(mosi), dev(ad0)
+mpp23         23       gpio, spi0(sck), dev(ad2)
+mpp24         24       gpio, spi0(miso), ua0(cts), ua1(rxd), sd0(d4), dev(ready)
+mpp25         25       gpio, spi0(cs0), ua0(rts), ua1(txd), sd0(d5), dev(cs0)
+mpp26         26       gpio, spi0(cs2), i2c1(sck), sd0(d6), dev(cs1)
+mpp27         27       gpio, spi0(cs3), ge1(txclkout), i2c1(sda), sd0(d7), dev(cs2)
+mpp28         28       gpio, ge1(txd0), sd0(clk), dev(ad5)
+mpp29         29       gpio, ge1(txd1), dev(ale0)
+mpp30         30       gpio, ge1(txd2), dev(oen)
+mpp31         31       gpio, ge1(txd3), dev(ale1)
+mpp32         32       gpio, ge1(txctl), dev(wen0)
+mpp33         33       gpio, m(decc_err), dev(ad3)
+mpp34         34       gpio, dev(ad1)
+mpp35         35       gpio, ref(clk_out1), dev(a1)
+mpp36         36       gpio, ptp(trig_gen), dev(a0)
+mpp37         37       gpio, ptp(clk), ge1(rxclk), sd0(d3), dev(ad8)
+mpp38         38       gpio, ptp(event_req), ge1(rxd1), ref(clk_out0), sd0(d0), dev(ad4)
+mpp39         39       gpio, i2c1(sck), ge1(rxd2), ua0(cts), sd0(d1), dev(a2)
+mpp40         40       gpio, i2c1(sda), ge1(rxd3), ua0(rts), sd0(d2), dev(ad6)
+mpp41         41       gpio, ua1(rxd), ge1(rxctl), ua0(cts), spi1(cs3), dev(burst/last)
+mpp42         42       gpio, ua1(txd), ua0(rts), dev(ad7)
+mpp43         43       gpio, pcie0(clkreq), m(vtt_ctrl), m(decc_err), pcie0(rstout), dev(clkout)
+mpp44         44       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [2], pcie0(rstout)
+mpp45         45       gpio, ref(clk_out0), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
+mpp46         46       gpio, ref(clk_out1), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
+mpp47         47       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], spi1(cs2), sata3(prsnt) [2]
+mpp48         48       gpio, sata0(prsnt), m(vtt_ctrl), tdm2c(pclk), audio(mclk), sd0(d4)
+mpp49         49       gpio, sata2(prsnt) [2], sata3(prsnt) [2], tdm2c(fsync), audio(lrclk), sd0(d5)
+mpp50         50       gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(drx), audio(extclk), sd0(cmd)
+mpp51         51       gpio, tdm2c(dtx), audio(sdo), m(decc_err)
+mpp52         52       gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(intn), audio(sdi), sd0(d6)
+mpp53         53       gpio, sata1(prsnt), sata0(prsnt), tdm2c(rstn), audio(bclk), sd0(d7)
+mpp54         54       gpio, sata0(prsnt), sata1(prsnt), pcie0(rstout), pcie1(rstout) [1], sd0(d3)
+mpp55         55       gpio, ua1(cts), ge(mdio), pcie1(clkreq) [1], spi1(cs1), sd0(d0)
+mpp56         56       gpio, ua1(rts), ge(mdc), m(decc_err), spi1(mosi)
+mpp57         57       gpio, spi1(sck), sd0(clk)
+mpp58         58       gpio, pcie1(clkreq) [1], i2c1(sck), pcie2(clkreq), spi1(miso), sd0(d1)
+mpp59         59       gpio, pcie0(rstout), i2c1(sda), pcie1(rstout) [1], spi1(cs0), sd0(d2)
+
+[1]: only available on 88F6820 and 88F6828
+[2]: only available on 88F6828
index bfa0a2e5e0cb929d91f666537fe45a99199005d0..373dbccd7ab0e7c5b4905b7c700e8fada265be1a 100644 (file)
@@ -6,6 +6,7 @@ part and usage.
 Required properties:
 - compatible: "marvell,mv78230-pinctrl", "marvell,mv78260-pinctrl",
               "marvell,mv78460-pinctrl"
+- reg: register specifier of MPP registers
 
 This driver supports all Armada XP variants, i.e. mv78230, mv78260, and mv78460.
 
index 50ec3512a292c3cbfc8a2ab4aba5c005c10ad2db..cf52477cc7ee3affd5527ccfeaef0fd841e80cff 100644 (file)
@@ -6,6 +6,7 @@ part and usage.
 Required properties:
 - compatible: "marvell,dove-pinctrl"
 - clocks: (optional) phandle of pdma clock
+- reg: register specifiers of MPP, MPP4, and PMU MPP registers
 
 Available mpp pins/groups and functions:
 Note: brackets (x) are not part of the mpp name for marvell,function and given
index 95daf6335c3796e2256ddb94e3e21190cf0c1b35..730444a9a4de8a3aba0e6be2864f974b4f5da1a5 100644 (file)
@@ -8,6 +8,7 @@ Required properties:
               "marvell,88f6190-pinctrl", "marvell,88f6192-pinctrl",
               "marvell,88f6281-pinctrl", "marvell,88f6282-pinctrl"
               "marvell,98dx4122-pinctrl"
+- reg: register specifier of MPP registers
 
 This driver supports all kirkwood variants, i.e. 88f6180, 88f619x, and 88f628x.
 It also support the 88f6281-based variant in the 98dx412x Bobcat SoCs.
index 0a26c3aa4e6d0ccc20ad78c981f52fb4733e14aa..0c09f4eb2af09db6aa785d6c6ea70f0086de6240 100644 (file)
@@ -37,7 +37,7 @@ uart1: serial@12100 {
 
 pinctrl: pinctrl@d0200 {
        compatible = "marvell,dove-pinctrl";
-       reg = <0xd0200 0x20>;
+       reg = <0xd0200 0x14>, <0xd0440 0x04>, <0xd802c 0x08>;
 
        pmx_uart1_sw: pmx-uart1-sw {
                marvell,pins = "mpp_uart1";
index bc0dfdfdb14860577483963c01979b3f8b18ffbd..66dcaa9efd7401916d3ec8cc9e03c9ebaff50a22 100644 (file)
@@ -63,6 +63,13 @@ Optional properties:
                /* input, enable bits, disable bits, mask */
                pinctrl-single,input-schmitt-enable = <0x30 0x40 0 0x70>;
 
+- pinctrl-single,low-power-mode : array of values used to configure the low
+  power mode of this pin. On some silicon, the low power mode controls the
+  output of the pin when the pad containing the pin enters low power mode.
+               /* low power mode value, mask */
+               pinctrl-single,low-power-mode = <0x288 0x388>;
+
 - pinctrl-single,gpio-range : list of values used to configure a GPIO range:
   the subnode phandle, the pin base in the pinctrl device, the number of pins
   in this range, and the GPIO function value of this GPIO range.
index 05bf82a07dfdea5c51a1fdf0c09a9a9b8ef95e37..4bd5be0e5e7dd51eaf7cf23a92a2bf884dd264f1 100644 (file)
@@ -11,18 +11,68 @@ Pull Up (PU) are driven by the related PIO block.
 ST pinctrl driver controls PIO multiplexing block and also interacts with
 gpio driver to configure a pin.
 
-Required properties: (PIO multiplexing block)
+A GPIO bank can have one of two possible types of interrupt wiring.
+
+The first type is via irqmux, where a single interrupt is used by multiple
+gpio banks. This reduces the overall number of interrupts required. All these
+banks belong to a single pin controller.
+                 _________
+                |         |----> [gpio-bank (n)    ]
+                |         |----> [gpio-bank (n + 1)]
+       [irqN]-- | irq-mux |----> [gpio-bank (n + 2)]
+                |         |----> [gpio-bank (...  )]
+                |_________|----> [gpio-bank (n + 7)]
+
+The second type has a dedicated interrupt per gpio bank.
+
+       [irqN]----> [gpio-bank (n)]
+
+
+Pin controller node:
+Required properties:
 - compatible   : should be "st,<SOC>-<pio-block>-pinctrl"
        like st,stih415-sbc-pinctrl, st,stih415-front-pinctrl and so on.
-- gpio-controller : Indicates this device is a GPIO controller
-- #gpio-cells    : Should be one. The first cell is the pin number.
+- st,syscfg            : Should be a phandle of the syscfg node.
 - st,retime-pin-mask   : Should be mask to specify which pins can be retimed.
        If the property is not present, it is assumed that all the pins in the
        bank are capable of retiming. Retiming is mainly used to improve the
        IO timing margins of external synchronous interfaces.
-- st,bank-name         : Should be a name string for this bank as
-                       specified in datasheet.
-- st,syscfg            : Should be a phandle of the syscfg node.
+- ranges : defines the mapping between the pin controller node (parent) and
+  the gpio-bank nodes (children).
+
+Optional properties:
+- interrupts   : Interrupt number of the irqmux, used when the interrupt is
+  shared with other gpio banks via the irqmux.
+- reg          : irqmux memory resource, required if the irqmux is present.
+- reg-names    : the irqmux resource should be named "irqmux".
+
+GPIO controller/bank node:
+Required properties:
+- gpio-controller : Indicates this device is a GPIO controller
+- #gpio-cells    : Should be one. The first cell is the pin number.
+- st,bank-name   : Should be a name string for this bank as specified in
+  datasheet.
+
+Optional properties:
+- interrupts   : Interrupt number for this gpio bank, if there is a dedicated
+  interrupt wired up for this gpio bank.
+
+- interrupt-controller : Indicates this device is an interrupt controller. A
+  GPIO bank can be an interrupt controller iff one of the interrupt types,
+  either via the irqmux or a dedicated interrupt per bank, is specified.
+
+- #interrupt-cells: the value of this property should be 2.
+     - First Cell: represents the external gpio interrupt number local to the
+       gpio interrupt space of the controller.
+     - Second Cell: flags to identify the type of the interrupt
+       - 1 = rising edge triggered
+       - 2 = falling edge triggered
+       - 3 = rising and falling edge triggered
+       - 4 = high level triggered
+       - 8 = low level triggered
+For related macros, look in:
+include/dt-bindings/interrupt-controller/irq.h
 
 Example:
        pin-controller-sbc {
@@ -30,10 +80,17 @@ Example:
                #size-cells     = <1>;
                compatible      = "st,stih415-sbc-pinctrl";
                st,syscfg       = <&syscfg_sbc>;
+               reg             = <0xfe61f080 0x4>;
+               reg-names       = "irqmux";
+               interrupts      = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "irqmux";
                ranges          = <0 0xfe610000 0x5000>;
+
                PIO0: gpio@fe610000 {
                        gpio-controller;
                        #gpio-cells     = <1>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        reg             = <0 0x100>;
                        st,bank-name    = "PIO0";
                };
@@ -105,6 +162,10 @@ pin-controller {
 
 sdhci0:sdhci@fe810000{
        ...
+       interrupt-parent = <&PIO3>;
+       #interrupt-cells = <2>;
+       interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
+       interrupt-names = "card-detect";
        pinctrl-names = "default";
        pinctrl-0       = <&pinctrl_mmc>;
 };
index 4c352be5dd615a463ae56bde90af1b321307a2eb..9fb89e3f61eac0b6794e32ccded19ff9671c200c 100644 (file)
@@ -1,7 +1,7 @@
 Qualcomm MSM8974 TLMM block
 
 Required properties:
-- compatible: "qcom,msm8x74-pinctrl"
+- compatible: "qcom,msm8974-pinctrl"
 - reg: Should be the base address and length of the TLMM block.
 - interrupts: Should be the parent IRQ of the TLMM block.
 - interrupt-controller: Marks the device node as an interrupt controller.
@@ -42,14 +42,14 @@ Non-empty subnodes must specify the 'pins' property.
 Note that not all properties are valid for all pins.
 
 
-Valid values for qcom,pins are:
+Valid values for pins are:
   gpio0-gpio145
     Supports mux, bias and drive-strength
 
   sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd, sdc2_data
     Supports bias and drive-strength
 
-Valid values for qcom,function are:
+Valid values for function are:
   blsp_i2c2, blsp_i2c6, blsp_i2c11, blsp_spi1, blsp_uart2, blsp_uart8, slimbus
 
   (Note that this is not yet the complete list of functions)
@@ -73,18 +73,18 @@ Example:
 
                uart2_default: uart2_default {
                        mux {
-                               qcom,pins = "gpio4", "gpio5";
-                               qcom,function = "blsp_uart2";
+                               pins = "gpio4", "gpio5";
+                               function = "blsp_uart2";
                        };
 
                        tx {
-                               qcom,pins = "gpio4";
+                               pins = "gpio4";
                                drive-strength = <4>;
                                bias-disable;
                        };
 
                        rx {
-                               qcom,pins = "gpio5";
+                               pins = "gpio5";
                                drive-strength = <2>;
                                bias-pull-up;
                        };
index 257677de3e6badac3a185f8c897126d777d48b49..2b32783ba8210dda7fd78b231c512a9e672d694a 100644 (file)
@@ -16,6 +16,7 @@ Required Properties:
   - "samsung,exynos4210-pinctrl": for Exynos4210 compatible pin-controller.
   - "samsung,exynos4x12-pinctrl": for Exynos4x12 compatible pin-controller.
   - "samsung,exynos5250-pinctrl": for Exynos5250 compatible pin-controller.
+  - "samsung,exynos5260-pinctrl": for Exynos5260 compatible pin-controller.
   - "samsung,exynos5420-pinctrl": for Exynos5420 compatible pin-controller.
 
 - reg: Base address of the pin controller hardware module and length of
index 63c659800c0320a9b08a8878fcb3ab327f270d9f..e5cac1e0ca8a734d43549ad125a837beef457e13 100644 (file)
@@ -8,8 +8,12 @@ Required properties:
 Optional properties:
 - enable-gpio          : GPIO to use to enable/disable the regulator.
 - gpios                        : GPIO group used to control voltage.
+- gpios-states         : GPIO pins' initial states array. 0: LOW, 1: HIGH.
+                         Default is LOW if nothing is specified.
 - startup-delay-us     : Startup time in microseconds.
 - enable-active-high   : Polarity of GPIO is active high (default is low).
+- regulator-type       : Specifies what is being regulated, must be either
+                         "voltage" or "current", defaults to current.
 
 Any property defined as part of the core regulator binding defined in
 regulator.txt can also be used.
index fc989b2e8057b3ced0b306ac53befb4a3edc1f81..34ef5d16d0f1697c51b6759b8a572f33d6b85894 100644 (file)
@@ -1,7 +1,7 @@
 PFUZE100 family of regulators
 
 Required properties:
-- compatible: "fsl,pfuze100"
+- compatible: "fsl,pfuze100" or "fsl,pfuze200"
 - reg: I2C slave address
 
 Required child node:
@@ -10,11 +10,14 @@ Required child node:
   Documentation/devicetree/bindings/regulator/regulator.txt.
 
   The valid names for regulators are:
+  --PFUZE100
   sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
+  --PFUZE200
+  sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6
 
 Each regulator is defined using the standard binding for regulators.
 
-Example:
+Example 1: PFUZE100
 
        pmic: pfuze100@08 {
                compatible = "fsl,pfuze100";
@@ -113,3 +116,92 @@ Example:
                        };
                };
        };
+
+
+Example 2: PFUZE200
+
+       pmic: pfuze200@08 {
+               compatible = "fsl,pfuze200";
+               reg = <0x08>;
+
+               regulators {
+                       sw1a_reg: sw1ab {
+                               regulator-min-microvolt = <300000>;
+                               regulator-max-microvolt = <1875000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                               regulator-ramp-delay = <6250>;
+                       };
+
+                       sw2_reg: sw2 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw3a_reg: sw3a {
+                               regulator-min-microvolt = <400000>;
+                               regulator-max-microvolt = <1975000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw3b_reg: sw3b {
+                               regulator-min-microvolt = <400000>;
+                               regulator-max-microvolt = <1975000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       swbst_reg: swbst {
+                               regulator-min-microvolt = <5000000>;
+                               regulator-max-microvolt = <5150000>;
+                       };
+
+                       snvs_reg: vsnvs {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vref_reg: vrefddr {
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vgen1_reg: vgen1 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <1550000>;
+                       };
+
+                       vgen2_reg: vgen2 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <1550000>;
+                       };
+
+                       vgen3_reg: vgen3 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                       };
+
+                       vgen4_reg: vgen4 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen5_reg: vgen5 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen6_reg: vgen6 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+               };
+       };
index fc6b38f035bd4b8a4e369e67d6eb25bb4d87edf9..d290988ed975fee4883ff6a49a0e5d79ae82feee 100644 (file)
@@ -69,13 +69,16 @@ sub-node should be of the format as listed below.
                };
        };
 The above regulator entries are defined in regulator bindings documentation
-except op_mode description.
+except these properties:
        - op_mode: describes the different operating modes of the LDOs as the
                SoC power mode changes. The possible values are:
                0 - always off mode
                1 - on in normal mode
                2 - low power mode
                3 - suspend mode
+       - s5m8767,pmic-ext-control-gpios: (optional) GPIO specifier for one
+               GPIO controlling this regulator (enable/disable); This is
+               valid only for buck9.
 
 The following are the names of the regulators that the s5m8767 pmic block
 supports. Note: The 'n' in LDOn and BUCKn represents the LDO or BUCK number
@@ -148,5 +151,13 @@ Example:
                                regulator-always-on;
                                regulator-boot-on;
                        };
+
+                       vemmc_reg: BUCK9 {
+                               regulator-name = "VMEM_VDD_2.8V";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               op_mode = <3>; /* Standby Mode */
+                               s5m8767,pmic-ext-control-gpios = <&gpk0 2 0>;
+                       };
                };
        };
index 2e57a33e9029a39fa80c2da517e7d4bb8df4aa4d..c58db75f959e601fcf326a0637d79617253a8206 100644 (file)
@@ -4,10 +4,14 @@ Required Properties:
 - compatible: Should be one of:
   - "ti,abb-v1" for older SoCs like OMAP3
   - "ti,abb-v2" for newer SoCs like OMAP4, OMAP5
+  - "ti,abb-v3" for a generic definition where setup and control registers are
+     provided (example: DRA7)
 - reg: Address and length of the register set for the device. It contains
   the information of registers in the same order as described by reg-names
 - reg-names: Should contain the reg names
-  - "base-address"     - contains base address of ABB module
+  - "base-address"     - contains base address of ABB module (ti,abb-v1,ti,abb-v2)
+  - "control-address"  - contains control register address of ABB module (ti,abb-v3)
+  - "setup-address"    - contains setup register address of ABB module (ti,abb-v3)
   - "int-address"      - contains address of interrupt register for ABB module
   (also see Optional properties)
 - #address-cell: should be 0
index 48aeb7884ed3733ae46e757cf9ee5da4b1063a51..5c2e23574ca025aea14151eb4243052c4cf14901 100644 (file)
@@ -2,7 +2,7 @@ Allwinner A1X SoCs Timer Controller
 
 Required properties:
 
-- compatible : should be "allwinner,sun4i-timer"
+- compatible : should be "allwinner,sun4i-a10-timer"
 - reg : Specifies base physical address and size of the registers.
 - interrupts : The interrupt of the first timer
 - clocks: phandle to the source clock (usually a 24 MHz fixed clock)
@@ -10,7 +10,7 @@ Required properties:
 Example:
 
 timer {
-       compatible = "allwinner,sun4i-timer";
+       compatible = "allwinner,sun4i-a10-timer";
        reg = <0x01c20c00 0x400>;
        interrupts = <22>;
        clocks = <&osc>;
diff --git a/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt b/Documentation/devicetree/bindings/timer/ti,keystone-timer.txt
new file mode 100644 (file)
index 0000000..5fbe361
--- /dev/null
@@ -0,0 +1,29 @@
+* Device tree bindings for Texas instruments Keystone timer
+
+This document provides bindings for the 64-bit timer in the KeyStone
+architecture devices. The timer can be configured as a single general-purpose
+64-bit timer or as dual general-purpose 32-bit timers. When configured as dual
+32-bit timers, the two halves can operate either in conjunction (chain mode)
+or independently of each other (unchained mode).
+
+The timer is a free-running up-counter and can generate an interrupt when the
+counter reaches preset counter values.
+
+Documentation:
+http://www.ti.com/lit/ug/sprugv5a/sprugv5a.pdf
+
+Required properties:
+
+- compatible : should be "ti,keystone-timer".
+- reg : specifies base physical address and count of the registers.
+- interrupts : interrupt generated by the timer.
+- clocks : the clock feeding the timer clock.
+
+Example:
+
+timer@22f0000 {
+       compatible = "ti,keystone-timer";
+       reg = <0x022f0000 0x80>;
+       interrupts = <GIC_SPI 110 IRQ_TYPE_EDGE_RISING>;
+       clocks = <&clktimer15>;
+};
index 7116fda7077ffce993b0d2456869934ab8c3ee09..121d5fcbd94aa200d219ef36d6445169fd24ebd7 100644 (file)
@@ -231,6 +231,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        acpi_no_auto_ssdt       [HW,ACPI] Disable automatic loading of SSDT
 
+       acpica_no_return_repair [HW, ACPI]
+                       Disable AML predefined validation mechanism
+                       This mechanism can repair the evaluation result to make
+                       the return objects more ACPI specification compliant.
+                       This option is useful for developers to identify the
+                       root cause of an AML interpreter issue when the issue
+                       has something to do with the repair mechanism.
+
        acpi_os_name=   [HW,ACPI] Tell ACPI BIOS the name of the OS
                        Format: To spoof as Windows 98: ="Microsoft Windows"
 
@@ -1011,6 +1019,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        parameter will force ia64_sal_cache_flush to call
                        ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
 
+       forcepae [X86-32]
+                       Forcefully enable Physical Address Extension (PAE).
+                       Many Pentium M systems disable PAE but may have a
+                       functionally usable PAE implementation.
+                       Warning: use of this parameter will taint the kernel
+                       and may cause unknown problems.
+
        ftrace=[tracer]
                        [FTRACE] will set and start the specified tracer
                        as early as possible in order to facilitate early
@@ -2053,8 +2068,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        IOAPICs that may be present in the system.
 
        nokaslr         [X86]
-                       Disable kernel base offset ASLR (Address Space
-                       Layout Randomization) if built into the kernel.
+                       Disable kernel and module base offset ASLR (Address
+                       Space Layout Randomization) if built into the kernel.
 
        noautogroup     Disable scheduler automatic task group creation.
 
index 827104fb9364cf60df2359e45c3336f61d4568c5..f3cd299fcc41203f9fe73ad16cd70aa77a421ef3 100644 (file)
@@ -162,7 +162,18 @@ Purpose: Execute workqueue requests
 To reduce its OS jitter, do any of the following:
 1.     Run your workload at a real-time priority, which will allow
        preempting the kworker daemons.
-2.     Do any of the following needed to avoid jitter that your
+2.     A given workqueue can be made visible in the sysfs filesystem
+       by passing WQ_SYSFS to that workqueue's alloc_workqueue(), as
+       sketched below.  Such a workqueue can be confined to a given
+       subset of the CPUs using the /sys/devices/virtual/workqueue/*/cpumask
+       sysfs files.  The set of WQ_SYSFS workqueues can be displayed using
+       "ls /sys/devices/virtual/workqueue".  That said, the workqueues
+       maintainer would like to caution people against indiscriminately
+       sprinkling WQ_SYSFS across all the workqueues.  The reason for
+       caution is that it is easy to add WQ_SYSFS, but because sysfs is
+       part of the formal user/kernel API, it can be nearly impossible
+       to remove it, even if its addition was a mistake.
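
A minimal sketch of the alloc_workqueue() call referred to in item 2 above;
the workqueue name and the extra WQ_UNBOUND flag are illustrative assumptions,
not taken from the patch:

	struct workqueue_struct *wq;

	/*
	 * WQ_SYSFS exposes this workqueue under
	 * /sys/devices/virtual/workqueue/, where its attributes (including
	 * cpumask for unbound workqueues) can be tuned from user space.
	 */
	wq = alloc_workqueue("example_wq", WQ_SYSFS | WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

The cpumask attribute applies to unbound workqueues, which is why the sketch
also passes WQ_UNBOUND.
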
+3.     Do any of the following needed to avoid jitter that your
        application cannot tolerate:
        a.      Build your kernel with CONFIG_SLUB=y rather than
                CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
index 102dc19c411980d0aa17beeb12f0bcdd38447e29..11c1d2049662cc7cff72193e87707a46ad77eca0 100644 (file)
@@ -608,26 +608,30 @@ as follows:
        b = p;  /* BUG: Compiler can reorder!!! */
        do_something();
 
-The solution is again ACCESS_ONCE(), which preserves the ordering between
-the load from variable 'a' and the store to variable 'b':
+The solution is again ACCESS_ONCE() and barrier(), which preserve the
+ordering between the load from variable 'a' and the store to variable 'b':
 
        q = ACCESS_ONCE(a);
        if (q) {
+               barrier();
                ACCESS_ONCE(b) = p;
                do_something();
        } else {
+               barrier();
                ACCESS_ONCE(b) = p;
                do_something_else();
        }
 
-You could also use barrier() to prevent the compiler from moving
-the stores to variable 'b', but barrier() would not prevent the
-compiler from proving to itself that a==1 always, so ACCESS_ONCE()
-is also needed.
+The initial ACCESS_ONCE() is required to prevent the compiler from
+proving the value of 'a', and the pair of barrier() invocations are
+required to prevent the compiler from pulling the two identical stores
+to 'b' out from the legs of the "if" statement.
 
 It is important to note that control dependencies absolutely require
 a conditional.  For example, the following "optimized" version of
-the above example breaks ordering:
+the above example breaks ordering, which is why the barrier() invocations
+are absolutely required if you have identical stores in both legs of
+the "if" statement:
 
        q = ACCESS_ONCE(a);
        ACCESS_ONCE(b) = p;  /* BUG: No ordering vs. load from a!!! */
@@ -643,9 +647,11 @@ It is of course legal for the prior load to be part of the conditional,
 for example, as follows:
 
        if (ACCESS_ONCE(a) > 0) {
+               barrier();
                ACCESS_ONCE(b) = q / 2;
                do_something();
        } else {
+               barrier();
                ACCESS_ONCE(b) = q / 3;
                do_something_else();
        }
@@ -659,9 +665,11 @@ the needed conditional.  For example:
 
        q = ACCESS_ONCE(a);
        if (q % MAX) {
+               barrier();
                ACCESS_ONCE(b) = p;
                do_something();
        } else {
+               barrier();
                ACCESS_ONCE(b) = p;
                do_something_else();
        }
@@ -723,8 +731,13 @@ In summary:
       use smp_rmb(), smp_wmb(), or, in the case of prior stores and
       later loads, smp_mb().
 
+  (*) If both legs of the "if" statement begin with identical stores
+      to the same variable, a barrier() statement is required at the
+      beginning of each leg of the "if" statement.
+
   (*) Control dependencies require at least one run-time conditional
-      between the prior load and the subsequent store.  If the compiler
+      between the prior load and the subsequent store, and this
+      conditional must involve the prior load.  If the compiler
       is able to optimize the conditional away, it will have also
       optimized away the ordering.  Careful use of ACCESS_ONCE() can
       help to preserve the needed conditional.
@@ -1249,6 +1262,23 @@ The ACCESS_ONCE() function can prevent any number of optimizations that,
 while perfectly safe in single-threaded code, can be fatal in concurrent
 code.  Here are some examples of these sorts of optimizations:
 
+ (*) The compiler is within its rights to reorder loads and stores
+     to the same variable, and in some cases, the CPU is within its
+     rights to reorder loads to the same variable.  This means that
+     the following code:
+
+       a[0] = x;
+       a[1] = x;
+
+     Might result in an older value of x stored in a[1] than in a[0].
+     Prevent both the compiler and the CPU from doing this as follows:
+
+       a[0] = ACCESS_ONCE(x);
+       a[1] = ACCESS_ONCE(x);
+
+     In short, ACCESS_ONCE() provides cache coherence for accesses from
+     multiple CPUs to a single variable.
+
  (*) The compiler is within its rights to merge successive loads from
      the same variable.  Such merging can cause the compiler to "optimize"
      the following code:
@@ -1644,12 +1674,12 @@ for each construct.  These operations all imply certain barriers:
      Memory operations issued after the ACQUIRE will be completed after the
      ACQUIRE operation has completed.
 
-     Memory operations issued before the ACQUIRE may be completed after the
-     ACQUIRE operation has completed.  An smp_mb__before_spinlock(), combined
-     with a following ACQUIRE, orders prior loads against subsequent stores and
-     stores and prior stores against subsequent stores.  Note that this is
-     weaker than smp_mb()!  The smp_mb__before_spinlock() primitive is free on
-     many architectures.
+     Memory operations issued before the ACQUIRE may be completed after
+     the ACQUIRE operation has completed.  An smp_mb__before_spinlock(),
+     combined with a following ACQUIRE, orders prior loads against
+     subsequent loads and stores and also orders prior stores against
+     subsequent stores.  Note that this is weaker than smp_mb()!  The
+     smp_mb__before_spinlock() primitive is free on many architectures.
 
  (2) RELEASE operation implication:
 
@@ -1694,24 +1724,21 @@ may occur as:
 
        ACQUIRE M, STORE *B, STORE *A, RELEASE M
 
-This same reordering can of course occur if the lock's ACQUIRE and RELEASE are
-to the same lock variable, but only from the perspective of another CPU not
-holding that lock.
-
-In short, a RELEASE followed by an ACQUIRE may -not- be assumed to be a full
-memory barrier because it is possible for a preceding RELEASE to pass a
-later ACQUIRE from the viewpoint of the CPU, but not from the viewpoint
-of the compiler.  Note that deadlocks cannot be introduced by this
-interchange because if such a deadlock threatened, the RELEASE would
-simply complete.
-
-If it is necessary for a RELEASE-ACQUIRE pair to produce a full barrier, the
-ACQUIRE can be followed by an smp_mb__after_unlock_lock() invocation.  This
-will produce a full barrier if either (a) the RELEASE and the ACQUIRE are
-executed by the same CPU or task, or (b) the RELEASE and ACQUIRE act on the
-same variable.  The smp_mb__after_unlock_lock() primitive is free on many
-architectures.  Without smp_mb__after_unlock_lock(), the critical sections
-corresponding to the RELEASE and the ACQUIRE can cross:
+When the ACQUIRE and RELEASE are a lock acquisition and release,
+respectively, this same reordering can occur if the lock's ACQUIRE and
+RELEASE are to the same lock variable, but only from the perspective of
+another CPU not holding that lock.  In short, an ACQUIRE followed by a
+RELEASE may -not- be assumed to be a full memory barrier.
+
+Similarly, the reverse case of a RELEASE followed by an ACQUIRE does not
+imply a full memory barrier.  If it is necessary for a RELEASE-ACQUIRE
+pair to produce a full barrier, the ACQUIRE can be followed by an
+smp_mb__after_unlock_lock() invocation.  This will produce a full barrier
+if either (a) the RELEASE and the ACQUIRE are executed by the same
+CPU or task, or (b) the RELEASE and ACQUIRE act on the same variable.
+The smp_mb__after_unlock_lock() primitive is free on many architectures.
+Without smp_mb__after_unlock_lock(), the CPU's execution of the critical
+sections corresponding to the RELEASE and the ACQUIRE can cross, so that:
 
        *A = a;
        RELEASE M
@@ -1722,7 +1749,36 @@ could occur as:
 
        ACQUIRE N, STORE *B, STORE *A, RELEASE M
 
-With smp_mb__after_unlock_lock(), they cannot, so that:
+It might appear that this reordering could introduce a deadlock.
+However, this cannot happen because if such a deadlock threatened,
+the RELEASE would simply complete, thereby avoiding the deadlock.
+
+       Why does this work?
+
+       One key point is that we are only talking about the CPU doing
+       the reordering, not the compiler.  If the compiler (or, for
+       that matter, the developer) switched the operations, deadlock
+       -could- occur.
+
+       But suppose the CPU reordered the operations.  In this case,
+       the unlock precedes the lock in the assembly code.  The CPU
+       simply elected to try executing the later lock operation first.
+       If there is a deadlock, this lock operation will simply spin (or
+       try to sleep, but more on that later).  The CPU will eventually
+       execute the unlock operation (which preceded the lock operation
+       in the assembly code), which will unravel the potential deadlock,
+       allowing the lock operation to succeed.
+
+       But what if the lock is a sleeplock?  In that case, the code will
+       try to enter the scheduler, where it will eventually encounter
+       a memory barrier, which will force the earlier unlock operation
+       to complete, again unraveling the deadlock.  There might be
+       a sleep-unlock race, but the locking primitive needs to resolve
+       such races properly in any case.
+
+With smp_mb__after_unlock_lock(), the two critical sections cannot overlap.
+For example, with the following code, the store to *A will always be
+seen by other CPUs before the store to *B:
 
        *A = a;
        RELEASE M
@@ -1730,13 +1786,18 @@ With smp_mb__after_unlock_lock(), they cannot, so that:
        smp_mb__after_unlock_lock();
        *B = b;
 
-will always occur as either of the following:
+The operations will always occur in one of the following orders:
 
-       STORE *A, RELEASE, ACQUIRE, STORE *B
-       STORE *A, ACQUIRE, RELEASE, STORE *B
+       STORE *A, RELEASE, ACQUIRE, smp_mb__after_unlock_lock(), STORE *B
+       STORE *A, ACQUIRE, RELEASE, smp_mb__after_unlock_lock(), STORE *B
+       ACQUIRE, STORE *A, RELEASE, smp_mb__after_unlock_lock(), STORE *B
 
 If the RELEASE and ACQUIRE were instead both operating on the same lock
-variable, only the first of these two alternatives can occur.
+variable, only the first of these alternatives can occur.  In addition,
+the more strongly ordered systems may rule out some of the above orders.
+But in any case, as noted earlier, the smp_mb__after_unlock_lock()
+ensures that the store to *A will always be seen as happening before
+the store to *B.
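
As a concrete restatement of the abstract sequence above using real lock
calls (a minimal sketch; the spinlocks M and N stand in for the abstract
RELEASE M / ACQUIRE N and are not part of the patch):

	*A = a;
	spin_unlock(&M);                /* RELEASE M */
	spin_lock(&N);                  /* ACQUIRE N */
	smp_mb__after_unlock_lock();    /* upgrades the pair to a full barrier */
	*B = b;

With the smp_mb__after_unlock_lock() in place, other CPUs will always observe
the store to *A before the store to *B, as described above.
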
 
 Locks and semaphores may not provide any guarantee of ordering on UP compiled
 systems, and so cannot be counted on in such a situation to actually achieve
@@ -2757,7 +2818,7 @@ in that order, but, without intervention, the sequence may have almost any
 combination of elements combined or discarded, provided the program's view of
 the world remains consistent.  Note that ACCESS_ONCE() is -not- optional
 in the above example, as there are architectures where a given CPU might
-interchange successive loads to the same location.  On such architectures,
+reorder successive loads to the same location.  On such architectures,
 ACCESS_ONCE() does whatever is necessary to prevent this, for example, on
 Itanium the volatile casts used by ACCESS_ONCE() cause GCC to emit the
 special ld.acq and st.rel instructions that prevent such reordering.
index 483632087788db0532718d2a26ab2283c379c38d..a5da5c7e7128bce8ff79beb019444c861fa1b68a 100644 (file)
@@ -88,17 +88,19 @@ node.
 
 2. PM QoS per-device latency and flags framework
 
-For each device, there are two lists of PM QoS requests. One is maintained
-along with the aggregated target of latency value and the other is for PM QoS
-flags. Values are updated in response to changes of the request list.
+For each device, there are three lists of PM QoS requests. Two of them are
+maintained along with the aggregated targets of resume latency and active
+state latency tolerance (in microseconds) and the third one is for PM QoS flags.
+Values are updated in response to changes of the request list.
 
-Target latency value is simply the minimum of the request values held in the
-parameter list elements.  The PM QoS flags aggregate value is a gather (bitwise
-OR) of all list elements' values. Two device PM QoS flags are defined currently:
-PM_QOS_FLAG_NO_POWER_OFF and PM_QOS_FLAG_REMOTE_WAKEUP.
+The target values of resume latency and active state latency tolerance are
+simply the minimum of the request values held in the parameter list elements.
+The PM QoS flags aggregate value is a gather (bitwise OR) of all list elements'
+values.  Two device PM QoS flags are defined currently: PM_QOS_FLAG_NO_POWER_OFF
+and PM_QOS_FLAG_REMOTE_WAKEUP.
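
As an illustration of the "minimum of the request values" rule, a hedged
sketch (the request structures and the values are hypothetical):

	struct dev_pm_qos_request req1, req2;

	dev_pm_qos_add_request(dev, &req1, DEV_PM_QOS_RESUME_LATENCY, 250);
	dev_pm_qos_add_request(dev, &req2, DEV_PM_QOS_RESUME_LATENCY, 100);
	/* aggregated resume latency target = min(250, 100) = 100 us */
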
 
-Note: the aggregated target value is implemented as an atomic variable so that
-reading the aggregated value does not require any locking mechanism.
+Note: The aggregated target values are implemented in such a way that reading
+the aggregated value does not require any locking mechanism.
 
 
 From kernel mode the use of this interface is the following:
@@ -132,19 +134,21 @@ The meaning of the return values is as follows:
        PM_QOS_FLAGS_UNDEFINED: The device's PM QoS structure has not been
                        initialized or the list of requests is empty.
 
-int dev_pm_qos_add_ancestor_request(dev, handle, value)
+int dev_pm_qos_add_ancestor_request(dev, handle, type, value)
 Add a PM QoS request for the first direct ancestor of the given device whose
-power.ignore_children flag is unset.
+power.ignore_children flag is unset (for DEV_PM_QOS_RESUME_LATENCY requests)
+or whose power.set_latency_tolerance callback pointer is not NULL (for
+DEV_PM_QOS_LATENCY_TOLERANCE requests).
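
A hedged usage sketch of the extended call (the 100 microsecond value and the
surrounding driver context are illustrative only):

	struct dev_pm_qos_request req;
	int ret;

	/* Ask the nearest suitable ancestor for a 100 us resume latency limit. */
	ret = dev_pm_qos_add_ancestor_request(dev, &req,
					      DEV_PM_QOS_RESUME_LATENCY, 100);
	if (ret < 0)
		dev_warn(dev, "ancestor PM QoS request failed: %d\n", ret);
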
 
 int dev_pm_qos_expose_latency_limit(device, value)
-Add a request to the device's PM QoS list of latency constraints and create
-a sysfs attribute pm_qos_resume_latency_us under the device's power directory
-allowing user space to manipulate that request.
+Add a request to the device's PM QoS list of resume latency constraints and
+create a sysfs attribute pm_qos_resume_latency_us under the device's power
+directory allowing user space to manipulate that request.
 
 void dev_pm_qos_hide_latency_limit(device)
 Drop the request added by dev_pm_qos_expose_latency_limit() from the device's
-PM QoS list of latency constraints and remove sysfs attribute pm_qos_resume_latency_us
-from the device's power directory.
+PM QoS list of resume latency constraints and remove sysfs attribute
+pm_qos_resume_latency_us from the device's power directory.
 
 int dev_pm_qos_expose_flags(device, value)
 Add a request to the device's PM QoS list of flags and create sysfs attributes
@@ -163,7 +167,7 @@ a per-device notification tree and a global notification tree.
 int dev_pm_qos_add_notifier(device, notifier):
 Adds a notification callback function for the device.
 The callback is called when the aggregated value of the device constraints list
-is changed.
+is changed (for resume latency device PM QoS only).
 
 int dev_pm_qos_remove_notifier(device, notifier):
 Removes the notification callback function for the device.
@@ -171,14 +175,48 @@ Removes the notification callback function for the device.
 int dev_pm_qos_add_global_notifier(notifier):
 Adds a notification callback function in the global notification tree of the
 framework.
-The callback is called when the aggregated value for any device is changed.
+The callback is called when the aggregated value for any device is changed
+(for resume latency device PM QoS only).
 
 int dev_pm_qos_remove_global_notifier(notifier):
 Removes the notification callback function from the global notification tree
 of the framework.
 
 
-From user mode:
-No API for user space access to the per-device latency constraints is provided
-yet - still under discussion.
-
+Active state latency tolerance
+
+This device PM QoS type is used to support systems in which hardware may switch
+to energy-saving operation modes on the fly.  In those systems, if the operation
+mode chosen by the hardware attempts to save energy in an overly aggressive way,
+it may cause excess latencies to be visible to software, causing it to miss
+certain protocol requirements or target frame or sample rates etc.
+
+If there is a latency tolerance control mechanism for a given device available
+to software, the .set_latency_tolerance callback in that device's dev_pm_info
+structure should be populated.  The routine pointed to by it should implement
+whatever is necessary to transfer the effective requirement value to the
+hardware.
+
+Whenever the effective latency tolerance changes for the device, its
+.set_latency_tolerance() callback will be executed and the effective value will
+be passed to it.  If that value is negative, which means that the list of
+latency tolerance requirements for the device is empty, the callback is expected
+to switch the underlying hardware latency tolerance control mechanism to an
+autonomous mode if available.  If that value is PM_QOS_LATENCY_ANY, in turn, and
+the hardware supports a special "no requirement" setting, the callback is
+expected to use it.  That allows software to prevent the hardware from
+automatically updating the device's latency tolerance in response to its power
+state changes (e.g. during transitions from D3cold to D0), which generally may
+be done in the autonomous latency tolerance control mode.
+
+If .set_latency_tolerance() is present for the device, sysfs attribute
+pm_qos_latency_tolerance_us will be present in the device's power directory.
+Then, user space can use that attribute to specify its latency tolerance
+requirement for the device, if any.  Writing "any" to it means "no requirement,
+but do not let the hardware control latency tolerance" and writing "auto" to it
+allows the hardware to be switched to the autonomous mode if there are no other
+requirements from the kernel side in the device's list.
+
+Kernel code can use the functions described above along with the
+DEV_PM_QOS_LATENCY_TOLERANCE device PM QoS type to add, remove and update
+latency tolerance requirements for devices.
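
A minimal sketch of the .set_latency_tolerance() contract described above;
the foo_* helpers and the device are hypothetical, not part of the patch:

	static void foo_set_latency_tolerance(struct device *dev, s32 val)
	{
		if (val < 0)
			foo_enable_auto_ltr(dev);       /* empty list: autonomous mode */
		else if (val == PM_QOS_LATENCY_ANY)
			foo_set_no_requirement(dev);    /* "no requirement", autonomy off */
		else
			foo_write_ltr_us(dev, val);     /* program tolerance, in microseconds */
	}

	/* typically assigned during bus/driver setup: */
	dev->power.set_latency_tolerance = foo_set_latency_tolerance;

Kernel code can then add DEV_PM_QOS_LATENCY_TOLERANCE requests with the
functions described earlier, while user space uses the
pm_qos_latency_tolerance_us attribute mentioned above.
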
index e55124e7c40cd0eef8afb92c34913cd68c42faac..ec8be46bf48da2146cbf2689a6aa51a315e1f2f4 100644 (file)
@@ -320,10 +320,11 @@ This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
 
 ==============================================================
 
-hung_task_warning:
+hung_task_warnings:
 
 The maximum number of warnings to report. During a check interval
-When this value is reached, no more the warnings will be reported.
+if a hung task is detected, this value is decreased by 1.
+When this value reaches 0, no more warnings will be reported.
 This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
 
 -1: report an infinite number of warnings.
@@ -441,8 +442,7 @@ feature should be disabled. Otherwise, if the system overhead from the
 feature is too high then the rate the kernel samples for NUMA hinting
 faults may be controlled by the numa_balancing_scan_period_min_ms,
 numa_balancing_scan_delay_ms, numa_balancing_scan_period_max_ms,
-numa_balancing_scan_size_mb, numa_balancing_settle_count sysctls and
-numa_balancing_migrate_deferred.
+numa_balancing_scan_size_mb, and numa_balancing_settle_count sysctls.
 
 ==============================================================
 
@@ -483,13 +483,6 @@ rate for each task.
 numa_balancing_scan_size_mb is how many megabytes worth of pages are
 scanned for a given scan.
 
-numa_balancing_migrate_deferred is how many page migrations get skipped
-unconditionally, after a page migration is skipped because a page is shared
-with other tasks. This reduces page migration overhead, and determines
-how much stronger the "move task near its memory" policy scheduler becomes,
-versus the "move memory near its task" memory management policy, for workloads
-with shared memory.
-
 ==============================================================
 
 osrelease, ostype & version:
index 3bd33b8dc7c460f71885b169077f3df9ec2adaac..21d514ced212436ea70aa5a218e20a5f8407523e 100644 (file)
@@ -92,5 +92,5 @@ dev_pm_qos_remove_request          "device=%s type=%s new_value=%d"
 
 The first parameter gives the device name which tries to add/update/remove
 QoS requests.
-The second parameter gives the request type (e.g. "DEV_PM_QOS_LATENCY").
+The second parameter gives the request type (e.g. "DEV_PM_QOS_RESUME_LATENCY").
 The third parameter is value to be added/updated/removed.
index cb81741d3b0bd92b70aa87918e632edd0bfef3f1..a75e3adaa39da277fb89150fb1d31daf8d1296ef 100644 (file)
@@ -182,7 +182,7 @@ Offset      Proto   Name            Meaning
 0226/1 2.02+(3 ext_loader_ver  Extended boot loader version
 0227/1 2.02+(3 ext_loader_type Extended boot loader ID
 0228/4 2.02+   cmd_line_ptr    32-bit pointer to the kernel command line
-022C/4 2.03+   ramdisk_max     Highest legal initrd address
+022C/4 2.03+   initrd_addr_max Highest legal initrd address
 0230/4 2.05+   kernel_alignment Physical addr alignment required for kernel
 0234/1 2.05+   relocatable_kernel Whether kernel is relocatable or not
 0235/1 2.10+   min_alignment   Minimum alignment, as a power of two
@@ -534,7 +534,7 @@ Protocol:   2.02+
   zero, the kernel will assume that your boot loader does not support
   the 2.02+ protocol.
 
-Field name:    ramdisk_max
+Field name:    initrd_addr_max
 Type:          read
 Offset/size:   0x22c/4
 Protocol:      2.03+
index 338db2514585e66b0680af565140c8691a42834e..5fe2c0d661223cf5c19081beebeadd9bfccb6227 100644 (file)
@@ -242,8 +242,8 @@ S:  Maintained
 F:     drivers/platform/x86/acer-wmi.c
 
 ACPI
-M:     Len Brown <lenb@kernel.org>
 M:     Rafael J. Wysocki <rjw@rjwysocki.net>
+M:     Len Brown <lenb@kernel.org>
 L:     linux-acpi@vger.kernel.org
 W:     https://01.org/linux-acpi
 Q:     https://patchwork.kernel.org/project/linux-acpi/list/
@@ -1320,6 +1320,7 @@ M:        Linus Walleij <linus.walleij@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     arch/arm/mach-u300/
+F:     drivers/clocksource/timer-u300.c
 F:     drivers/i2c/busses/i2c-stu300.c
 F:     drivers/rtc/rtc-coh901331.c
 F:     drivers/watchdog/coh901327_wdt.c
@@ -1832,8 +1833,8 @@ F:        net/bluetooth/
 F:     include/net/bluetooth/
 
 BONDING DRIVER
-M:     Jay Vosburgh <fubar@us.ibm.com>
-M:     Veaceslav Falico <vfalico@redhat.com>
+M:     Jay Vosburgh <j.vosburgh@gmail.com>
+M:     Veaceslav Falico <vfalico@gmail.com>
 M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
@@ -2801,9 +2802,9 @@ S:        Supported
 F:     drivers/acpi/dock.c
 
 DOCUMENTATION
-M:     Rob Landley <rob@landley.net>
+M:     Randy Dunlap <rdunlap@infradead.org>
 L:     linux-doc@vger.kernel.org
-T:     TBD
+T:     quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:     Maintained
 F:     Documentation/
 
@@ -3657,8 +3658,8 @@ S:        Maintained
 F:     fs/freevxfs/
 
 FREEZER
-M:     Pavel Machek <pavel@ucw.cz>
 M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M:     Pavel Machek <pavel@ucw.cz>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/freezing-of-tasks.txt
@@ -4022,8 +4023,8 @@ S:        Maintained
 F:     drivers/video/hgafb.c
 
 HIBERNATION (aka Software Suspend, aka swsusp)
-M:     Pavel Machek <pavel@ucw.cz>
 M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
+M:     Pavel Machek <pavel@ucw.cz>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     arch/x86/power/
@@ -6006,6 +6007,7 @@ F:        include/uapi/linux/net.h
 F:     include/uapi/linux/netdevice.h
 F:     tools/net/
 F:     tools/testing/selftests/net/
+F:     lib/random32.c
 
 NETWORKING [IPv4/IPv6]
 M:     "David S. Miller" <davem@davemloft.net>
@@ -7404,10 +7406,26 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 S:     Supported
 F:     arch/s390/
 F:     drivers/s390/
-F:     block/partitions/ibm.c
 F:     Documentation/s390/
 F:     Documentation/DocBook/s390*
 
+S390 COMMON I/O LAYER
+M:     Sebastian Ott <sebott@linux.vnet.ibm.com>
+M:     Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+L:     linux-s390@vger.kernel.org
+W:     http://www.ibm.com/developerworks/linux/linux390/
+S:     Supported
+F:     drivers/s390/cio/
+
+S390 DASD DRIVER
+M:     Stefan Weinhuber <wein@de.ibm.com>
+M:     Stefan Haberland <stefan.haberland@de.ibm.com>
+L:     linux-s390@vger.kernel.org
+W:     http://www.ibm.com/developerworks/linux/linux390/
+S:     Supported
+F:     drivers/s390/block/dasd*
+F:     block/partitions/ibm.c
+
 S390 NETWORK DRIVERS
 M:     Ursula Braun <ursula.braun@de.ibm.com>
 M:     Frank Blaschka <blaschka@linux.vnet.ibm.com>
@@ -7417,6 +7435,15 @@ W:       http://www.ibm.com/developerworks/linux/linux390/
 S:     Supported
 F:     drivers/s390/net/
 
+S390 PCI SUBSYSTEM
+M:     Sebastian Ott <sebott@linux.vnet.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@de.ibm.com>
+L:     linux-s390@vger.kernel.org
+W:     http://www.ibm.com/developerworks/linux/linux390/
+S:     Supported
+F:     arch/s390/pci/
+F:     drivers/pci/hotplug/s390_pci_hpc.c
+
 S390 ZCRYPT DRIVER
 M:     Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
 M:     linux390@de.ibm.com
@@ -8426,9 +8453,9 @@ F:        arch/sh/
 F:     drivers/sh/
 
 SUSPEND TO RAM
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:     Len Brown <len.brown@intel.com>
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/
index c10b734339dbe10bb257a330c4eb9dad9eb1265e..e5ac8a62e6e57c855c4da06e3d80c2438fccee48 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
index a73a8e208a4ae00154c0e1adb6ef4a3f311ebcb6..96e54bed50889e4fe62c71ac6125ac6fe0dad5f2 100644 (file)
@@ -1,7 +1,9 @@
 
-generic-y += clkdev.h
 
+generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
-generic-y += trace_clock.h
-generic-y += preempt.h
 generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/cputime.h b/arch/alpha/include/asm/cputime.h
deleted file mode 100644 (file)
index 19577fd..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ALPHA_CPUTIME_H
-#define __ALPHA_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __ALPHA_CPUTIME_H */
index 0d3362991c316efd8236933c178523565c3f29ad..e76fd79f32b0126ee2e83dfaa5ba05d3d4c98452 100644 (file)
@@ -1,15 +1,15 @@
 generic-y += auxvec.h
 generic-y += barrier.h
-generic-y += bugs.h
 generic-y += bitsperlong.h
+generic-y += bugs.h
 generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
-generic-y += fcntl.h
 generic-y += fb.h
+generic-y += fcntl.h
 generic-y += ftrace.h
 generic-y += hardirq.h
 generic-y += hash.h
@@ -22,6 +22,7 @@ generic-y += kmap_types.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += param.h
@@ -30,6 +31,7 @@ generic-y += pci.h
 generic-y += percpu.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
@@ -48,4 +50,3 @@ generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
-generic-y += preempt.h
index 19c65509a22d8b10cc48e667fb747bdda0d6ee1e..3b075dd19b51ce7c28eb2b243c075a5a7e2be56a 100644 (file)
                                R8A7791_CLK_MSIOF1 R8A7791_CLK_SCIFB2
                        >;
                        clock-output-names =
-                               "scifa2", "scifa1", "scifa0", "misof2", "scifb0",
+                               "scifa2", "scifa1", "scifa0", "msiof2", "scifb0",
                                "scifb1", "msiof1", "scifb2";
                };
                mstp3_clks: mstp3_clks@e615013c {
index d4d2763f47948b65f7f0890696699a4b2b98f245..249b6e0ba7371bd69ee0bb6d7914c4b8b9efb417 100644 (file)
                };
 
                intc: interrupt-controller@01c20400 {
-                       compatible = "allwinner,sun4i-ic";
+                       compatible = "allwinner,sun4i-a10-ic";
                        reg = <0x01c20400 0x400>;
                        interrupt-controller;
                        #interrupt-cells = <1>;
                };
 
                timer@01c20c00 {
-                       compatible = "allwinner,sun4i-timer";
+                       compatible = "allwinner,sun4i-a10-timer";
                        reg = <0x01c20c00 0x90>;
                        interrupts = <22>;
                        clocks = <&osc24M>;
index 79fd412005b02860c5dcb8712418273584fa3b8f..ddb25452d78e10239ef0a7a85ca5ba73cb59c3e4 100644 (file)
                };
 
                intc: interrupt-controller@01c20400 {
-                       compatible = "allwinner,sun4i-ic";
+                       compatible = "allwinner,sun4i-a10-ic";
                        reg = <0x01c20400 0x400>;
                        interrupt-controller;
                        #interrupt-cells = <1>;
                };
 
                timer@01c20c00 {
-                       compatible = "allwinner,sun4i-timer";
+                       compatible = "allwinner,sun4i-a10-timer";
                        reg = <0x01c20c00 0x90>;
                        interrupts = <22>;
                        clocks = <&osc24M>;
index c463fd730c9155d8c9a491cf176a7320831e29fd..b373c74a9b3dfc4c87faa2ce945f8943022e08cf 100644 (file)
                ranges;
 
                intc: interrupt-controller@01c20400 {
-                       compatible = "allwinner,sun4i-ic";
+                       compatible = "allwinner,sun4i-a10-ic";
                        reg = <0x01c20400 0x400>;
                        interrupt-controller;
                        #interrupt-cells = <1>;
                };
 
                timer@01c20c00 {
-                       compatible = "allwinner,sun4i-timer";
+                       compatible = "allwinner,sun4i-a10-timer";
                        reg = <0x01c20c00 0x90>;
                        interrupts = <22>;
                        clocks = <&osc24M>;
index 5256ad9be52c691022ce99e81b679b012f766350..38d43febda4c0f2721c249fe89ea69a4bc6ca63b 100644 (file)
                #size-cells = <1>;
                ranges;
 
+               nmi_intc: interrupt-controller@01f00c0c {
+                       compatible = "allwinner,sun6i-a31-sc-nmi";
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       reg = <0x01f00c0c 0x38>;
+                       interrupts = <0 32 4>;
+               };
+
                pio: pinctrl@01c20800 {
                        compatible = "allwinner,sun6i-a31-pinctrl";
                        reg = <0x01c20800 0x400>;
                };
 
                timer@01c20c00 {
-                       compatible = "allwinner,sun4i-timer";
+                       compatible = "allwinner,sun4i-a10-timer";
                        reg = <0x01c20c00 0xa0>;
                        interrupts = <0 18 4>,
                                     <0 19 4>,
index 6f25cf559ad0c99f5b376ba2a05e2e629878f35b..cadcf2f9881d460fd9fbdc6e40d072a8610bf8d6 100644 (file)
                #size-cells = <1>;
                ranges;
 
+               nmi_intc: interrupt-controller@01c00030 {
+                       compatible = "allwinner,sun7i-a20-sc-nmi";
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       reg = <0x01c00030 0x0c>;
+                       interrupts = <0 0 4>;
+               };
+
                emac: ethernet@01c0b000 {
                        compatible = "allwinner,sun4i-a10-emac";
                        reg = <0x01c0b000 0x1000>;
                };
 
                timer@01c20c00 {
-                       compatible = "allwinner,sun4i-timer";
+                       compatible = "allwinner,sun4i-a10-timer";
                        reg = <0x01c20c00 0x90>;
                        interrupts = <0 22 4>,
                                     <0 23 4>,
index 8b67b19392eca7ac1359dfe1db0ec4a16e5d1ade..789d0bacc11025ac2c333be1bb29748dcdeec3d2 100644 (file)
                        device_type = "cpu";
                        reg = <0>;
                        clocks = <&clkc 3>;
+                       operating-points = <
+                               /* kHz    uV */
+                               666667  1000000
+                               333334  1000000
+                               222223  1000000
+                       >;
                };
 
                cpu@1 {
index 3278afe2c3ab9014e2ed80904e5e6e98508e14f7..23e728ecf8ab3707403c811b6b43baead7198380 100644 (file)
@@ -7,16 +7,19 @@ generic-y += current.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
+generic-y += hash.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
 generic-y += poll.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += sections.h
 generic-y += segment.h
@@ -33,5 +36,3 @@ generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
-generic-y += preempt.h
-generic-y += hash.h
index 58b8b84adcd2cf5f295e6869b68350f9dcadc798..2fe85fff5ccacd3dfe67d72c4b6fd5d65342e5e9 100644 (file)
@@ -20,9 +20,6 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_core_cpumask(cpu)     (&cpu_topology[cpu].core_sibling)
 #define topology_thread_cpumask(cpu)   (&cpu_topology[cpu].thread_sibling)
 
-#define mc_capable()   (cpu_topology[0].socket_id != -1)
-#define smt_capable()  (cpu_topology[0].thread_id != -1)
-
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
index 92f7b15dd22121d4aa674fd78cd95cac8924c07f..adabeababeb03d5fc34efe76fa5c12d4eab90a5d 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/uaccess.h>
 #include <linux/random.h>
 #include <linux/hw_breakpoint.h>
-#include <linux/cpuidle.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
 
@@ -133,7 +132,11 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
 
 void (*arm_pm_idle)(void);
 
-static void default_idle(void)
+/*
+ * Called from the core idle loop.
+ */
+
+void arch_cpu_idle(void)
 {
        if (arm_pm_idle)
                arm_pm_idle();
@@ -167,15 +170,6 @@ void arch_cpu_idle_dead(void)
 }
 #endif
 
-/*
- * Called from the core idle loop.
- */
-void arch_cpu_idle(void)
-{
-       if (cpuidle_idle_call())
-               default_idle();
-}
-
 /*
  * Called by kexec, immediately prior to machine_kexec().
  *
index b7b4c86e338b0264919152caee8bec654e2b7757..7c4fada440f0fd5d4cdfe575b972beba689e6985 100644 (file)
@@ -674,8 +674,7 @@ static int cpufreq_callback(struct notifier_block *nb,
        }
 
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
                                                global_l_p_j_ref_freq,
                                                freq->new);
index 6591e26fc13f4eab5fcd3a7b1292a3ca7cf3bd92..dfc32130bc443ddc407d463b486109ba49632db2 100644 (file)
@@ -166,7 +166,7 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
         * frequency.  The timer is local to a cpu, so cross-call to the
         * changing cpu.
         */
-       if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+       if (state == CPUFREQ_POSTCHANGE)
                smp_call_function_single(freqs->cpu, twd_update_frequency,
                        NULL, 1);
 
index 2ab00434b2eb64405a2c4b75f315950cbe9663c0..85399c98f84a7f30657ecc57beb267d404593560 100644 (file)
@@ -472,7 +472,7 @@ static struct clk_lookup da850_clks[] = {
        CLK("spi_davinci.0",    NULL,           &spi0_clk),
        CLK("spi_davinci.1",    NULL,           &spi1_clk),
        CLK("vpif",             NULL,           &vpif_clk),
-       CLK("ahci",             NULL,           &sata_clk),
+       CLK("ahci_da850",               NULL,           &sata_clk),
        CLK("davinci-rproc.0",  NULL,           &dsp_clk),
        CLK("ehrpwm",           "fck",          &ehrpwm_clk),
        CLK("ehrpwm",           "tbclk",        &ehrpwm_tbclk),
index 0486cdf28c8d552205ff4285fe7d7476721cc2a6..56ea41d5f8491307ab89b3178365071cec34ee3d 100644 (file)
@@ -1020,111 +1020,29 @@ int __init da8xx_register_spi_bus(int instance, unsigned num_chipselect)
 }
 
 #ifdef CONFIG_ARCH_DAVINCI_DA850
-
 static struct resource da850_sata_resources[] = {
        {
                .start  = DA850_SATA_BASE,
                .end    = DA850_SATA_BASE + 0x1fff,
                .flags  = IORESOURCE_MEM,
        },
+       {
+               .start  = DA8XX_SYSCFG1_BASE + DA8XX_PWRDN_REG,
+               .end    = DA8XX_SYSCFG1_BASE + DA8XX_PWRDN_REG + 0x3,
+               .flags  = IORESOURCE_MEM,
+       },
        {
                .start  = IRQ_DA850_SATAINT,
                .flags  = IORESOURCE_IRQ,
        },
 };
 
-/* SATA PHY Control Register offset from AHCI base */
-#define SATA_P0PHYCR_REG       0x178
-
-#define SATA_PHY_MPY(x)                ((x) << 0)
-#define SATA_PHY_LOS(x)                ((x) << 6)
-#define SATA_PHY_RXCDR(x)      ((x) << 10)
-#define SATA_PHY_RXEQ(x)       ((x) << 13)
-#define SATA_PHY_TXSWING(x)    ((x) << 19)
-#define SATA_PHY_ENPLL(x)      ((x) << 31)
-
-static struct clk *da850_sata_clk;
-static unsigned long da850_sata_refclkpn;
-
-/* Supported DA850 SATA crystal frequencies */
-#define KHZ_TO_HZ(freq) ((freq) * 1000)
-static unsigned long da850_sata_xtal[] = {
-       KHZ_TO_HZ(300000),
-       KHZ_TO_HZ(250000),
-       0,                      /* Reserved */
-       KHZ_TO_HZ(187500),
-       KHZ_TO_HZ(150000),
-       KHZ_TO_HZ(125000),
-       KHZ_TO_HZ(120000),
-       KHZ_TO_HZ(100000),
-       KHZ_TO_HZ(75000),
-       KHZ_TO_HZ(60000),
-};
-
-static int da850_sata_init(struct device *dev, void __iomem *addr)
-{
-       int i, ret;
-       unsigned int val;
-
-       da850_sata_clk = clk_get(dev, NULL);
-       if (IS_ERR(da850_sata_clk))
-               return PTR_ERR(da850_sata_clk);
-
-       ret = clk_prepare_enable(da850_sata_clk);
-       if (ret)
-               goto err0;
-
-       /* Enable SATA clock receiver */
-       val = __raw_readl(DA8XX_SYSCFG1_VIRT(DA8XX_PWRDN_REG));
-       val &= ~BIT(0);
-       __raw_writel(val, DA8XX_SYSCFG1_VIRT(DA8XX_PWRDN_REG));
-
-       /* Get the multiplier needed for 1.5GHz PLL output */
-       for (i = 0; i < ARRAY_SIZE(da850_sata_xtal); i++)
-               if (da850_sata_xtal[i] == da850_sata_refclkpn)
-                       break;
-
-       if (i == ARRAY_SIZE(da850_sata_xtal)) {
-               ret = -EINVAL;
-               goto err1;
-       }
-
-       val = SATA_PHY_MPY(i + 1) |
-               SATA_PHY_LOS(1) |
-               SATA_PHY_RXCDR(4) |
-               SATA_PHY_RXEQ(1) |
-               SATA_PHY_TXSWING(3) |
-               SATA_PHY_ENPLL(1);
-
-       __raw_writel(val, addr + SATA_P0PHYCR_REG);
-
-       return 0;
-
-err1:
-       clk_disable_unprepare(da850_sata_clk);
-err0:
-       clk_put(da850_sata_clk);
-       return ret;
-}
-
-static void da850_sata_exit(struct device *dev)
-{
-       clk_disable_unprepare(da850_sata_clk);
-       clk_put(da850_sata_clk);
-}
-
-static struct ahci_platform_data da850_sata_pdata = {
-       .init   = da850_sata_init,
-       .exit   = da850_sata_exit,
-};
-
 static u64 da850_sata_dmamask = DMA_BIT_MASK(32);
 
 static struct platform_device da850_sata_device = {
-       .name   = "ahci",
+       .name   = "ahci_da850",
        .id     = -1,
        .dev    = {
-               .platform_data          = &da850_sata_pdata,
                .dma_mask               = &da850_sata_dmamask,
                .coherent_dma_mask      = DMA_BIT_MASK(32),
        },
@@ -1134,9 +1052,8 @@ static struct platform_device da850_sata_device = {
 
 int __init da850_register_sata(unsigned long refclkpn)
 {
-       da850_sata_refclkpn = refclkpn;
-       if (!da850_sata_refclkpn)
-               return -EINVAL;
+       /* please see comment in drivers/ata/ahci_da850.c */
+       BUG_ON(refclkpn != 100 * 1000 * 1000);
 
        return platform_device_register(&da850_sata_device);
 }
index 7a9b98589db7260ea0c56deca6872b3667158c2a..29e3fe6a66696b858850ca2190eb0383a72c9a3a 100644 (file)
@@ -120,7 +120,7 @@ static void imx6q_enable_wb(bool enable)
 
 int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
 {
-       struct irq_desc *iomuxc_irq_desc;
+       struct irq_data *iomuxc_irq_data = irq_get_irq_data(32);
        u32 val = readl_relaxed(ccm_base + CLPCR);
 
        val &= ~BM_CLPCR_LPM;
@@ -167,10 +167,9 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
         * 3) Software should mask IRQ #32 right after CCM Low-Power mode
         *    is set (set bits 0-1 of CCM_CLPCR).
         */
-       iomuxc_irq_desc = irq_to_desc(32);
-       imx_gpc_irq_unmask(&iomuxc_irq_desc->irq_data);
+       imx_gpc_irq_unmask(iomuxc_irq_data);
        writel_relaxed(val, ccm_base + CLPCR);
-       imx_gpc_irq_mask(&iomuxc_irq_desc->irq_data);
+       imx_gpc_irq_mask(iomuxc_irq_data);
 
        return 0;
 }
index 461a191a32d248ba3e2715aa9f3cfb607d6df7d8..43b1a516957fdfc2cc32b75a0e284289ad6d73ec 100644 (file)
 
 int mmp2_set_wake(struct irq_data *d, unsigned int on)
 {
-       int irq = d->irq;
-       struct irq_desc *desc = irq_to_desc(irq);
        unsigned long data = 0;
-
-       if (unlikely(irq >= nr_irqs)) {
-               pr_err("IRQ nubmers are out of boundary!\n");
-               return -EINVAL;
-       }
-
-       if (on) {
-               if (desc->action)
-                       desc->action->flags |= IRQF_NO_SUSPEND;
-       } else {
-               if (desc->action)
-                       desc->action->flags &= ~IRQF_NO_SUSPEND;
-       }
+       int irq = d->irq;
 
        /* enable wakeup sources */
        switch (irq) {
index 48981ca801a5527b8b2df375237d595ec98dc388..04c9daf9f8d767a7225631a7f36e443fae260d88 100644 (file)
 
 int pxa910_set_wake(struct irq_data *data, unsigned int on)
 {
-       int irq = data->irq;
-       struct irq_desc *desc = irq_to_desc(data->irq);
        uint32_t awucrm = 0, apcr = 0;
-
-       if (unlikely(irq >= nr_irqs)) {
-               pr_err("IRQ nubmers are out of boundary!\n");
-               return -EINVAL;
-       }
-
-       if (on) {
-               if (desc->action)
-                       desc->action->flags |= IRQF_NO_SUSPEND;
-       } else {
-               if (desc->action)
-                       desc->action->flags &= ~IRQF_NO_SUSPEND;
-       }
+       int irq = data->irq;
 
        /* setting wakeup sources */
        switch (irq) {
@@ -115,9 +101,11 @@ int pxa910_set_wake(struct irq_data *data, unsigned int on)
                if (irq >= IRQ_GPIO_START && irq < IRQ_BOARD_START) {
                        awucrm = MPMU_AWUCRM_WAKEUP(2);
                        apcr |= MPMU_APCR_SLPWP2;
-               } else
+               } else {
+                       /* FIXME: This should return a proper error code ! */
                        printk(KERN_ERR "Error: no defined wake up source irq: %d\n",
                                irq);
+               }
        }
 
        if (on) {
index f12a12af35237d434d8186838717850965443d08..d1f12095f3155d37ac72e61bc73d9cf2b84dcc2e 100644 (file)
@@ -44,13 +44,10 @@ static unsigned int irq_counter[16];
 
 static irqreturn_t deferred_fiq(int irq, void *dev_id)
 {
-       struct irq_desc *irq_desc;
-       struct irq_chip *irq_chip = NULL;
        int gpio, irq_num, fiq_count;
+       struct irq_chip *irq_chip;
 
-       irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
-       if (irq_desc)
-               irq_chip = irq_desc->irq_data.chip;
+       irq_chip = irq_get_chip(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
 
        /*
         * For each handled GPIO interrupt, keep calling its interrupt handler
index 29905b127ad988b68ec02a09b441cb48e9c59704..41f27f667ca89ccebdf0bd9b54b63f34922eb60a 100644 (file)
@@ -885,9 +885,6 @@ static int viper_cpufreq_notifier(struct notifier_block *nb,
                        viper_set_core_cpu_voltage(freq->new, 0);
                }
                break;
-       case CPUFREQ_RESUMECHANGE:
-               viper_set_core_cpu_voltage(freq->new, 0);
-               break;
        default:
                /* ignore */
                break;
index 05fa505df5850d0de63d013e9ebdecfdea1461e4..f6db7dcae3f4dd044bc8ffea87cbd7779dd4602b 100644 (file)
@@ -24,17 +24,21 @@ comment "Renesas ARM SoCs System Type"
 
 config ARCH_EMEV2
        bool "Emma Mobile EV2"
+       select SYS_SUPPORTS_EM_STI
 
 config ARCH_R7S72100
        bool "RZ/A1H (R7S72100)"
+       select SYS_SUPPORTS_SH_MTU2
 
 config ARCH_R8A7790
        bool "R-Car H2 (R8A77900)"
        select RENESAS_IRQC
+       select SYS_SUPPORTS_SH_CMT
 
 config ARCH_R8A7791
        bool "R-Car M2 (R8A77910)"
        select RENESAS_IRQC
+       select SYS_SUPPORTS_SH_CMT
 
 comment "Renesas ARM SoCs Board Type"
 
@@ -68,6 +72,8 @@ config ARCH_SH7372
        select ARM_CPU_SUSPEND if PM || CPU_IDLE
        select CPU_V7
        select SH_CLK_CPG
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_TMU
 
 config ARCH_SH73A0
        bool "SH-Mobile AG5 (R8A73A00)"
@@ -77,6 +83,8 @@ config ARCH_SH73A0
        select I2C
        select SH_CLK_CPG
        select RENESAS_INTC_IRQPIN
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A73A4
        bool "R-Mobile APE6 (R8A73A40)"
@@ -87,6 +95,8 @@ config ARCH_R8A73A4
        select RENESAS_IRQC
        select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7740
        bool "R-Mobile A1 (R8A77400)"
@@ -95,6 +105,8 @@ config ARCH_R8A7740
        select CPU_V7
        select SH_CLK_CPG
        select RENESAS_INTC_IRQPIN
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7778
        bool "R-Car M1A (R8A77781)"
@@ -104,6 +116,7 @@ config ARCH_R8A7778
        select ARM_GIC
        select USB_ARCH_HAS_EHCI
        select USB_ARCH_HAS_OHCI
+       select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7779
        bool "R-Car H1 (R8A77790)"
@@ -114,6 +127,7 @@ config ARCH_R8A7779
        select USB_ARCH_HAS_EHCI
        select USB_ARCH_HAS_OHCI
        select RENESAS_INTC_IRQPIN
+       select SYS_SUPPORTS_SH_TMU
 
 config ARCH_R8A7790
        bool "R-Car H2 (R8A77900)"
@@ -123,6 +137,7 @@ config ARCH_R8A7790
        select MIGHT_HAVE_PCI
        select SH_CLK_CPG
        select RENESAS_IRQC
+       select SYS_SUPPORTS_SH_CMT
 
 config ARCH_R8A7791
        bool "R-Car M2 (R8A77910)"
@@ -132,6 +147,7 @@ config ARCH_R8A7791
        select MIGHT_HAVE_PCI
        select SH_CLK_CPG
        select RENESAS_IRQC
+       select SYS_SUPPORTS_SH_CMT
 
 config ARCH_EMEV2
        bool "Emma Mobile EV2"
@@ -141,6 +157,7 @@ config ARCH_EMEV2
        select MIGHT_HAVE_PCI
        select USE_OF
        select AUTO_ZRELADDR
+       select SYS_SUPPORTS_EM_STI
 
 config ARCH_R7S72100
        bool "RZ/A1H (R7S72100)"
@@ -148,6 +165,7 @@ config ARCH_R7S72100
        select ARM_GIC
        select CPU_V7
        select SH_CLK_CPG
+       select SYS_SUPPORTS_SH_MTU2
 
 comment "Renesas ARM SoCs Board Type"
 
@@ -321,24 +339,6 @@ config SHMOBILE_TIMER_HZ
          want to select a HZ value such as 128 that can evenly divide RCLK.
          A HZ value that does not divide evenly may cause timer drift.
 
-config SH_TIMER_CMT
-       bool "CMT timer driver"
-       default y
-       help
-         This enables build of the CMT timer driver.
-
-config SH_TIMER_TMU
-       bool "TMU timer driver"
-       default y
-       help
-         This enables build of the TMU timer driver.
-
-config EM_TIMER_STI
-       bool "STI timer driver"
-       default y
-       help
-         This enables build of the STI timer driver.
-
 endmenu
 
 endif
index 7ad003001ab7639e71b4df8309b73707a25df98e..824b12a56a422dc8c6b941b16746027305b80a98 100644 (file)
@@ -28,6 +28,7 @@
 static void __init spear1310_dt_init(void)
 {
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+       platform_device_register_simple("spear-cpufreq", -1, NULL, 0);
 }
 
 static const char * const spear1310_dt_board_compat[] = {
index 3fb6834247298c32cbe2b82542f79a159cc6d74a..7b6bff7154e11ee81f6f5be1e962df81665e888c 100644 (file)
@@ -143,6 +143,7 @@ static void __init spear1340_dt_init(void)
 {
        of_platform_populate(NULL, of_default_bus_match_table,
                        spear1340_auxdata_lookup, NULL);
+       platform_device_register_simple("spear-cpufreq", -1, NULL, 0);
 }
 
 static const char * const spear1340_dt_board_compat[] = {
index 0f362b64fb878c02a03d4647ba7076353631b926..3ec74ac95bc1c1087c04776c0c1b2e6176273f4f 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel, U300 machine.
 #
 
-obj-y          := core.o timer.o
+obj-y          := core.o
 obj-m          :=
 obj-n          :=
 obj-           :=
index 6b04260aa142da12e290b20aab00a35cc105d364..f03e75bd0b2b232f9afdfbbd4ea30e5da95a0adf 100644 (file)
@@ -2,6 +2,8 @@ config ARCH_ZYNQ
        bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
        select ARM_AMBA
        select ARM_GIC
+       select ARCH_HAS_CPUFREQ
+       select ARCH_HAS_OPP
        select COMMON_CLK
        select CPU_V7
        select GENERIC_CLOCKEVENTS
@@ -13,6 +15,6 @@ config ARCH_ZYNQ
        select HAVE_SMP
        select SPARSE_IRQ
        select CADENCE_TTC_TIMER
-       select ARM_GLOBAL_TIMER
+       select ARM_GLOBAL_TIMER if !CPU_FREQ
        help
          Support for Xilinx Zynq ARM Cortex A9 Platform
index 8c09a8393fb63056a171e12351e3a52d35d5de4d..a39be8e8085607c49156af97a4ba46462e0dac26 100644 (file)
@@ -64,6 +64,8 @@ static struct platform_device zynq_cpuidle_device = {
  */
 static void __init zynq_init_machine(void)
 {
+       struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
+
        /*
         * 64KB way size, 8-way associativity, parity disabled
         */
@@ -72,6 +74,7 @@ static void __init zynq_init_machine(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 
        platform_device_register(&zynq_cpuidle_device);
+       platform_device_register_full(&devinfo);
 }
 
 static void __init zynq_timer_init(void)
index 27bbcfc7202a8df09f6ec35cd3939f0f5c26edfd..07aa3556952cd773adf64d1ff8d2e2a63f8e3faa 100644 (file)
@@ -16,6 +16,7 @@ config ARM64
        select DCACHE_WORD_ACCESS
        select GENERIC_CLOCKEVENTS
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+       select GENERIC_CPU_AUTOPROBE
        select GENERIC_IOMAP
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
@@ -26,6 +27,7 @@ config ARM64
        select GENERIC_TIME_VSYSCALL
        select HARDIRQS_SW_RESEND
        select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
@@ -38,6 +40,8 @@ config ARM64
        select HAVE_MEMBLOCK
        select HAVE_PATA_PLATFORM
        select HAVE_PERF_EVENTS
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
        select IRQ_DOMAIN
        select MODULES_USE_ELF_RELA
        select NO_BOOTMEM
@@ -73,7 +77,7 @@ config LOCKDEP_SUPPORT
 config TRACE_IRQFLAGS_SUPPORT
        def_bool y
 
-config RWSEM_GENERIC_SPINLOCK
+config RWSEM_XCHGADD_ALGORITHM
        def_bool y
 
 config GENERIC_HWEIGHT
@@ -85,7 +89,7 @@ config GENERIC_CSUM
 config GENERIC_CALIBRATE_DELAY
        def_bool y
 
-config ZONE_DMA32
+config ZONE_DMA
        def_bool y
 
 config ARCH_DMA_ADDR_T_64BIT
@@ -164,6 +168,22 @@ config SMP
 
          If you don't know what to do here, say N.
 
+config SCHED_MC
+       bool "Multi-core scheduler support"
+       depends on SMP
+       help
+         Multi-core scheduler support improves the CPU scheduler's decision
+         making when dealing with multi-core CPU chips at a cost of slightly
+         increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+       bool "SMT scheduler support"
+       depends on SMP
+       help
+         Improves the CPU scheduler's decision making when dealing with
+         MultiThreading at a cost of slightly increased overhead in some
+         places. If unsure say N here.
+
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
@@ -301,6 +321,16 @@ menu "CPU Power Management"
 
 source "drivers/cpuidle/Kconfig"
 
+source "drivers/cpufreq/Kconfig"
+
+endmenu
+
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/cpufreq/Kconfig"
+
 endmenu
 
 source "net/Kconfig"
index d37d7369e260f3f7d2564711217f6d6ac19a859a..93f4b2dd92484863e8015da4a622a0c17745de5a 100644 (file)
                                reg-names = "csr-reg";
                                clock-output-names = "eth8clk";
                        };
+
+                       sataphy1clk: sataphy1clk@1f21c000 {
+                               compatible = "apm,xgene-device-clock";
+                               #clock-cells = <1>;
+                               clocks = <&socplldiv2 0>;
+                               reg = <0x0 0x1f21c000 0x0 0x1000>;
+                               reg-names = "csr-reg";
+                               clock-output-names = "sataphy1clk";
+                               status = "disabled";
+                               csr-offset = <0x4>;
+                               csr-mask = <0x00>;
+                               enable-offset = <0x0>;
+                               enable-mask = <0x06>;
+                       };
+
+                       sataphy2clk: sataphy1clk@1f22c000 {
+                               compatible = "apm,xgene-device-clock";
+                               #clock-cells = <1>;
+                               clocks = <&socplldiv2 0>;
+                               reg = <0x0 0x1f22c000 0x0 0x1000>;
+                               reg-names = "csr-reg";
+                               clock-output-names = "sataphy2clk";
+                               status = "ok";
+                               csr-offset = <0x4>;
+                               csr-mask = <0x3a>;
+                               enable-offset = <0x0>;
+                               enable-mask = <0x06>;
+                       };
+
+                       sataphy3clk: sataphy1clk@1f23c000 {
+                               compatible = "apm,xgene-device-clock";
+                               #clock-cells = <1>;
+                               clocks = <&socplldiv2 0>;
+                               reg = <0x0 0x1f23c000 0x0 0x1000>;
+                               reg-names = "csr-reg";
+                               clock-output-names = "sataphy3clk";
+                               status = "ok";
+                               csr-offset = <0x4>;
+                               csr-mask = <0x3a>;
+                               enable-offset = <0x0>;
+                               enable-mask = <0x06>;
+                       };
+
+                       sata01clk: sata01clk@1f21c000 {
+                               compatible = "apm,xgene-device-clock";
+                               #clock-cells = <1>;
+                               clocks = <&socplldiv2 0>;
+                               reg = <0x0 0x1f21c000 0x0 0x1000>;
+                               reg-names = "csr-reg";
+                               clock-output-names = "sata01clk";
+                               csr-offset = <0x4>;
+                               csr-mask = <0x05>;
+                               enable-offset = <0x0>;
+                               enable-mask = <0x39>;
+                       };
+
+                       sata23clk: sata23clk@1f22c000 {
+                               compatible = "apm,xgene-device-clock";
+                               #clock-cells = <1>;
+                               clocks = <&socplldiv2 0>;
+                               reg = <0x0 0x1f22c000 0x0 0x1000>;
+                               reg-names = "csr-reg";
+                               clock-output-names = "sata23clk";
+                               csr-offset = <0x4>;
+                               csr-mask = <0x05>;
+                               enable-offset = <0x0>;
+                               enable-mask = <0x39>;
+                       };
+
+                       sata45clk: sata45clk@1f23c000 {
+                               compatible = "apm,xgene-device-clock";
+                               #clock-cells = <1>;
+                               clocks = <&socplldiv2 0>;
+                               reg = <0x0 0x1f23c000 0x0 0x1000>;
+                               reg-names = "csr-reg";
+                               clock-output-names = "sata45clk";
+                               csr-offset = <0x4>;
+                               csr-mask = <0x05>;
+                               enable-offset = <0x0>;
+                               enable-mask = <0x39>;
+                       };
                };
 
                serial0: serial@1c020000 {
                        interrupt-parent = <&gic>;
                        interrupts = <0x0 0x4c 0x4>;
                };
+
+               phy1: phy@1f21a000 {
+                       compatible = "apm,xgene-phy";
+                       reg = <0x0 0x1f21a000 0x0 0x100>;
+                       #phy-cells = <1>;
+                       clocks = <&sataphy1clk 0>;
+                       status = "disabled";
+                       apm,tx-boost-gain = <30 30 30 30 30 30>;
+                       apm,tx-eye-tuning = <2 10 10 2 10 10>;
+               };
+
+               phy2: phy@1f22a000 {
+                       compatible = "apm,xgene-phy";
+                       reg = <0x0 0x1f22a000 0x0 0x100>;
+                       #phy-cells = <1>;
+                       clocks = <&sataphy2clk 0>;
+                       status = "ok";
+                       apm,tx-boost-gain = <30 30 30 30 30 30>;
+                       apm,tx-eye-tuning = <1 10 10 2 10 10>;
+               };
+
+               phy3: phy@1f23a000 {
+                       compatible = "apm,xgene-phy";
+                       reg = <0x0 0x1f23a000 0x0 0x100>;
+                       #phy-cells = <1>;
+                       clocks = <&sataphy3clk 0>;
+                       status = "ok";
+                       apm,tx-boost-gain = <31 31 31 31 31 31>;
+                       apm,tx-eye-tuning = <2 10 10 2 10 10>;
+               };
+
+               sata1: sata@1a000000 {
+                       compatible = "apm,xgene-ahci";
+                       reg = <0x0 0x1a000000 0x0 0x1000>,
+                             <0x0 0x1f210000 0x0 0x1000>,
+                             <0x0 0x1f21d000 0x0 0x1000>,
+                             <0x0 0x1f21e000 0x0 0x1000>,
+                             <0x0 0x1f217000 0x0 0x1000>;
+                       interrupts = <0x0 0x86 0x4>;
+                       status = "disabled";
+                       clocks = <&sata01clk 0>;
+                       phys = <&phy1 0>;
+                       phy-names = "sata-phy";
+               };
+
+               sata2: sata@1a400000 {
+                       compatible = "apm,xgene-ahci";
+                       reg = <0x0 0x1a400000 0x0 0x1000>,
+                             <0x0 0x1f220000 0x0 0x1000>,
+                             <0x0 0x1f22d000 0x0 0x1000>,
+                             <0x0 0x1f22e000 0x0 0x1000>,
+                             <0x0 0x1f227000 0x0 0x1000>;
+                       interrupts = <0x0 0x87 0x4>;
+                       status = "ok";
+                       clocks = <&sata23clk 0>;
+                       phys = <&phy2 0>;
+                       phy-names = "sata-phy";
+               };
+
+               sata3: sata@1a800000 {
+                       compatible = "apm,xgene-ahci";
+                       reg = <0x0 0x1a800000 0x0 0x1000>,
+                             <0x0 0x1f230000 0x0 0x1000>,
+                             <0x0 0x1f23d000 0x0 0x1000>,
+                             <0x0 0x1f23e000 0x0 0x1000>;
+                       interrupts = <0x0 0x88 0x4>;
+                       status = "ok";
+                       clocks = <&sata45clk 0>;
+                       phys = <&phy3 0>;
+                       phy-names = "sata-phy";
+               };
        };
 };
index 71c53ecfcc3ae356f0c5d0bd4c593e3b7bef8b89..4bca4923fc0b7eef27e37532358fe7eb48aa1224 100644 (file)
@@ -12,6 +12,7 @@ generic-y += dma.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += ftrace.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
@@ -22,13 +23,16 @@ generic-y += kmap_types.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += pci.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += segment.h
@@ -38,8 +42,8 @@ generic-y += shmbuf.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
-generic-y += switch_to.h
 generic-y += swab.h
+generic-y += switch_to.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
@@ -49,5 +53,3 @@ generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 409ca370cfe2ddccd245efc3f3b0a20fe2360d49..66eb7648043bc9c1ba6383b0e2187a74c8dc8d5c 100644 (file)
@@ -25,6 +25,7 @@
 #define wfi()          asm volatile("wfi" : : : "memory")
 
 #define isb()          asm volatile("isb" : : : "memory")
+#define dmb(opt)       asm volatile("dmb sy" : : : "memory")
 #define dsb(opt)       asm volatile("dsb sy" : : : "memory")
 
 #define mb()           dsb()
index 889324981aa4f569a77e6a5385897b6493c85526..4c60e64a801c5cf3a3c5e379a3270ea3ff31eea5 100644 (file)
@@ -84,6 +84,13 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
 {
 }
 
+/*
+ * Cache maintenance functions used by the DMA API. Not to be used directly.
+ */
+extern void __dma_map_area(const void *, size_t, int);
+extern void __dma_unmap_area(const void *, size_t, int);
+extern void __dma_flush_range(const void *, const void *);
+
 /*
  * Copy user data from/to a page which is mapped into a different
  * processes address space.  Really, we want to allow our "user
index fda2704b3f9f9a49354e12dc6b55aaa81638cd5a..e71f81fe127a504838ba86c5290d99ce10b5411f 100644 (file)
@@ -228,7 +228,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
        return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (current_pt_regs()->compat_sp)
+#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
new file mode 100644 (file)
index 0000000..cd4ac05
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <asm/hwcap.h>
+
+/*
+ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
+ * in the kernel and for user space to keep track of which optional features
+ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
+ * Note that HWCAP_x constants are bit fields so we need to take the log.
+ */
+
+#define MAX_CPU_FEATURES       (8 * sizeof(elf_hwcap))
+#define cpu_feature(x)         ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+       return elf_hwcap & (1UL << num);
+}
+
+#endif
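
For reference, a minimal usage sketch of the new cpu_feature()/cpu_have_feature() helpers (illustrative only, not part of this commit; it assumes the native HWCAP_AES hwcap bit is defined on the kernel in question):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <asm/cpufeature.h>

    static int __init aes_consumer_init(void)       /* hypothetical consumer */
    {
            /* cpu_feature(AES) == ilog2(HWCAP_AES), i.e. the hwcap bit number */
            if (!cpu_have_feature(cpu_feature(AES)))
                    return -ENODEV;
            /* ... set up AES-accelerated code paths ... */
            return 0;
    }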
index 62314791570cbdda16b7e575c2aa7fc48ed5d45a..6e9b5b36921cce2d26276d12c5379514b420f2ff 100644 (file)
 #define DBG_ESR_EVT_HWWP       0x2
 #define DBG_ESR_EVT_BRK                0x6
 
+/*
+ * Break point instruction encoding
+ */
+#define BREAK_INSTR_SIZE               4
+
+/*
+ * ESR values expected for dynamic and compile time BRK instruction
+ */
+#define DBG_ESR_VAL_BRK(x)     (0xf2000000 | ((x) & 0xfffff))
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_IMM           0x400
+#define KDBG_COMPILED_DBG_BRK_IMM      0x401
+
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within BRK ins
+ */
+#define AARCH64_BREAK_MON      0xd4200000
+
+/*
+ * Extract byte from BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+       ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
+
+/*
+ * Extract byte from BRK #imm16
+ */
+#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
+       (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+
+#define KGDB_DYN_DGB_BRK_BYTE(x) \
+       (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+
+#define  KGDB_DYN_BRK_INS_BYTE0  KGDB_DYN_DGB_BRK_BYTE(0)
+#define  KGDB_DYN_BRK_INS_BYTE1  KGDB_DYN_DGB_BRK_BYTE(1)
+#define  KGDB_DYN_BRK_INS_BYTE2  KGDB_DYN_DGB_BRK_BYTE(2)
+#define  KGDB_DYN_BRK_INS_BYTE3  KGDB_DYN_DGB_BRK_BYTE(3)
+
+#define CACHE_FLUSH_IS_SAFE            1
+
 enum debug_el {
        DBG_ACTIVE_EL0 = 0,
        DBG_ACTIVE_EL1,
@@ -43,23 +90,6 @@ enum debug_el {
 #ifndef __ASSEMBLY__
 struct task_struct;
 
-#define local_dbg_save(flags)                                                  \
-       do {                                                                    \
-               typecheck(unsigned long, flags);                                \
-               asm volatile(                                                   \
-               "mrs    %0, daif                        // local_dbg_save\n"    \
-               "msr    daifset, #8"                                            \
-               : "=r" (flags) : : "memory");                                   \
-       } while (0)
-
-#define local_dbg_restore(flags)                                               \
-       do {                                                                    \
-               typecheck(unsigned long, flags);                                \
-               asm volatile(                                                   \
-               "msr    daif, %0                        // local_dbg_restore\n" \
-               : : "r" (flags) : "memory");                                    \
-       } while (0)
-
 #define DBG_ARCH_ID_RESERVED   0       /* In case of ptrace ABI updates. */
 
 #define DBG_HOOK_HANDLED       0
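
For reference, a hand-computed expansion of the byte-extraction macros above (not part of the commit):

    /*
     * The dynamic KGDB breakpoint is BRK #0x400:
     *   AARCH64_BREAK_MON | (KGDB_DYN_DGB_BRK_IMM << 5)
     *     = 0xd4200000    | 0x8000
     *     = 0xd4208000
     * Split into little-endian bytes by KGDB_DYN_DGB_BRK_BYTE(x):
     *   KGDB_DYN_BRK_INS_BYTE0 = 0x00
     *   KGDB_DYN_BRK_INS_BYTE1 = 0x80
     *   KGDB_DYN_BRK_INS_BYTE2 = 0x20
     *   KGDB_DYN_BRK_INS_BYTE3 = 0xd4
     * which is the byte order used by arch_kgdb_ops.gdb_bpt_instr in
     * arch/arm64/kernel/kgdb.c further down in this diff.
     */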
index fd0c0c0e447a657115c116f45bf9edd693b339a3..3a4572ec3273267c04334057b944739fe56d73e2 100644 (file)
@@ -30,6 +30,8 @@
 
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops coherent_swiotlb_dma_ops;
+extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
@@ -47,6 +49,11 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
                return __generic_dma_ops(dev);
 }
 
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+       dev->archdata.dma_ops = ops;
+}
+
 #include <asm-generic/dma-mapping-common.h>
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
index 6cddbb0c9f5459cff851101fd3010ad74882a1ef..024c46183c3cc4bac07977ffcdade60c2567ea98 100644 (file)
 #define COMPAT_HWCAP_IDIV      (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
 #define COMPAT_HWCAP_EVTSTRM   (1 << 21)
 
+#define COMPAT_HWCAP2_AES      (1 << 0)
+#define COMPAT_HWCAP2_PMULL    (1 << 1)
+#define COMPAT_HWCAP2_SHA1     (1 << 2)
+#define COMPAT_HWCAP2_SHA2     (1 << 3)
+#define COMPAT_HWCAP2_CRC32    (1 << 4)
+
 #ifndef __ASSEMBLY__
 /*
  * This yields a mask that user programs can use to figure out what
@@ -41,7 +47,8 @@
 
 #ifdef CONFIG_COMPAT
 #define COMPAT_ELF_HWCAP       (compat_elf_hwcap)
-extern unsigned int compat_elf_hwcap;
+#define COMPAT_ELF_HWCAP2      (compat_elf_hwcap2)
+extern unsigned int compat_elf_hwcap, compat_elf_hwcap2;
 #endif
 
 extern unsigned long elf_hwcap;
index 4cc813eddacbebee4c84f864f103bb6492d7c193..7846a6bb08334dec2833ef8c09d60373a55fe3fa 100644 (file)
@@ -121,7 +121,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
  *  I/O port access primitives.
  */
 #define IO_SPACE_LIMIT         0xffff
-#define PCI_IOBASE             ((void __iomem *)(MODULES_VADDR - SZ_2M))
+#define PCI_IOBASE             ((void __iomem *)(MODULES_VADDR - SZ_32M))
 
 static inline u8 inb(unsigned long addr)
 {
index b2fcfbc51ecc4b0eaef6e4b20efd0f7afd2a9efd..11cc941bd107b5cb2457101ea15ac772e2eb699a 100644 (file)
@@ -90,5 +90,28 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
        return flags & PSR_I_BIT;
 }
 
+/*
+ * save and restore debug state
+ */
+#define local_dbg_save(flags)                                          \
+       do {                                                            \
+               typecheck(unsigned long, flags);                        \
+               asm volatile(                                           \
+               "mrs    %0, daif                // local_dbg_save\n"    \
+               "msr    daifset, #8"                                    \
+               : "=r" (flags) : : "memory");                           \
+       } while (0)
+
+#define local_dbg_restore(flags)                                       \
+       do {                                                            \
+               typecheck(unsigned long, flags);                        \
+               asm volatile(                                           \
+               "msr    daif, %0                // local_dbg_restore\n" \
+               : : "r" (flags) : "memory");                            \
+       } while (0)
+
+#define local_dbg_enable()     asm("msr        daifclr, #8" : : : "memory")
+#define local_dbg_disable()    asm("msr        daifset, #8" : : : "memory")
+
 #endif
 #endif
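
A minimal usage sketch of the relocated debug-state macros (illustrative only, not part of the commit):

    {
            unsigned long flags;

            local_dbg_save(flags);          /* mask debug exceptions (DAIF.D) */
            /* ... poke hardware debug registers without recursing ... */
            local_dbg_restore(flags);       /* restore the previous D-bit state */
    }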
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
new file mode 100644 (file)
index 0000000..3c8aafc
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/include/kgdb.h
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KGDB_H
+#define __ARM_KGDB_H
+
+#include <linux/ptrace.h>
+#include <asm/debug-monitors.h>
+
+#ifndef        __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+       asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * gdb is expecting the following registers layout.
+ *
+ * General purpose regs:
+ *     r0-r30: 64 bit
+ *     sp,pc : 64 bit
+ *     pstate  : 64 bit
+ *     Total: 34
+ * FPU regs:
+ *     f0-f31: 128 bit
+ *     Total: 32
+ * Extra regs
+ *     fpsr & fpcr: 32 bit
+ *     Total: 2
+ *
+ */
+
+#define _GP_REGS               34
+#define _FP_REGS               32
+#define _EXTRA_REGS            2
+/*
+ * general purpose registers size in bytes.
+ * pstate is only 4 bytes. subtract 4 bytes
+ */
+#define GP_REG_BYTES           (_GP_REGS * 8)
+#define DBG_MAX_REG_NUM                (_GP_REGS + _FP_REGS + _EXTRA_REGS)
+
+/*
+ * Size of I/O buffer for gdb packet.
+ * considering to hold all register contents, size is set
+ */
+
+#define BUFMAX                 2048
+
+/*
+ * Number of bytes required for gdb_regs buffer.
+ * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each
+ * GDB fails to connect for size beyond this with error
+ * "'g' packet reply is too long"
+ */
+
+#define NUMREGBYTES    ((_GP_REGS * 8) + (_FP_REGS * 16) + \
+                       (_EXTRA_REGS * 4))
+
+#endif /* __ASM_KGDB_H */
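
A hand-computed sanity check of the sizes above (not part of the commit):

    /*
     *   GP_REG_BYTES = 34 * 8             = 272 bytes
     *   NUMREGBYTES  = 34*8 + 32*16 + 2*4 = 272 + 512 + 8 = 792 bytes
     * A 'g' packet reply hex-encodes every byte, so 792 * 2 = 1584 characters,
     * which fits comfortably within BUFMAX (2048).
     */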
index 0eb39865537839c202879251fbb60f919328d58e..21ef48d32ff271fbdccba7f2df710b021f9db7e0 100644 (file)
 
 /* VTCR_EL2 Registers bits */
 #define VTCR_EL2_PS_MASK       (7 << 16)
-#define VTCR_EL2_PS_40B                (2 << 16)
 #define VTCR_EL2_TG0_MASK      (1 << 14)
 #define VTCR_EL2_TG0_4K                (0 << 14)
 #define VTCR_EL2_TG0_64K       (1 << 14)
  * 64kB pages (TG0 = 1)
  * 2 level page tables (SL = 1)
  */
-#define VTCR_EL2_FLAGS         (VTCR_EL2_PS_40B | VTCR_EL2_TG0_64K | \
-                                VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
-                                VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
-                                VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
+                                VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
 #define VTTBR_X                (38 - VTCR_EL2_T0SZ_40B)
 #else
 /*
  * 4kB pages (TG0 = 0)
  * 3 level page tables (SL = 1)
  */
-#define VTCR_EL2_FLAGS         (VTCR_EL2_PS_40B | VTCR_EL2_TG0_4K | \
-                                VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
-                                VTCR_EL2_IRGN0_WBWA | VTCR_EL2_SL0_LVL1 | \
-                                VTCR_EL2_T0SZ_40B)
+#define VTCR_EL2_FLAGS         (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
+                                VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
+                                VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
 #define VTTBR_X                (37 - VTCR_EL2_T0SZ_40B)
 #endif
 
index b1d2e26c3c883e7259f0ab679efe33ffe2935429..f7af66b54cb216931f23718cc75754d269acefc4 100644 (file)
 #define PTE_HYP                        PTE_USER
 
 /*
- * 40-bit physical address supported.
+ * Highest possible physical address supported.
  */
-#define PHYS_MASK_SHIFT                (40)
+#define PHYS_MASK_SHIFT                (48)
 #define PHYS_MASK              ((UL(1) << PHYS_MASK_SHIFT) - 1)
 
 /*
 #define TCR_SHARED             ((UL(3) << 12) | (UL(3) << 28))
 #define TCR_TG0_64K            (UL(1) << 14)
 #define TCR_TG1_64K            (UL(1) << 30)
-#define TCR_IPS_40BIT          (UL(2) << 32)
 #define TCR_ASID16             (UL(1) << 36)
 #define TCR_TBI0               (UL(1) << 37)
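
Worked value for the new limit (not part of the commit):

    /*
     *   PHYS_MASK = (UL(1) << 48) - 1 = 0x0000ffffffffffff
     * i.e. the ARMv8 architectural maximum of 48 physical address bits,
     * replacing the previous hard-coded 40-bit limit.
     */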
 
index aa3917c8b62318aef9424735d560be305f3465d3..90c811f05a2e3279a8709211a770725141491dd7 100644 (file)
@@ -199,7 +199,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
 {
        if (pte_valid_user(pte)) {
-               if (pte_exec(pte))
+               if (!pte_special(pte) && pte_exec(pte))
                        __sync_icache_dcache(pte, addr);
                if (pte_dirty(pte) && pte_write(pte))
                        pte_val(pte) &= ~PTE_RDONLY;
@@ -227,36 +227,36 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #define __HAVE_ARCH_PTE_SPECIAL
 
-/*
- * Software PMD bits for THP
- */
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+       return __pte(pmd_val(pmd));
+}
 
-#define PMD_SECT_DIRTY         (_AT(pmdval_t, 1) << 55)
-#define PMD_SECT_SPLITTING     (_AT(pmdval_t, 1) << 57)
+static inline pmd_t pte_pmd(pte_t pte)
+{
+       return __pmd(pte_val(pte));
+}
 
 /*
  * THP definitions.
  */
-#define pmd_young(pmd)         (pmd_val(pmd) & PMD_SECT_AF)
-
-#define __HAVE_ARCH_PMD_WRITE
-#define pmd_write(pmd)         (!(pmd_val(pmd) & PMD_SECT_RDONLY))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)    (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
-#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
+#define pmd_trans_splitting(pmd)       pte_special(pmd_pte(pmd))
 #endif
 
-#define PMD_BIT_FUNC(fn,op) \
-static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
+#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd)   pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
 
-PMD_BIT_FUNC(wrprotect,        |= PMD_SECT_RDONLY);
-PMD_BIT_FUNC(mkold,    &= ~PMD_SECT_AF);
-PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
-PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
-PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
-PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
-PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
 
 #define pmd_mkhuge(pmd)                (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
 
@@ -266,15 +266,6 @@ PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
 
 #define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
-{
-       const pmdval_t mask = PMD_SECT_USER | PMD_SECT_PXN | PMD_SECT_UXN |
-                             PMD_SECT_RDONLY | PMD_SECT_PROT_NONE |
-                             PMD_SECT_VALID;
-       pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
-       return pmd;
-}
-
 #define set_pmd_at(mm, addr, pmdp, pmd)        set_pmd(pmdp, pmd)
 
 static inline int has_transparent_hugepage(void)
@@ -286,11 +277,9 @@ static inline int has_transparent_hugepage(void)
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
-       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
 #define pgprot_writecombine(prot) \
-       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
-#define pgprot_dmacoherent(prot) \
-       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+       __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -383,6 +372,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
        return pte;
 }
 
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+       return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
index e5312ea0ec1a59bdd92934926da81155a6ce3e12..d15ab8b463360869553f4ecfa4cc255d4c8455e3 100644 (file)
@@ -14,6 +14,6 @@
 #ifndef __ASM_PSCI_H
 #define __ASM_PSCI_H
 
-int psci_init(void);
+void psci_init(void);
 
 #endif /* __ASM_PSCI_H */
index 0e7fa49637359ab3cd4757565174d4e63f191cf0..c7ba261dd4b37b8b1ecb1a06d5a4fe7b583a3d92 100644 (file)
@@ -68,6 +68,7 @@
 
 /* Architecturally defined mapping between AArch32 and AArch64 registers */
 #define compat_usr(x)  regs[(x)]
+#define compat_fp      regs[11]
 #define compat_sp      regs[13]
 #define compat_lr      regs[14]
 #define compat_sp_hyp  regs[15]
@@ -132,7 +133,7 @@ struct pt_regs {
        (!((regs)->pstate & PSR_F_BIT))
 
 #define user_stack_pointer(regs) \
-       ((regs)->sp)
+       (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp)
 
 /*
  * Are the current registers suitable for user mode? (used to maintain
@@ -164,7 +165,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs)
        return 0;
 }
 
-#define instruction_pointer(regs)      (regs)->pc
+#define instruction_pointer(regs)      ((unsigned long)(regs)->pc)
 
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
index 717031a762c27966aabc7786f6d2a900034b0b08..72cadf52ca807f181261b1599b25944374de5544 100644 (file)
 #ifndef __ASM_TLB_H
 #define __ASM_TLB_H
 
-#include <linux/pagemap.h>
-#include <linux/swap.h>
 
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-
-#define MMU_GATHER_BUNDLE      8
-
-/*
- * TLB handling.  This allows us to remove pages from the page
- * tables, and efficiently handle the TLB issues.
- */
-struct mmu_gather {
-       struct mm_struct        *mm;
-       unsigned int            fullmm;
-       struct vm_area_struct   *vma;
-       unsigned long           start, end;
-       unsigned long           range_start;
-       unsigned long           range_end;
-       unsigned int            nr;
-       unsigned int            max;
-       struct page             **pages;
-       struct page             *local[MMU_GATHER_BUNDLE];
-};
+#include <asm-generic/tlb.h>
 
 /*
- * This is unnecessarily complex.  There's three ways the TLB shootdown
- * code is used:
+ * There's three ways the TLB shootdown code is used:
  *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
  *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
- *     tlb->vma will be non-NULL.
  *  2. Unmapping all vmas.  See exit_mmap().
  *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
- *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
+ *     Page tables will be freed.
  *  3. Unmapping argument pages.  See shift_arg_pages().
  *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
- *     tlb->vma will be NULL.
  */
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       if (tlb->fullmm || !tlb->vma)
+       if (tlb->fullmm) {
                flush_tlb_mm(tlb->mm);
-       else if (tlb->range_end > 0) {
-               flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
-               tlb->range_start = TASK_SIZE;
-               tlb->range_end = 0;
+       } else if (tlb->end > 0) {
+               struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+               flush_tlb_range(&vma, tlb->start, tlb->end);
+               tlb->start = TASK_SIZE;
+               tlb->end = 0;
        }
 }
 
 static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
 {
        if (!tlb->fullmm) {
-               if (addr < tlb->range_start)
-                       tlb->range_start = addr;
-               if (addr + PAGE_SIZE > tlb->range_end)
-                       tlb->range_end = addr + PAGE_SIZE;
-       }
-}
-
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-       unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-
-       if (addr) {
-               tlb->pages = (void *)addr;
-               tlb->max = PAGE_SIZE / sizeof(struct page *);
+               tlb->start = min(tlb->start, addr);
+               tlb->end = max(tlb->end, addr + PAGE_SIZE);
        }
 }
 
-static inline void tlb_flush_mmu(struct mmu_gather *tlb)
-{
-       tlb_flush(tlb);
-       free_pages_and_swap_cache(tlb->pages, tlb->nr);
-       tlb->nr = 0;
-       if (tlb->pages == tlb->local)
-               __tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-       tlb->mm = mm;
-       tlb->fullmm = !(start | (end+1));
-       tlb->start = start;
-       tlb->end = end;
-       tlb->vma = NULL;
-       tlb->max = ARRAY_SIZE(tlb->local);
-       tlb->pages = tlb->local;
-       tlb->nr = 0;
-       __tlb_alloc_page(tlb);
-}
-
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-       tlb_flush_mmu(tlb);
-
-       /* keep the page table cache within bounds */
-       check_pgt_cache();
-
-       if (tlb->pages != tlb->local)
-               free_pages((unsigned long)tlb->pages, 0);
-}
-
 /*
  * Memorize the range for the TLB flush.
  */
-static inline void
-tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+                                         unsigned long addr)
 {
        tlb_add_flush(tlb, addr);
 }
@@ -137,38 +66,24 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
  * case where we're doing a full MM flush.  When we're doing a munmap,
  * the vmas are adjusted to only cover the region to be torn down.
  */
-static inline void
-tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+static inline void tlb_start_vma(struct mmu_gather *tlb,
+                                struct vm_area_struct *vma)
 {
        if (!tlb->fullmm) {
-               tlb->vma = vma;
-               tlb->range_start = TASK_SIZE;
-               tlb->range_end = 0;
+               tlb->start = TASK_SIZE;
+               tlb->end = 0;
        }
 }
 
-static inline void
-tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+static inline void tlb_end_vma(struct mmu_gather *tlb,
+                              struct vm_area_struct *vma)
 {
        if (!tlb->fullmm)
                tlb_flush(tlb);
 }
 
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       tlb->pages[tlb->nr++] = page;
-       VM_BUG_ON(tlb->nr > tlb->max);
-       return tlb->max - tlb->nr;
-}
-
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-       if (!__tlb_remove_page(tlb, page))
-               tlb_flush_mmu(tlb);
-}
-
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
-       unsigned long addr)
+                                 unsigned long addr)
 {
        pgtable_page_dtor(pte);
        tlb_add_flush(tlb, addr);
@@ -184,16 +99,5 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif
 
-#define pte_free_tlb(tlb, ptep, addr)  __pte_free_tlb(tlb, ptep, addr)
-#define pmd_free_tlb(tlb, pmdp, addr)  __pmd_free_tlb(tlb, pmdp, addr)
-#define pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)
-
-#define tlb_migrate_finish(mm)         do { } while (0)
-
-static inline void
-tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
-{
-       tlb_add_flush(tlb, addr);
-}
 
 #endif
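
Rough walk-through of the new range tracking (illustrative only; 'va' is a hypothetical page-aligned address):

    /*
     *   tlb_start_vma(tlb, vma);              start = TASK_SIZE, end = 0
     *   tlb_add_flush(tlb, va);               start = va, end = va + PAGE_SIZE
     *   tlb_add_flush(tlb, va + 2*PAGE_SIZE); start = va, end = va + 3*PAGE_SIZE
     *   tlb_end_vma(tlb, vma);                flush_tlb_range(&vma, start, end)
     * The generic mmu_gather in <asm-generic/tlb.h> now provides the page
     * batching that the removed local struct used to implement by hand.
     */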
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
new file mode 100644 (file)
index 0000000..0172e6d
--- /dev/null
@@ -0,0 +1,39 @@
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/cpumask.h>
+
+struct cpu_topology {
+       int thread_id;
+       int core_id;
+       int cluster_id;
+       cpumask_t thread_sibling;
+       cpumask_t core_sibling;
+};
+
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)      (cpu_topology[cpu].cluster_id)
+#define topology_core_id(cpu)          (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)     (&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu)   (&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable()   (cpu_topology[0].cluster_id != -1)
+#define smt_capable()  (cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_ARM_TOPOLOGY_H */
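
A minimal usage sketch of the new topology data (illustrative only; dump_cpu_topology() is a hypothetical helper, not part of the commit):

    #include <linux/init.h>
    #include <linux/printk.h>
    #include <linux/smp.h>
    #include <linux/topology.h>

    static void __init dump_cpu_topology(void)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    pr_info("cpu%d: cluster %d core %d thread %d\n", cpu,
                            topology_physical_package_id(cpu),
                            topology_core_id(cpu),
                            cpu_topology[cpu].thread_id);
    }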
index 6c0f684aca81ce2da8b71c18cf2fb659c62ceaab..3bf8f4e99a511c67a3a2d9c4a739929cedd5889f 100644 (file)
@@ -83,7 +83,7 @@ static inline void set_fs(mm_segment_t fs)
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size < (u65)current->addr_limit
+ * (u65)addr + (u65)size <= current->addr_limit
  *
  * This needs 65-bit arithmetic.
  */
@@ -91,7 +91,7 @@ static inline void set_fs(mm_segment_t fs)
 ({                                                                     \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
-       asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc"         \
+       asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"         \
                : "=&r" (flag), "=&r" (roksum)                          \
                : "1" (addr), "Ir" (size),                              \
                  "r" (current_thread_info()->addr_limit)               \
index 82ce217e94cf07228e52ebbfae9e9bcdeb751de1..a4654c656a1eda3c07f682a8eda7acdfd8e18a31 100644 (file)
@@ -14,6 +14,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_COMPAT_STAT64
 #define __ARCH_WANT_SYS_GETHOSTNAME
 #define __ARCH_WANT_SYS_PAUSE
index e4b78bdca19e81b373251cf228917d3a8eab0a60..942376d37d220fc359c6afc35ef3004c9a2074a2 100644 (file)
@@ -9,6 +9,7 @@ header-y += byteorder.h
 header-y += fcntl.h
 header-y += hwcap.h
 header-y += kvm_para.h
+header-y += perf_regs.h
 header-y += param.h
 header-y += ptrace.h
 header-y += setup.h
diff --git a/arch/arm64/include/uapi/asm/perf_regs.h b/arch/arm64/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..172b831
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef _ASM_ARM64_PERF_REGS_H
+#define _ASM_ARM64_PERF_REGS_H
+
+enum perf_event_arm_regs {
+       PERF_REG_ARM64_X0,
+       PERF_REG_ARM64_X1,
+       PERF_REG_ARM64_X2,
+       PERF_REG_ARM64_X3,
+       PERF_REG_ARM64_X4,
+       PERF_REG_ARM64_X5,
+       PERF_REG_ARM64_X6,
+       PERF_REG_ARM64_X7,
+       PERF_REG_ARM64_X8,
+       PERF_REG_ARM64_X9,
+       PERF_REG_ARM64_X10,
+       PERF_REG_ARM64_X11,
+       PERF_REG_ARM64_X12,
+       PERF_REG_ARM64_X13,
+       PERF_REG_ARM64_X14,
+       PERF_REG_ARM64_X15,
+       PERF_REG_ARM64_X16,
+       PERF_REG_ARM64_X17,
+       PERF_REG_ARM64_X18,
+       PERF_REG_ARM64_X19,
+       PERF_REG_ARM64_X20,
+       PERF_REG_ARM64_X21,
+       PERF_REG_ARM64_X22,
+       PERF_REG_ARM64_X23,
+       PERF_REG_ARM64_X24,
+       PERF_REG_ARM64_X25,
+       PERF_REG_ARM64_X26,
+       PERF_REG_ARM64_X27,
+       PERF_REG_ARM64_X28,
+       PERF_REG_ARM64_X29,
+       PERF_REG_ARM64_LR,
+       PERF_REG_ARM64_SP,
+       PERF_REG_ARM64_PC,
+       PERF_REG_ARM64_MAX,
+};
+#endif /* _ASM_ARM64_PERF_REGS_H */
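
Usage sketch (illustrative only, assumes <linux/perf_event.h>): a perf tool asking for all of these registers in user-space samples would do something like:

    struct perf_event_attr attr = { 0 };

    attr.sample_type      |= PERF_SAMPLE_REGS_USER;
    /* bits 0..32 cover x0-x29, lr, sp and pc (PERF_REG_ARM64_MAX == 33) */
    attr.sample_regs_user  = (1ULL << PERF_REG_ARM64_MAX) - 1;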
index 2d4554b134100acbb6dab937326bdabcb12cd73e..7d811d9522bc4fca5e12b035460a56337b68f13e 100644 (file)
@@ -14,12 +14,14 @@ arm64-obj-y         := cputable.o debug-monitors.o entry.o irq.o fpsimd.o   \
 arm64-obj-$(CONFIG_COMPAT)             += sys32.o kuser32.o signal32.o         \
                                           sys_compat.o
 arm64-obj-$(CONFIG_MODULES)            += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP)                        += smp.o smp_spin_table.o
+arm64-obj-$(CONFIG_SMP)                        += smp.o smp_spin_table.o topology.o
+arm64-obj-$(CONFIG_PERF_EVENTS)                += perf_regs.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)     += perf_event.o
-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
+arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 arm64-obj-$(CONFIG_EARLY_PRINTK)       += early_printk.o
 arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND)  += sleep.o suspend.o
 arm64-obj-$(CONFIG_JUMP_LABEL)         += jump_label.o
+arm64-obj-$(CONFIG_KGDB)               += kgdb.o
 
 obj-y                                  += $(arm64-obj-y) vdso/
 obj-m                                  += $(arm64-obj-m)
index 636ba8b6240b8042e9c49622297bbca74e140181..14ba23c6115367b962922e5decd3afabb59e789e 100644 (file)
@@ -137,7 +137,6 @@ void disable_debug_monitors(enum debug_el el)
 static void clear_os_lock(void *unused)
 {
        asm volatile("msr oslar_el1, %0" : : "r" (0));
-       isb();
 }
 
 static int os_lock_notify(struct notifier_block *self,
@@ -156,8 +155,9 @@ static struct notifier_block os_lock_nb = {
 static int debug_monitors_init(void)
 {
        /* Clear the OS lock. */
-       smp_call_function(clear_os_lock, NULL, 1);
-       clear_os_lock(NULL);
+       on_each_cpu(clear_os_lock, NULL, 1);
+       isb();
+       local_dbg_enable();
 
        /* Register hotplug handler. */
        register_cpu_notifier(&os_lock_nb);
@@ -189,7 +189,7 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
 
 /* EL1 Single Step Handler hooks */
 static LIST_HEAD(step_hook);
-DEFINE_RWLOCK(step_hook_lock);
+static DEFINE_RWLOCK(step_hook_lock);
 
 void register_step_hook(struct step_hook *hook)
 {
@@ -276,7 +276,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
  * Use reader/writer locks instead of plain spinlock.
  */
 static LIST_HEAD(break_hook);
-DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_RWLOCK(break_hook_lock);
 
 void register_break_hook(struct break_hook *hook)
 {
index 0b281fffda5199dab76eeaa8f729609570fe6310..61035d6814cbcca9a7bb9be06e92e44446256f25 100644 (file)
@@ -384,26 +384,18 @@ ENDPROC(__calc_phys_offset)
  * Preserves:  tbl, flags
  * Corrupts:   phys, start, end, pstate
  */
-       .macro  create_block_map, tbl, flags, phys, start, end, idmap=0
+       .macro  create_block_map, tbl, flags, phys, start, end
        lsr     \phys, \phys, #BLOCK_SHIFT
-       .if     \idmap
-       and     \start, \phys, #PTRS_PER_PTE - 1        // table index
-       .else
        lsr     \start, \start, #BLOCK_SHIFT
        and     \start, \start, #PTRS_PER_PTE - 1       // table index
-       .endif
        orr     \phys, \flags, \phys, lsl #BLOCK_SHIFT  // table entry
-       .ifnc   \start,\end
        lsr     \end, \end, #BLOCK_SHIFT
        and     \end, \end, #PTRS_PER_PTE - 1           // table end index
-       .endif
 9999:  str     \phys, [\tbl, \start, lsl #3]           // store the entry
-       .ifnc   \start,\end
        add     \start, \start, #1                      // next entry
        add     \phys, \phys, #BLOCK_SIZE               // next block
        cmp     \start, \end
        b.ls    9999b
-       .endif
        .endm
 
 /*
@@ -435,9 +427,13 @@ __create_page_tables:
         * Create the identity mapping.
         */
        add     x0, x25, #PAGE_SIZE             // section table address
-       adr     x3, __turn_mmu_on               // virtual/physical address
+       ldr     x3, =KERNEL_START
+       add     x3, x3, x28                     // __pa(KERNEL_START)
        create_pgd_entry x25, x0, x3, x5, x6
-       create_block_map x0, x7, x3, x5, x5, idmap=1
+       ldr     x6, =KERNEL_END
+       mov     x5, x3                          // __pa(KERNEL_START)
+       add     x6, x6, x28                     // __pa(KERNEL_END)
+       create_block_map x0, x7, x3, x5, x6
 
        /*
         * Map the kernel image (starting with PHYS_OFFSET).
@@ -445,7 +441,7 @@ __create_page_tables:
        add     x0, x26, #PAGE_SIZE             // section table address
        mov     x5, #PAGE_OFFSET
        create_pgd_entry x26, x0, x5, x3, x6
-       ldr     x6, =KERNEL_END - 1
+       ldr     x6, =KERNEL_END
        mov     x3, x24                         // phys offset
        create_block_map x0, x7, x3, x5, x6
 
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
new file mode 100644 (file)
index 0000000..75c9cf1
--- /dev/null
@@ -0,0 +1,336 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/kernel/kgdb.c
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <asm/traps.h>
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+       { "x0", 8, offsetof(struct pt_regs, regs[0])},
+       { "x1", 8, offsetof(struct pt_regs, regs[1])},
+       { "x2", 8, offsetof(struct pt_regs, regs[2])},
+       { "x3", 8, offsetof(struct pt_regs, regs[3])},
+       { "x4", 8, offsetof(struct pt_regs, regs[4])},
+       { "x5", 8, offsetof(struct pt_regs, regs[5])},
+       { "x6", 8, offsetof(struct pt_regs, regs[6])},
+       { "x7", 8, offsetof(struct pt_regs, regs[7])},
+       { "x8", 8, offsetof(struct pt_regs, regs[8])},
+       { "x9", 8, offsetof(struct pt_regs, regs[9])},
+       { "x10", 8, offsetof(struct pt_regs, regs[10])},
+       { "x11", 8, offsetof(struct pt_regs, regs[11])},
+       { "x12", 8, offsetof(struct pt_regs, regs[12])},
+       { "x13", 8, offsetof(struct pt_regs, regs[13])},
+       { "x14", 8, offsetof(struct pt_regs, regs[14])},
+       { "x15", 8, offsetof(struct pt_regs, regs[15])},
+       { "x16", 8, offsetof(struct pt_regs, regs[16])},
+       { "x17", 8, offsetof(struct pt_regs, regs[17])},
+       { "x18", 8, offsetof(struct pt_regs, regs[18])},
+       { "x19", 8, offsetof(struct pt_regs, regs[19])},
+       { "x20", 8, offsetof(struct pt_regs, regs[20])},
+       { "x21", 8, offsetof(struct pt_regs, regs[21])},
+       { "x22", 8, offsetof(struct pt_regs, regs[22])},
+       { "x23", 8, offsetof(struct pt_regs, regs[23])},
+       { "x24", 8, offsetof(struct pt_regs, regs[24])},
+       { "x25", 8, offsetof(struct pt_regs, regs[25])},
+       { "x26", 8, offsetof(struct pt_regs, regs[26])},
+       { "x27", 8, offsetof(struct pt_regs, regs[27])},
+       { "x28", 8, offsetof(struct pt_regs, regs[28])},
+       { "x29", 8, offsetof(struct pt_regs, regs[29])},
+       { "x30", 8, offsetof(struct pt_regs, regs[30])},
+       { "sp", 8, offsetof(struct pt_regs, sp)},
+       { "pc", 8, offsetof(struct pt_regs, pc)},
+       { "pstate", 8, offsetof(struct pt_regs, pstate)},
+       { "v0", 16, -1 },
+       { "v1", 16, -1 },
+       { "v2", 16, -1 },
+       { "v3", 16, -1 },
+       { "v4", 16, -1 },
+       { "v5", 16, -1 },
+       { "v6", 16, -1 },
+       { "v7", 16, -1 },
+       { "v8", 16, -1 },
+       { "v9", 16, -1 },
+       { "v10", 16, -1 },
+       { "v11", 16, -1 },
+       { "v12", 16, -1 },
+       { "v13", 16, -1 },
+       { "v14", 16, -1 },
+       { "v15", 16, -1 },
+       { "v16", 16, -1 },
+       { "v17", 16, -1 },
+       { "v18", 16, -1 },
+       { "v19", 16, -1 },
+       { "v20", 16, -1 },
+       { "v21", 16, -1 },
+       { "v22", 16, -1 },
+       { "v23", 16, -1 },
+       { "v24", 16, -1 },
+       { "v25", 16, -1 },
+       { "v26", 16, -1 },
+       { "v27", 16, -1 },
+       { "v28", 16, -1 },
+       { "v29", 16, -1 },
+       { "v30", 16, -1 },
+       { "v31", 16, -1 },
+       { "fpsr", 4, -1 },
+       { "fpcr", 4, -1 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+       if (regno >= DBG_MAX_REG_NUM || regno < 0)
+               return NULL;
+
+       if (dbg_reg_def[regno].offset != -1)
+               memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+                      dbg_reg_def[regno].size);
+       else
+               memset(mem, 0, dbg_reg_def[regno].size);
+       return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+       if (regno >= DBG_MAX_REG_NUM || regno < 0)
+               return -EINVAL;
+
+       if (dbg_reg_def[regno].offset != -1)
+               memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+                      dbg_reg_def[regno].size);
+       return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+       struct pt_regs *thread_regs;
+
+       /* Initialize to zero */
+       memset((char *)gdb_regs, 0, NUMREGBYTES);
+       thread_regs = task_pt_regs(task);
+       memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+       regs->pc = pc;
+}
+
+static int compiled_break;
+
+static void kgdb_arch_update_addr(struct pt_regs *regs,
+                               char *remcom_in_buffer)
+{
+       unsigned long addr;
+       char *ptr;
+
+       ptr = &remcom_in_buffer[1];
+       if (kgdb_hex2long(&ptr, &addr))
+               kgdb_arch_set_pc(regs, addr);
+       else if (compiled_break == 1)
+               kgdb_arch_set_pc(regs, regs->pc + 4);
+
+       compiled_break = 0;
+}
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+                              int err_code, char *remcom_in_buffer,
+                              char *remcom_out_buffer,
+                              struct pt_regs *linux_regs)
+{
+       int err;
+
+       switch (remcom_in_buffer[0]) {
+       case 'D':
+       case 'k':
+               /*
+                * Packet D (Detach), k (kill). No special handling
+                * is required here. Handle same as c packet.
+                */
+       case 'c':
+               /*
+                * Packet c (Continue) to continue executing.
+                * Set pc to required address.
+                * Try to read optional parameter and set pc.
+                * If this was a compiled breakpoint, we need to move
+                * to the next instruction else we will just breakpoint
+                * over and over again.
+                */
+               kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+               atomic_set(&kgdb_cpu_doing_single_step, -1);
+               kgdb_single_step =  0;
+
+               /*
+                * Received continue command, disable single step
+                */
+               if (kernel_active_single_step())
+                       kernel_disable_single_step();
+
+               err = 0;
+               break;
+       case 's':
+               /*
+                * Update step address value with address passed
+                * with step packet.
+                * On debug exception return PC is copied to ELR
+                * So just update PC.
+                * If no step address is passed, resume from the address
+                * pointed by PC. Do not update PC
+                */
+               kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+               atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+               kgdb_single_step =  1;
+
+               /*
+                * Enable single step handling
+                */
+               if (!kernel_active_single_step())
+                       kernel_enable_single_step(linux_regs);
+               err = 0;
+               break;
+       default:
+               err = -1;
+       }
+       return err;
+}
+
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+       return 0;
+}
+
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+       compiled_break = 1;
+       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+       return 0;
+}
+
+static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+       return 0;
+}
+
+static struct break_hook kgdb_brkpt_hook = {
+       .esr_mask       = 0xffffffff,
+       .esr_val        = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
+       .fn             = kgdb_brk_fn
+};
+
+static struct break_hook kgdb_compiled_brkpt_hook = {
+       .esr_mask       = 0xffffffff,
+       .esr_val        = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
+       .fn             = kgdb_compiled_brk_fn
+};
+
+static struct step_hook kgdb_step_hook = {
+       .fn             = kgdb_step_brk_fn
+};
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+       kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+       local_irq_enable();
+       smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+       local_irq_disable();
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+       struct pt_regs *regs = args->regs;
+
+       if (kgdb_handle_exception(1, args->signr, cmd, regs))
+               return NOTIFY_DONE;
+       return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+       unsigned long flags;
+       int ret;
+
+       local_irq_save(flags);
+       ret = __kgdb_notify(ptr, cmd);
+       local_irq_restore(flags);
+
+       return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+       .notifier_call  = kgdb_notify,
+       /*
+        * Want to be lowest priority
+        */
+       .priority       = -INT_MAX,
+};
+
+/*
+ * kgdb_arch_init - Perform any architecture-specific initialization.
+ * This function will handle the initialization of any architecture-specific
+ * callbacks.
+ */
+int kgdb_arch_init(void)
+{
+       int ret = register_die_notifier(&kgdb_notifier);
+
+       if (ret != 0)
+               return ret;
+
+       register_break_hook(&kgdb_brkpt_hook);
+       register_break_hook(&kgdb_compiled_brkpt_hook);
+       register_step_hook(&kgdb_step_hook);
+       return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture-specific uninitialization.
+ * This function will handle the uninitialization of any architecture-specific
+ * callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+       unregister_break_hook(&kgdb_brkpt_hook);
+       unregister_break_hook(&kgdb_compiled_brkpt_hook);
+       unregister_step_hook(&kgdb_step_hook);
+       unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * ARM instructions are always in LE.
+ * Break instruction is encoded in LE format
+ */
+struct kgdb_arch arch_kgdb_ops = {
+       .gdb_bpt_instr = {
+               KGDB_DYN_BRK_INS_BYTE0,
+               KGDB_DYN_BRK_INS_BYTE1,
+               KGDB_DYN_BRK_INS_BYTE2,
+               KGDB_DYN_BRK_INS_BYTE3,
+       }
+};
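
The arch_kgdb_ops block above stores the breakpoint instruction as four
little-endian bytes. A minimal userspace sketch, assuming a hypothetical
immediate value (the kernel's KGDB_DYN_DGB_BRK_IMM may differ), of how an
A64 BRK #imm16 opcode decomposes into those bytes:

    #include <stdint.h>
    #include <stdio.h>

    #define DYN_BRK_IMM 0x400   /* hypothetical; not the kernel's value */

    int main(void)
    {
        /* BRK #imm16 is 0xd4200000 with the immediate in bits [20:5] */
        uint32_t insn = 0xd4200000u | ((uint32_t)DYN_BRK_IMM << 5);
        int i;

        /* A64 instructions are fetched little-endian: byte 0 is bits [7:0] */
        for (i = 0; i < 4; i++)
            printf("byte%d = 0x%02x\n", i, (unsigned)((insn >> (8 * i)) & 0xff));
        return 0;
    }
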
index 5b1cd792274a34cc620933e2258530b20cfdfa40..e868c72a79389c133559fc4c1599a197a33c195b 100644 (file)
@@ -1348,8 +1348,8 @@ early_initcall(init_hw_perf_events);
  * Callchain handling code.
  */
 struct frame_tail {
-       struct frame_tail   __user *fp;
-       unsigned long       lr;
+       struct frame_tail       __user *fp;
+       unsigned long           lr;
 } __attribute__((packed));
 
 /*
@@ -1386,22 +1386,80 @@ user_backtrace(struct frame_tail __user *tail,
        return buftail.fp;
 }
 
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+       compat_uptr_t   fp; /* a (struct compat_frame_tail *) in compat mode */
+       u32             sp;
+       u32             lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+                     struct perf_callchain_entry *entry)
+{
+       struct compat_frame_tail buftail;
+       unsigned long err;
+
+       /* Also check accessibility of one struct frame_tail beyond */
+       if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+               return NULL;
+
+       pagefault_disable();
+       err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+       pagefault_enable();
+
+       if (err)
+               return NULL;
+
+       perf_callchain_store(entry, buftail.lr);
+
+       /*
+        * Frame pointers should strictly progress back up the stack
+        * (towards higher addresses).
+        */
+       if (tail + 1 >= (struct compat_frame_tail __user *)
+                       compat_ptr(buftail.fp))
+               return NULL;
+
+       return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+
 void perf_callchain_user(struct perf_callchain_entry *entry,
                         struct pt_regs *regs)
 {
-       struct frame_tail __user *tail;
-
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest os callchain now */
                return;
        }
 
        perf_callchain_store(entry, regs->pc);
-       tail = (struct frame_tail __user *)regs->regs[29];
 
-       while (entry->nr < PERF_MAX_STACK_DEPTH &&
-              tail && !((unsigned long)tail & 0xf))
-               tail = user_backtrace(tail, entry);
+       if (!compat_user_mode(regs)) {
+               /* AARCH64 mode */
+               struct frame_tail __user *tail;
+
+               tail = (struct frame_tail __user *)regs->regs[29];
+
+               while (entry->nr < PERF_MAX_STACK_DEPTH &&
+                      tail && !((unsigned long)tail & 0xf))
+                       tail = user_backtrace(tail, entry);
+       } else {
+               /* AARCH32 compat mode */
+               struct compat_frame_tail __user *tail;
+
+               tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+               while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+                       tail && !((unsigned long)tail & 0x3))
+                       tail = compat_user_backtrace(tail, entry);
+       }
 }
 
 /*
@@ -1429,6 +1487,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
        frame.fp = regs->regs[29];
        frame.sp = regs->sp;
        frame.pc = regs->pc;
+
        walk_stackframe(&frame, callchain_trace, entry);
 }
 
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
new file mode 100644 (file)
index 0000000..f2d6f0a
--- /dev/null
@@ -0,0 +1,44 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+       if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
+               return 0;
+
+       /*
+        * Compat (i.e. 32 bit) mode:
+        * - PC has been set in the pt_regs struct in kernel_entry,
+        * - Handle SP and LR here.
+        */
+       if (compat_user_mode(regs)) {
+               if ((u32)idx == PERF_REG_ARM64_SP)
+                       return regs->compat_sp;
+               if ((u32)idx == PERF_REG_ARM64_LR)
+                       return regs->compat_lr;
+       }
+
+       return regs->regs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+       if (!mask || mask & REG_RESERVED)
+               return -EINVAL;
+
+       return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+       if (is_compat_thread(task_thread_info(task)))
+               return PERF_SAMPLE_REGS_ABI_32;
+       else
+               return PERF_SAMPLE_REGS_ABI_64;
+}
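
A short sketch of the sample-mask check implemented by perf_reg_validate()
above. PERF_REG_ARM64_MAX is assumed to be 33 here (x0-x30, sp, pc) purely
for illustration; the authoritative value lives in the uapi header:

    #include <stdint.h>
    #include <stdio.h>

    #define PERF_REG_ARM64_MAX 33   /* assumed for this example */
    #define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))

    static int perf_reg_validate(uint64_t mask)
    {
        if (!mask || (mask & REG_RESERVED))
            return -22;     /* -EINVAL */
        return 0;
    }

    int main(void)
    {
        printf("all registers: %d\n", perf_reg_validate((1ULL << 33) - 1));
        printf("reserved bit:  %d\n", perf_reg_validate(1ULL << 40));
        printf("empty mask:    %d\n", perf_reg_validate(0));
        return 0;
    }
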
index 1c0a9be2ffa85ad87245ac5837c94149e246533a..6391485f342daaac57e207f129a62e28d169b114 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/kallsyms.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
-#include <linux/cpuidle.h>
 #include <linux/elfcore.h>
 #include <linux/pm.h>
 #include <linux/tick.h>
@@ -72,8 +71,17 @@ static void setup_restart(void)
 
 void soft_restart(unsigned long addr)
 {
+       typedef void (*phys_reset_t)(unsigned long);
+       phys_reset_t phys_reset;
+
        setup_restart();
-       cpu_reset(addr);
+
+       /* Switch to the identity mapping */
+       phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
+       phys_reset(addr);
+
+       /* Should never get here */
+       BUG();
 }
 
 /*
@@ -94,10 +102,8 @@ void arch_cpu_idle(void)
         * This should do all the clock switching and wait for interrupt
         * tricks
         */
-       if (cpuidle_idle_call()) {
-               cpu_do_idle();
-               local_irq_enable();
-       }
+       cpu_do_idle();
+       local_irq_enable();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
index 4f97db3d73633294f6a6310e54e626972debc9a2..ea4828a4aa96729993d85145603ed45afba7ac03 100644 (file)
@@ -176,22 +176,20 @@ static const struct of_device_id psci_of_match[] __initconst = {
        {},
 };
 
-int __init psci_init(void)
+void __init psci_init(void)
 {
        struct device_node *np;
        const char *method;
        u32 id;
-       int err = 0;
 
        np = of_find_matching_node(NULL, psci_of_match);
        if (!np)
-               return -ENODEV;
+               return;
 
        pr_info("probing function IDs from device-tree\n");
 
        if (of_property_read_string(np, "method", &method)) {
                pr_warning("missing \"method\" property\n");
-               err = -ENXIO;
                goto out_put_node;
        }
 
@@ -201,7 +199,6 @@ int __init psci_init(void)
                invoke_psci_fn = __invoke_psci_fn_smc;
        } else {
                pr_warning("invalid \"method\" property: %s\n", method);
-               err = -EINVAL;
                goto out_put_node;
        }
 
@@ -227,7 +224,7 @@ int __init psci_init(void)
 
 out_put_node:
        of_node_put(np);
-       return err;
+       return;
 }
 
 #ifdef CONFIG_SMP
@@ -251,7 +248,7 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
 {
        int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
        if (err)
-               pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+               pr_err("failed to boot CPU%d (%d)\n", cpu, err);
 
        return err;
 }
@@ -278,7 +275,7 @@ static void cpu_psci_cpu_die(unsigned int cpu)
 
        ret = psci_ops.cpu_off(state);
 
-       pr_crit("psci: unable to power off CPU%u (%d)\n", cpu, ret);
+       pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
 }
 #endif
 
index c8e9effe52e10f6b59d39de293aca7607f7d5bce..67da30741a1b64a73ce085739209d7bf085ac2e9 100644 (file)
@@ -69,6 +69,7 @@ EXPORT_SYMBOL_GPL(elf_hwcap);
                                 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
                                 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 static const char *cpu_name;
@@ -242,6 +243,38 @@ static void __init setup_processor(void)
        block = (features >> 16) & 0xf;
        if (block && !(block & 0x8))
                elf_hwcap |= HWCAP_CRC32;
+
+#ifdef CONFIG_COMPAT
+       /*
+        * ID_ISAR5_EL1 carries similar information to the above, but pertains
+        * to the AArch32 (32-bit) execution state.
+        */
+       features = read_cpuid(ID_ISAR5_EL1);
+       block = (features >> 4) & 0xf;
+       if (!(block & 0x8)) {
+               switch (block) {
+               default:
+               case 2:
+                       compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
+               case 1:
+                       compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
+               case 0:
+                       break;
+               }
+       }
+
+       block = (features >> 8) & 0xf;
+       if (block && !(block & 0x8))
+               compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
+
+       block = (features >> 12) & 0xf;
+       if (block && !(block & 0x8))
+               compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
+
+       block = (features >> 16) & 0xf;
+       if (block && !(block & 0x8))
+               compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
+#endif
 }
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
index 7cfb92a4ab66523212ec91392b6ee269fa0d97a9..f0a141dd5655817171605293d45429c740495661 100644 (file)
@@ -114,6 +114,11 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        return ret;
 }
 
+static void smp_store_cpu_info(unsigned int cpuid)
+{
+       store_cpu_topology(cpuid);
+}
+
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -152,6 +157,8 @@ asmlinkage void secondary_start_kernel(void)
         */
        notify_cpu_starting(cpu);
 
+       smp_store_cpu_info(cpu);
+
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
@@ -160,6 +167,7 @@ asmlinkage void secondary_start_kernel(void)
        set_cpu_online(cpu, true);
        complete(&cpu_running);
 
+       local_dbg_enable();
        local_irq_enable();
        local_async_enable();
 
@@ -390,6 +398,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        int err;
        unsigned int cpu, ncores = num_possible_cpus();
 
+       init_cpu_topology();
+
+       smp_store_cpu_info(smp_processor_id());
+
        /*
         * are we trying to boot more cores than exist?
         */
index 44c22805d2e2ad7e16895dc248bb9e861105fe07..7a530d2cc8077e7973938d1610d654644ae0ef1a 100644 (file)
@@ -128,7 +128,7 @@ static int smp_spin_table_cpu_boot(unsigned int cpu)
        return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
 }
 
-void smp_spin_table_cpu_postboot(void)
+static void smp_spin_table_cpu_postboot(void)
 {
        /*
         * Let the primary processor know we're out of the pen.
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
new file mode 100644 (file)
index 0000000..3e06b0b
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * arch/arm64/kernel/topology.c
+ *
+ * Copyright (C) 2011,2013,2014 Linaro Limited.
+ *
+ * Based on the arm32 version written by Vincent Guittot in turn based on
+ * arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/sched.h>
+
+#include <asm/topology.h>
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+       return &cpu_topology[cpu].core_sibling;
+}
+
+static void update_siblings_masks(unsigned int cpuid)
+{
+       struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+       int cpu;
+
+       if (cpuid_topo->cluster_id == -1) {
+               /*
+                * DT does not contain topology information for this cpu;
+                * reset it to the default behaviour.
+                */
+               pr_debug("CPU%u: No topology information configured\n", cpuid);
+               cpuid_topo->core_id = 0;
+               cpumask_set_cpu(cpuid, &cpuid_topo->core_sibling);
+               cpumask_set_cpu(cpuid, &cpuid_topo->thread_sibling);
+               return;
+       }
+
+       /* update core and thread sibling masks */
+       for_each_possible_cpu(cpu) {
+               cpu_topo = &cpu_topology[cpu];
+
+               if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+                       continue;
+
+               cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+               if (cpu != cpuid)
+                       cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+               if (cpuid_topo->core_id != cpu_topo->core_id)
+                       continue;
+
+               cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+               if (cpu != cpuid)
+                       cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+       }
+}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+       update_siblings_masks(cpuid);
+}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running,
+ * which prevents simultaneous write access to the cpu_topology array.
+ */
+void __init init_cpu_topology(void)
+{
+       unsigned int cpu;
+
+       /* init core mask and power */
+       for_each_possible_cpu(cpu) {
+               struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+               cpu_topo->thread_id = -1;
+               cpu_topo->core_id =  -1;
+               cpu_topo->cluster_id = -1;
+               cpumask_clear(&cpu_topo->core_sibling);
+               cpumask_clear(&cpu_topo->thread_sibling);
+       }
+}
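
A minimal userspace model, not the kernel API, of how update_siblings_masks()
above groups CPUs: any two CPUs reporting the same cluster_id end up in each
other's core_sibling mask. The 4-CPU, 2-cluster layout is hypothetical:

    #include <stdio.h>

    #define NR_CPUS 4

    /* hypothetical DT-derived IDs: CPUs 0,1 in cluster 0; CPUs 2,3 in cluster 1 */
    static const int cluster_id[NR_CPUS] = { 0, 0, 1, 1 };

    int main(void)
    {
        int cpuid, cpu;

        for (cpuid = 0; cpuid < NR_CPUS; cpuid++) {
            unsigned int core_sibling = 0;

            /* same rule as update_siblings_masks(): share a cluster_id */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cluster_id[cpu] == cluster_id[cpuid])
                    core_sibling |= 1u << cpu;

            printf("CPU%d core_sibling = 0x%x\n", cpuid, core_sibling);
        }
        return 0;
    }
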
index a7149cae16153bbacec32c420e1661b38b8aabd6..50384fec56c469b296dc5737bdf2733d2febb321 100644 (file)
@@ -106,49 +106,31 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 
 static int __init vdso_init(void)
 {
-       struct page *pg;
-       char *vbase;
-       int i, ret = 0;
+       int i;
+
+       if (memcmp(&vdso_start, "\177ELF", 4)) {
+               pr_err("vDSO is not a valid ELF object!\n");
+               return -EINVAL;
+       }
 
        vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
        pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
                vdso_pages + 1, vdso_pages, 1L, &vdso_start);
 
        /* Allocate the vDSO pagelist, plus a page for the data. */
-       vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
+       vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
                                GFP_KERNEL);
-       if (vdso_pagelist == NULL) {
-               pr_err("Failed to allocate vDSO pagelist!\n");
+       if (vdso_pagelist == NULL)
                return -ENOMEM;
-       }
 
        /* Grab the vDSO code pages. */
-       for (i = 0; i < vdso_pages; i++) {
-               pg = virt_to_page(&vdso_start + i*PAGE_SIZE);
-               ClearPageReserved(pg);
-               get_page(pg);
-               vdso_pagelist[i] = pg;
-       }
-
-       /* Sanity check the shared object header. */
-       vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
-       if (vbase == NULL) {
-               pr_err("Failed to map vDSO pagelist!\n");
-               return -ENOMEM;
-       } else if (memcmp(vbase, "\177ELF", 4)) {
-               pr_err("vDSO is not a valid ELF object!\n");
-               ret = -EINVAL;
-               goto unmap;
-       }
+       for (i = 0; i < vdso_pages; i++)
+               vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
        /* Grab the vDSO data page. */
-       pg = virt_to_page(vdso_data);
-       get_page(pg);
-       vdso_pagelist[i] = pg;
+       vdso_pagelist[i] = virt_to_page(vdso_data);
 
-unmap:
-       vunmap(vbase);
-       return ret;
+       return 0;
 }
 arch_initcall(vdso_init);
 
index 2b0244d65c16f5c68dde0770f555bbadf4a5b073..d968796f4b2d7a88dda3605f0f16b9777879052b 100644 (file)
@@ -68,6 +68,12 @@ __do_hyp_init:
        msr     tcr_el2, x4
 
        ldr     x4, =VTCR_EL2_FLAGS
+       /*
+        * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
+        * VTCR_EL2.
+        */
+       mrs     x5, ID_AA64MMFR0_EL1
+       bfi     x4, x5, #16, #3
        msr     vtcr_el2, x4
 
        mrs     x4, mair_el1
index 1ea9f26d1b703585537d82cf30aa44ddbe879917..c46f48b33c1409d8ea6dfe3c48327a00b6376a06 100644 (file)
@@ -30,7 +30,7 @@
  *
  *     Corrupted registers: x0-x7, x9-x11
  */
-ENTRY(__flush_dcache_all)
+__flush_dcache_all:
        dsb     sy                              // ensure ordering with previous memory accesses
        mrs     x0, clidr_el1                   // read clidr
        and     x3, x0, #0x7000000              // extract loc from clidr
@@ -166,3 +166,81 @@ ENTRY(__flush_dcache_area)
        dsb     sy
        ret
 ENDPROC(__flush_dcache_area)
+
+/*
+ *     __dma_inv_range(start, end)
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
+ */
+__dma_inv_range:
+       dcache_line_size x2, x3
+       sub     x3, x2, #1
+       bic     x0, x0, x3
+       bic     x1, x1, x3
+1:     dc      ivac, x0                        // invalidate D / U line
+       add     x0, x0, x2
+       cmp     x0, x1
+       b.lo    1b
+       dsb     sy
+       ret
+ENDPROC(__dma_inv_range)
+
+/*
+ *     __dma_clean_range(start, end)
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
+ */
+__dma_clean_range:
+       dcache_line_size x2, x3
+       sub     x3, x2, #1
+       bic     x0, x0, x3
+1:     dc      cvac, x0                        // clean D / U line
+       add     x0, x0, x2
+       cmp     x0, x1
+       b.lo    1b
+       dsb     sy
+       ret
+ENDPROC(__dma_clean_range)
+
+/*
+ *     __dma_flush_range(start, end)
+ *     - start   - virtual start address of region
+ *     - end     - virtual end address of region
+ */
+ENTRY(__dma_flush_range)
+       dcache_line_size x2, x3
+       sub     x3, x2, #1
+       bic     x0, x0, x3
+1:     dc      civac, x0                       // clean & invalidate D / U line
+       add     x0, x0, x2
+       cmp     x0, x1
+       b.lo    1b
+       dsb     sy
+       ret
+ENDPROC(__dma_flush_range)
+
+/*
+ *     __dma_map_area(start, size, dir)
+ *     - start - kernel virtual start address
+ *     - size  - size of region
+ *     - dir   - DMA direction
+ */
+ENTRY(__dma_map_area)
+       add     x1, x1, x0
+       cmp     w2, #DMA_FROM_DEVICE
+       b.eq    __dma_inv_range
+       b       __dma_clean_range
+ENDPROC(__dma_map_area)
+
+/*
+ *     __dma_unmap_area(start, size, dir)
+ *     - start - kernel virtual start address
+ *     - size  - size of region
+ *     - dir   - DMA direction
+ */
+ENTRY(__dma_unmap_area)
+       add     x1, x1, x0
+       cmp     w2, #DMA_TO_DEVICE
+       b.ne    __dma_inv_range
+       ret
+ENDPROC(__dma_unmap_area)
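
The two entry points above pick a cache operation from the DMA direction:
__dma_map_area cleans unless the transfer is DMA_FROM_DEVICE (then it
invalidates), and __dma_unmap_area invalidates unless the transfer was
DMA_TO_DEVICE. A C restatement of that decision table, using the standard
direction values:

    #include <stdio.h>

    enum dma_data_direction {
        DMA_BIDIRECTIONAL = 0,
        DMA_TO_DEVICE = 1,
        DMA_FROM_DEVICE = 2,
    };

    /* mirrors the branches in __dma_map_area / __dma_unmap_area above */
    static const char *map_op(enum dma_data_direction dir)
    {
        return dir == DMA_FROM_DEVICE ? "invalidate" : "clean";
    }

    static const char *unmap_op(enum dma_data_direction dir)
    {
        return dir != DMA_TO_DEVICE ? "invalidate" : "none";
    }

    int main(void)
    {
        int d;

        for (d = DMA_BIDIRECTIONAL; d <= DMA_FROM_DEVICE; d++)
            printf("dir=%d map=%s unmap=%s\n", d,
                   map_op(d), unmap_op(d));
        return 0;
    }
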
index fbd76785c5db640bf511a9647380ebb1ae29b3ef..0ba347e59f06a7dbfe3fe7dcc884f9435c791d6e 100644 (file)
 struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
-                                         dma_addr_t *dma_handle, gfp_t flags,
-                                         struct dma_attrs *attrs)
+static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+                                bool coherent)
+{
+       if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+               return pgprot_writecombine(prot);
+       return prot;
+}
+
+static void *__dma_alloc_coherent(struct device *dev, size_t size,
+                                 dma_addr_t *dma_handle, gfp_t flags,
+                                 struct dma_attrs *attrs)
 {
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }
 
-       if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+       if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-               flags |= GFP_DMA32;
+               flags |= GFP_DMA;
        if (IS_ENABLED(CONFIG_DMA_CMA)) {
                struct page *page;
 
@@ -58,9 +66,9 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
        }
 }
 
-static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
-                                       void *vaddr, dma_addr_t dma_handle,
-                                       struct dma_attrs *attrs)
+static void __dma_free_coherent(struct device *dev, size_t size,
+                               void *vaddr, dma_addr_t dma_handle,
+                               struct dma_attrs *attrs)
 {
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
@@ -78,9 +86,212 @@ static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
        }
 }
 
-static struct dma_map_ops arm64_swiotlb_dma_ops = {
-       .alloc = arm64_swiotlb_alloc_coherent,
-       .free = arm64_swiotlb_free_coherent,
+static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
+                                    dma_addr_t *dma_handle, gfp_t flags,
+                                    struct dma_attrs *attrs)
+{
+       struct page *page, **map;
+       void *ptr, *coherent_ptr;
+       int order, i;
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+       if (!ptr)
+               goto no_mem;
+       map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
+       if (!map)
+               goto no_map;
+
+       /* remove any dirty cache lines on the kernel alias */
+       __dma_flush_range(ptr, ptr + size);
+
+       /* create a coherent mapping */
+       page = virt_to_page(ptr);
+       for (i = 0; i < (size >> PAGE_SHIFT); i++)
+               map[i] = page + i;
+       coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
+                           __get_dma_pgprot(attrs, pgprot_default, false));
+       kfree(map);
+       if (!coherent_ptr)
+               goto no_map;
+
+       return coherent_ptr;
+
+no_map:
+       __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+no_mem:
+       *dma_handle = ~0;
+       return NULL;
+}
+
+static void __dma_free_noncoherent(struct device *dev, size_t size,
+                                  void *vaddr, dma_addr_t dma_handle,
+                                  struct dma_attrs *attrs)
+{
+       void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+
+       vunmap(vaddr);
+       __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+}
+
+static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
+                                    unsigned long offset, size_t size,
+                                    enum dma_data_direction dir,
+                                    struct dma_attrs *attrs)
+{
+       dma_addr_t dev_addr;
+
+       dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
+       __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+
+       return dev_addr;
+}
+
+
+static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
+                                size_t size, enum dma_data_direction dir,
+                                struct dma_attrs *attrs)
+{
+       __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+       swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                                 int nelems, enum dma_data_direction dir,
+                                 struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i, ret;
+
+       ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
+       for_each_sg(sgl, sg, ret, i)
+               __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                              sg->length, dir);
+
+       return ret;
+}
+
+static void __swiotlb_unmap_sg_attrs(struct device *dev,
+                                    struct scatterlist *sgl, int nelems,
+                                    enum dma_data_direction dir,
+                                    struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nelems, i)
+               __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                                sg->length, dir);
+       swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
+}
+
+static void __swiotlb_sync_single_for_cpu(struct device *dev,
+                                         dma_addr_t dev_addr, size_t size,
+                                         enum dma_data_direction dir)
+{
+       __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+       swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
+}
+
+static void __swiotlb_sync_single_for_device(struct device *dev,
+                                            dma_addr_t dev_addr, size_t size,
+                                            enum dma_data_direction dir)
+{
+       swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
+       __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+}
+
+static void __swiotlb_sync_sg_for_cpu(struct device *dev,
+                                     struct scatterlist *sgl, int nelems,
+                                     enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nelems, i)
+               __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                                sg->length, dir);
+       swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
+}
+
+static void __swiotlb_sync_sg_for_device(struct device *dev,
+                                        struct scatterlist *sgl, int nelems,
+                                        enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
+       for_each_sg(sgl, sg, nelems, i)
+               __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                              sg->length, dir);
+}
+
+/* vma->vm_page_prot must be set appropriately before calling this function */
+static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                            void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       int ret = -ENXIO;
+       unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
+                                       PAGE_SHIFT;
+       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
+
+       if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+               ret = remap_pfn_range(vma, vma->vm_start,
+                                     pfn + off,
+                                     vma->vm_end - vma->vm_start,
+                                     vma->vm_page_prot);
+       }
+
+       return ret;
+}
+
+static int __swiotlb_mmap_noncoherent(struct device *dev,
+               struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               struct dma_attrs *attrs)
+{
+       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
+       return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static int __swiotlb_mmap_coherent(struct device *dev,
+               struct vm_area_struct *vma,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               struct dma_attrs *attrs)
+{
+       /* Just use whatever page_prot attributes were specified */
+       return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+struct dma_map_ops noncoherent_swiotlb_dma_ops = {
+       .alloc = __dma_alloc_noncoherent,
+       .free = __dma_free_noncoherent,
+       .mmap = __swiotlb_mmap_noncoherent,
+       .map_page = __swiotlb_map_page,
+       .unmap_page = __swiotlb_unmap_page,
+       .map_sg = __swiotlb_map_sg_attrs,
+       .unmap_sg = __swiotlb_unmap_sg_attrs,
+       .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = __swiotlb_sync_single_for_device,
+       .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device = __swiotlb_sync_sg_for_device,
+       .dma_supported = swiotlb_dma_supported,
+       .mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
+
+struct dma_map_ops coherent_swiotlb_dma_ops = {
+       .alloc = __dma_alloc_coherent,
+       .free = __dma_free_coherent,
+       .mmap = __swiotlb_mmap_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
@@ -92,12 +303,19 @@ static struct dma_map_ops arm64_swiotlb_dma_ops = {
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
 };
+EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
+
+extern int swiotlb_late_init_with_default_size(size_t default_size);
 
-void __init arm64_swiotlb_init(void)
+static int __init swiotlb_late_init(void)
 {
-       dma_ops = &arm64_swiotlb_dma_ops;
-       swiotlb_init(1);
+       size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+
+       dma_ops = &coherent_swiotlb_dma_ops;
+
+       return swiotlb_late_init_with_default_size(swiotlb_size);
 }
+subsys_initcall(swiotlb_late_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
index d0b4c2efda90aa1ba8eaf2eabe8a977cb0fe2c9c..88627c450a6cbd3f5cdd0eb7056eeecaac76b6f2 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/memblock.h>
 #include <linux/sort.h>
 #include <linux/of_fdt.h>
+#include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 
 #include <asm/sections.h>
@@ -59,22 +60,22 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
-
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
        struct memblock_region *reg;
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-       unsigned long max_dma32 = min;
+       unsigned long max_dma = min;
 
        memset(zone_size, 0, sizeof(zone_size));
 
-#ifdef CONFIG_ZONE_DMA32
        /* 4GB maximum for 32-bit only capable devices */
-       max_dma32 = max(min, min(max, MAX_DMA32_PFN));
-       zone_size[ZONE_DMA32] = max_dma32 - min;
-#endif
-       zone_size[ZONE_NORMAL] = max - max_dma32;
+       if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+               unsigned long max_dma_phys =
+                       (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
+               max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+               zone_size[ZONE_DMA] = max_dma - min;
+       }
+       zone_size[ZONE_NORMAL] = max - max_dma;
 
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
 
@@ -84,15 +85,15 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
                if (start >= max)
                        continue;
-#ifdef CONFIG_ZONE_DMA32
-               if (start < max_dma32) {
-                       unsigned long dma_end = min(end, max_dma32);
-                       zhole_size[ZONE_DMA32] -= dma_end - start;
+
+               if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
+                       unsigned long dma_end = min(end, max_dma);
+                       zhole_size[ZONE_DMA] -= dma_end - start;
                }
-#endif
-               if (end > max_dma32) {
+
+               if (end > max_dma) {
                        unsigned long normal_end = min(end, max);
-                       unsigned long normal_start = max(start, max_dma32);
+                       unsigned long normal_start = max(start, max_dma);
                        zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
                }
        }
@@ -261,8 +262,6 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
-       arm64_swiotlb_init();
-
        max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
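
The zone_sizes_init() hunk above replaces the hard-coded MAX_DMA32_PFN with
a limit derived from dma_to_phys(NULL, DMA_BIT_MASK(32) + 1). A worked
example of that arithmetic, assuming 4KB pages and an identity bus-to-physical
translation (the real offset is platform specific):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long dma_limit = 0x100000000ULL;  /* DMA_BIT_MASK(32) + 1 */
        unsigned int page_shift = 12;                   /* assumed 4KB pages */

        /* with an identity dma_to_phys(), ZONE_DMA ends at this PFN */
        printf("max_dma pfn = 0x%llx\n", dma_limit >> page_shift);
        return 0;
    }
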
index 1333e6f9a8e50bd7e8996a67a392feaa4aaf95e0..e085ee6ef4e23c146627798b9c7b98411a719f36 100644 (file)
@@ -173,12 +173,6 @@ ENDPROC(cpu_do_switch_mm)
  *     value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-       /*
-        * Preserve the link register across the function call.
-        */
-       mov     x28, lr
-       bl      __flush_dcache_all
-       mov     lr, x28
        ic      iallu                           // I+BTB cache invalidate
        tlbi    vmalle1is                       // invalidate I + D TLBs
        dsb     sy
@@ -215,8 +209,14 @@ ENTRY(__cpu_setup)
         * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
         * both user and kernel.
         */
-       ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
+       ldr     x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
                      TCR_ASID16 | TCR_TBI0 | (1 << 31)
+       /*
+        * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
+        * TCR_EL1.
+        */
+       mrs     x9, ID_AA64MMFR0_EL1
+       bfi     x10, x9, #32, #3
 #ifdef CONFIG_ARM64_64K_PAGES
        orr     x10, x10, TCR_TG0_64K
        orr     x10, x10, TCR_TG1_64K
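
The "bfi x10, x9, #32, #3" added above copies the 3-bit PARange field from
ID_AA64MMFR0_EL1 bits [2:0] into the IPS field of TCR_EL1 at bits [34:32].
A small C sketch of the equivalent bit-field insert, with a hypothetical
PARange value:

    #include <stdint.h>
    #include <stdio.h>

    /* C equivalent of "bfi x10, x9, #32, #3" */
    static uint64_t set_ips(uint64_t tcr, uint64_t mmfr0)
    {
        uint64_t mask = 0x7ULL << 32;

        return (tcr & ~mask) | ((mmfr0 & 0x7) << 32);
    }

    int main(void)
    {
        uint64_t mmfr0 = 0x2;   /* hypothetical PARange encoding (40-bit PA) */
        uint64_t tcr = 0;       /* stand-in for the flags loaded into x10 */

        printf("TCR_EL1 with IPS = 0x%llx\n",
               (unsigned long long)set_ips(tcr, mmfr0));
        return 0;
    }
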
index c7c64a63c29f470ebbec5071422d148ba9bc074a..00a0f3ccd6eb994a67071d8e544762475f1ca792 100644 (file)
@@ -1,22 +1,23 @@
 
-generic-y      += clkdev.h
-generic-y       += cputime.h
-generic-y       += delay.h
-generic-y       += device.h
-generic-y       += div64.h
-generic-y       += emergency-restart.h
-generic-y      += exec.h
-generic-y       += futex.h
-generic-y      += preempt.h
-generic-y       += irq_regs.h
-generic-y      += param.h
-generic-y       += local.h
-generic-y       += local64.h
-generic-y       += percpu.h
-generic-y       += scatterlist.h
-generic-y       += sections.h
-generic-y       += topology.h
-generic-y      += trace_clock.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += futex.h
+generic-y += hash.h
+generic-y += irq_regs.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += topology.h
+generic-y += trace_clock.h
 generic-y += vga.h
-generic-y       += xor.h
-generic-y      += hash.h
+generic-y += xor.h
index 7635e770622ee32dafb27f6368a588542791d51f..278661bbd1b0aefb081be0bdb7d15423cd27f3f3 100644 (file)
@@ -9,7 +9,7 @@
 
 static void __init check_bugs(void)
 {
-       cpu_data->loops_per_jiffy = loops_per_jiffy;
+       boot_cpu_data.loops_per_jiffy = loops_per_jiffy;
 }
 
 #endif /* __ASM_AVR32_BUGS_H */
index 48d71c5c898a18d083f8a55da807c103d7515458..972adcc1e8f40179d06a4062d2adf63416f36452 100644 (file)
@@ -83,13 +83,8 @@ static inline unsigned int avr32_get_chip_revision(struct avr32_cpuinfo *cpu)
 
 extern struct avr32_cpuinfo boot_cpu_data;
 
-#ifdef CONFIG_SMP
-extern struct avr32_cpuinfo cpu_data[];
-#define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
+/* No SMP support so far */
 #define current_cpu_data boot_cpu_data
-#endif
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's
index 2233be71e2e8cf7e8b62789eb6a5a7ba864d001e..0341ae27c9ec57fd06eaf88776db6ae83444c96e 100644 (file)
@@ -39,10 +39,12 @@ static ssize_t store_pc0event(struct device *dev,
                              size_t count)
 {
        unsigned long val;
-       char *endp;
+       int ret;
 
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf || val > 0x3f)
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
+       if (val > 0x3f)
                return -EINVAL;
        val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff);
        sysreg_write(PCCR, val);
@@ -61,11 +63,11 @@ static ssize_t store_pc0count(struct device *dev,
                                const char *buf, size_t count)
 {
        unsigned long val;
-       char *endp;
+       int ret;
 
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
        sysreg_write(PCNT0, val);
 
        return count;
@@ -84,10 +86,12 @@ static ssize_t store_pc1event(struct device *dev,
                              size_t count)
 {
        unsigned long val;
-       char *endp;
+       int ret;
 
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf || val > 0x3f)
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
+       if (val > 0x3f)
                return -EINVAL;
        val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff);
        sysreg_write(PCCR, val);
@@ -106,11 +110,11 @@ static ssize_t store_pc1count(struct device *dev,
                              size_t count)
 {
        unsigned long val;
-       char *endp;
+       int ret;
 
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
        sysreg_write(PCNT1, val);
 
        return count;
@@ -129,11 +133,11 @@ static ssize_t store_pccycles(struct device *dev,
                              size_t count)
 {
        unsigned long val;
-       char *endp;
+       int ret;
 
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
        sysreg_write(PCCNT, val);
 
        return count;
@@ -152,11 +156,11 @@ static ssize_t store_pcenable(struct device *dev,
                              size_t count)
 {
        unsigned long pccr, val;
-       char *endp;
+       int ret;
 
-       val = simple_strtoul(buf, &endp, 0);
-       if (endp == buf)
-               return -EINVAL;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+               return ret;
        if (val)
                val = 1;
 
index 6a46ecd56cfd538c7fe070b641fad3e68ffc3d1f..85d635cd7b28f84045e5bf29e62e4aab53196f85 100644 (file)
@@ -111,6 +111,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
        __flush_icache_range(start & ~(linesz - 1),
                             (end + linesz - 1) & ~(linesz - 1));
 }
+EXPORT_SYMBOL(flush_icache_range);
 
 /*
  * This one is called from __do_fault() and do_swap_page().
index 359d36fdc2471f987ff561104e9dcb2723fdcf99..0d93b9a79ca9561399a152d25a3545a09496b858 100644 (file)
@@ -10,6 +10,7 @@ generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += fb.h
 generic-y += futex.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
@@ -17,14 +18,16 @@ generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
 generic-y += percpu.h
 generic-y += pgalloc.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
@@ -44,5 +47,3 @@ generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += user.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 2fd04f10cc266b86d6a88c786278c18fd2a604fb..89de539ed0100624ead2e5ea6d7f9790eed11cf2 100644 (file)
 /* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
 #include <mach/irq.h>
 
-/*
- * pm save bfin pint registers
- */
-struct adi_pm_pint_save {
-       u32 assign;
-       u32 edge_set;
-       u32 invert_set;
-};
-
 #if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
 # define NOP_PAD_ANOMALY_05000244 "nop; nop;"
 #else
index d73bb85ccdd3e9f7dbc49c5a3c67944f250c82a6..8dbdce8421b08c0e58322db87002ef314778cd1b 100644 (file)
@@ -15,6 +15,7 @@ generic-y += exec.h
 generic-y += fb.h
 generic-y += fcntl.h
 generic-y += futex.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += io.h
 generic-y += ioctl.h
@@ -24,6 +25,7 @@ generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += local.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += mmu.h
 generic-y += mmu_context.h
@@ -34,6 +36,7 @@ generic-y += percpu.h
 generic-y += pgalloc.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += segment.h
@@ -56,5 +59,3 @@ generic-y += ucontext.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index f3fd8768f095cc55c606d22bf5281042a31234c2..afff5105909d953af7ca3667c773350fb4bc434d 100644 (file)
@@ -5,12 +5,14 @@ header-y += arch-v32/
 
 generic-y += barrier.h
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
 generic-y += hash.h
 generic-y += kvm_para.h
 generic-y += linkage.h
+generic-y += mcs_spinlock.h
 generic-y += module.h
+generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vga.h
 generic-y += xor.h
-generic-y += preempt.h
diff --git a/arch/cris/include/asm/cputime.h b/arch/cris/include/asm/cputime.h
deleted file mode 100644 (file)
index 4446a65..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __CRIS_CPUTIME_H
-#define __CRIS_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __CRIS_CPUTIME_H */
index bc42f14c9c2e706081c66b3805a9369d65e4bce9..87b95eb8aee53e3f5280f3b985da8b0c6cc75192 100644 (file)
@@ -1,6 +1,8 @@
 
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
-generic-y += trace_clock.h
-generic-y += preempt.h
 generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/frv/include/asm/cputime.h b/arch/frv/include/asm/cputime.h
deleted file mode 100644 (file)
index f6c373a..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_CPUTIME_H
-#define _ASM_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* _ASM_CPUTIME_H */
index 38ca45d3df1e4d88230d1ab8f991510efb983450..eadcc118f9506d8fbfddfd5b0dc7e65c6f3f9673 100644 (file)
@@ -25,14 +25,16 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
-generic-y += local64.h
 generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += rwsem.h
 generic-y += scatterlist.h
@@ -45,8 +47,8 @@ generic-y += siginfo.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
-generic-y += statfs.h
 generic-y += stat.h
+generic-y += statfs.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
@@ -55,4 +57,3 @@ generic-y += types.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += xor.h
-generic-y += preempt.h
index efbd2929aeb75c563b5ccbf4b267be596ce921e2..b4efaf2bc13e9c104e075072dec02399542c53ec 100644 (file)
@@ -25,14 +25,13 @@ CONFIG_KEXEC=y
 CONFIG_CRASH_DUMP=y
 CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=m
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_DOCK=y
 CONFIG_ACPI_PROCESSOR=m
-CONFIG_ACPI_CONTAINER=m
+CONFIG_ACPI_CONTAINER=y
 CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_ACPI=m
+CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
index 0f4e9e41f130623baae948b022dbd01468869331..0fed9ae5a42a7a0630673efe62662b6973bf64fa 100644 (file)
@@ -26,7 +26,6 @@ CONFIG_IA64_PALINFO=y
 CONFIG_KEXEC=y
 CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=m
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_BUTTON=m
 CONFIG_ACPI_FAN=m
 CONFIG_ACPI_PROCESSOR=m
index fc7aba07c2b4f7064efa3e0e9704382e5495fdf6..54bc72eda30d4f5efefa3c3f9949fcba3155adc0 100644 (file)
@@ -16,7 +16,6 @@ CONFIG_IA64_PALINFO=y
 CONFIG_CRASH_DUMP=y
 CONFIG_EFI_VARS=y
 CONFIG_BINFMT_MISC=y
-CONFIG_ACPI_PROCFS=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
 CONFIG_PACKET=y
index 8e858b593e4f13cf79fb34fda2903531e02fd1dc..30c43d39dede068c9f54448bc8e1f90917fe0c84 100644 (file)
@@ -1596,7 +1596,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
 *
 ***************************************************************/
 
-static void __init
+static void
 ioc_iova_init(struct ioc *ioc)
 {
        int tcnfg;
@@ -1807,7 +1807,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
        { SX2000_IOC_ID, "sx2000", NULL },
 };
 
-static struct ioc * __init
+static struct ioc *
 ioc_init(unsigned long hpa, void *handle)
 {
        struct ioc *ioc;
@@ -2041,7 +2041,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
 #define sba_map_ioc_to_node(ioc, handle)
 #endif
 
-static int __init
+static int
 acpi_sba_ioc_add(struct acpi_device *device,
                 const struct acpi_device_id *not_used)
 {
index 283a83154b5eaaab7f8dc5474f0ec722e970a1de..0da4aa2602ae01d9badaca0f960e8d6b4c16ae2b 100644 (file)
@@ -1,8 +1,9 @@
 
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += hash.h
 generic-y += kvm_para.h
-generic-y += trace_clock.h
+generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += trace_clock.h
 generic-y += vtime.h
-generic-y += hash.h
index a2496e449b75452038ab7e7b14e4bd2c796fd458..5cb55a1e606b0b5cbed352f1e7187c41d31371de 100644 (file)
@@ -77,7 +77,6 @@ void build_cpu_to_node_map(void);
 #define topology_core_id(cpu)                  (cpu_data(cpu)->core_id)
 #define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
-#define smt_capable()                          (smp_num_siblings > 1)
 #endif
 
 extern void arch_fix_phys_package_id(int num, u32 slot);
index 07d209c9507f292d80c45edc7b7433d78a78602b..467497ade45f82745129c384a638a9ea6b75378a 100644 (file)
 #include <asm/sal.h>
 #include <asm/cyclone.h>
 
-#define BAD_MADT_ENTRY(entry, end) (                                        \
-               (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
-               ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
 #define PREFIX                 "ACPI: "
 
 unsigned int acpi_cpei_override;
index da5b462e6de6c9e91c18d42756ff482bb9f70505..741b99c1a0b1483b153be8823ae681a141d71bf5 100644 (file)
@@ -477,6 +477,9 @@ efi_init (void)
        char *cp, vendor[100] = "unknown";
        int i;
 
+       set_bit(EFI_BOOT, &efi.flags);
+       set_bit(EFI_64BIT, &efi.flags);
+
        /*
         * It's too early to be able to use the standard kernel command line
         * support...
@@ -529,6 +532,8 @@ efi_init (void)
               efi.systab->hdr.revision >> 16,
               efi.systab->hdr.revision & 0xffff, vendor);
 
+       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
        palo_phys      = EFI_INVALID_TABLE_ADDR;
 
        if (efi_config_init(arch_tables) != 0)
@@ -657,6 +662,8 @@ efi_enter_virtual_mode (void)
                return;
        }
 
+       set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
        /*
         * Now that EFI is in virtual mode, we call the EFI functions more
         * efficiently:
index 1034884b77da428c59190f9841229fc3e3cb8f41..0884f5ecbcc3e97b86a5e4c248614359dbd7674c 100644 (file)
@@ -364,7 +364,6 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq_move_irqaction = {
        .handler =      smp_irq_move_cleanup_interrupt,
-       .flags =        IRQF_DISABLED,
        .name =         "irq_move"
 };
 
@@ -489,14 +488,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
        ia64_srlz_d();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                int irq = local_vector_to_irq(vector);
-               struct irq_desc *desc = irq_to_desc(irq);
 
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_incr_irqs_this_cpu(irq, desc);
+                       kstat_incr_irq_this_cpu(irq);
                } else if (unlikely(IS_RESCHEDULE(vector))) {
                        scheduler_ipi();
-                       kstat_incr_irqs_this_cpu(irq, desc);
+                       kstat_incr_irq_this_cpu(irq);
                } else {
                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();
@@ -549,13 +547,12 @@ void ia64_process_pending_intr(void)
          */
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                int irq = local_vector_to_irq(vector);
-               struct irq_desc *desc = irq_to_desc(irq);
 
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_incr_irqs_this_cpu(irq, desc);
+                       kstat_incr_irq_this_cpu(irq);
                } else if (unlikely(IS_RESCHEDULE(vector))) {
-                       kstat_incr_irqs_this_cpu(irq, desc);
+                       kstat_incr_irq_this_cpu(irq);
                } else {
                        struct pt_regs *old_regs = set_irq_regs(NULL);
 
@@ -602,7 +599,6 @@ static irqreturn_t dummy_handler (int irq, void *dev_id)
 
 static struct irqaction ipi_irqaction = {
        .handler =      handle_IPI,
-       .flags =        IRQF_DISABLED,
        .name =         "IPI"
 };
 
@@ -611,13 +607,11 @@ static struct irqaction ipi_irqaction = {
  */
 static struct irqaction resched_irqaction = {
        .handler =      dummy_handler,
-       .flags =        IRQF_DISABLED,
        .name =         "resched"
 };
 
 static struct irqaction tlb_irqaction = {
        .handler =      dummy_handler,
-       .flags =        IRQF_DISABLED,
        .name =         "tlb_flush"
 };
 
index b8edfa75a83f9cd8bce4469d1d1c97b93b02d371..db7b36bb068b8d306042028560dbbaafcbf7664f 100644 (file)
@@ -217,7 +217,7 @@ void ia64_mca_printk(const char *fmt, ...)
        /* Copy the output into mlogbuf */
        if (oops_in_progress) {
                /* mlogbuf was abandoned, use printk directly instead. */
-               printk(temp_buf);
+               printk("%s", temp_buf);
        } else {
                spin_lock(&mlogbuf_wlock);
                for (p = temp_buf; *p; p++) {
@@ -268,7 +268,7 @@ void ia64_mlogbuf_dump(void)
                }
                *p = '\0';
                if (temp_buf[0])
-                       printk(temp_buf);
+                       printk("%s", temp_buf);
                mlogbuf_start = index;
 
                mlogbuf_timestamp = 0;
@@ -1772,38 +1772,32 @@ __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
 
 static struct irqaction cmci_irqaction = {
        .handler =      ia64_mca_cmc_int_handler,
-       .flags =        IRQF_DISABLED,
        .name =         "cmc_hndlr"
 };
 
 static struct irqaction cmcp_irqaction = {
        .handler =      ia64_mca_cmc_int_caller,
-       .flags =        IRQF_DISABLED,
        .name =         "cmc_poll"
 };
 
 static struct irqaction mca_rdzv_irqaction = {
        .handler =      ia64_mca_rendez_int_handler,
-       .flags =        IRQF_DISABLED,
        .name =         "mca_rdzv"
 };
 
 static struct irqaction mca_wkup_irqaction = {
        .handler =      ia64_mca_wakeup_int_handler,
-       .flags =        IRQF_DISABLED,
        .name =         "mca_wkup"
 };
 
 #ifdef CONFIG_ACPI
 static struct irqaction mca_cpe_irqaction = {
        .handler =      ia64_mca_cpe_int_handler,
-       .flags =        IRQF_DISABLED,
        .name =         "cpe_hndlr"
 };
 
 static struct irqaction mca_cpep_irqaction = {
        .handler =      ia64_mca_cpe_int_caller,
-       .flags =        IRQF_DISABLED,
        .name =         "cpe_poll"
 };
 #endif /* CONFIG_ACPI */
index fb2f1e622877e202f8bdb6be4d6922fa6431986f..c430f9198d1bfec6b50b9d6f21d2929ef55dcf45 100644 (file)
@@ -17,12 +17,9 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
 {
        struct msi_msg msg;
        u32 addr, data;
-       int cpu = first_cpu(*cpu_mask);
+       int cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
        unsigned int irq = idata->irq;
 
-       if (!cpu_online(cpu))
-               return -1;
-
        if (irq_prepare_move(irq, cpu))
                return -1;
 
@@ -139,10 +136,7 @@ static int dmar_msi_set_affinity(struct irq_data *data,
        unsigned int irq = data->irq;
        struct irq_cfg *cfg = irq_cfg + irq;
        struct msi_msg msg;
-       int cpu = cpumask_first(mask);
-
-       if (!cpu_online(cpu))
-               return -1;
+       int cpu = cpumask_first_and(mask, cpu_online_mask);
 
        if (irq_prepare_move(irq, cpu))
                return -1;
index cb592773c78b1ef1f86faa4a4190c507c4e9fdbf..d841c4bd6864907212cde6554957e65d27eaa02d 100644 (file)
@@ -6387,7 +6387,6 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
 static struct irqaction perfmon_irqaction = {
        .handler = pfm_interrupt_handler,
-       .flags   = IRQF_DISABLED,
        .name    = "perfmon"
 };
 
index fbaac1afb8441926d3f2fdc9eec4ed85c6122da1..71c52bc7c28d824ff3428675ac11410db654639d 100644 (file)
@@ -380,7 +380,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs)
 
 static struct irqaction timer_irqaction = {
        .handler =      timer_interrupt,
-       .flags =        IRQF_DISABLED | IRQF_IRQPOLL,
+       .flags =        IRQF_IRQPOLL,
        .name =         "timer"
 };
 
index 62cf4dde6a0426dafa12ce5a3b0b56f1ccb00b41..85d095154902453fb5896734597e849af07ea74e 100644 (file)
@@ -209,8 +209,8 @@ static int sn_set_affinity_irq(struct irq_data *data,
        nasid_t nasid;
        int slice;
 
-       nasid = cpuid_to_nasid(cpumask_first(mask));
-       slice = cpuid_to_slice(cpumask_first(mask));
+       nasid = cpuid_to_nasid(cpumask_first_and(mask, cpu_online_mask));
+       slice = cpuid_to_slice(cpumask_first_and(mask, cpu_online_mask));
 
        list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
                                 sn_irq_lh[irq], list)
index 2b98b9e088def963ac47033c4717fa5ea4f24075..afc58d2799adb7a8b7ccf5d454d90a26fff0a3dc 100644 (file)
@@ -166,7 +166,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data,
        struct sn_pcibus_provider *provider;
        unsigned int cpu, irq = data->irq;
 
-       cpu = cpumask_first(cpu_mask);
+       cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
        sn_irq_info = sn_msi_info[irq].sn_irq_info;
        if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
                return -1;
index 932435ac4e5ce4ff721b2c455de567c5c7603861..67779a74b62dbbe2e5841253acf145867b67c9ae 100644 (file)
@@ -1,7 +1,9 @@
 
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
 generic-y += module.h
-generic-y += trace_clock.h
 generic-y += preempt.h
-generic-y += hash.h
+generic-y += trace_clock.h
diff --git a/arch/m32r/include/asm/cputime.h b/arch/m32r/include/asm/cputime.h
deleted file mode 100644 (file)
index 0a47550..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M32R_CPUTIME_H
-#define __M32R_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M32R_CPUTIME_H */
index dbdd2231c75ddc9fab61fd1b2dca7c1cc60a3e3e..b2e322939256f528bfeb042bc420cd0de2efc1a3 100644 (file)
@@ -17,6 +17,7 @@ config M68K
        select FPU if MMU
        select ARCH_WANT_IPC_PARSE_VERSION
        select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
+       select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
        select HAVE_MOD_ARCH_SPECIFIC
        select MODULES_USE_ELF_REL
        select MODULES_USE_ELF_RELA
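
The new "select HAVE_FUTEX_CMPXCHG if MMU && FUTEX" line lets m68k assert at build time that futex_atomic_cmpxchg_inatomic() works, so the futex core can skip its boot-time probe of that operation (a probe that can misdetect on some configurations). Roughly, the core-side logic being short-circuited has the following shape; this is a simplified sketch, not a verbatim copy of kernel/futex.c:

    #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
    static void __init futex_detect_cmpxchg(void)
    {
            u32 curval;

            /* Probe a NULL user address: -EFAULT means the cmpxchg path
             * exists and works; -ENOSYS means it is not implemented. */
            if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
                    futex_cmpxchg_enabled = 1;
    }
    #else
    static inline void futex_detect_cmpxchg(void) { }
    #endif
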
index 18c0e29976e37475cf822b5b11d9bf32437791eb..2081b8cd5591c6d77385de8594e737c9d421f7be 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <asm/irq.h>
 #include <asm/amigahw.h>
index 3e73a63c066f8a480ae543035c26cd5265f797ae..3d2b63bedf0589c67cdf0471007157bb913f18ac 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/module.h>
+#include <linux/irq.h>
 
 #include <asm/traps.h>
 
index 559ff3af8ff7b1e8dd4a738eebe6321f1dc3ddd9..96da4963d14b21ef0b5daa023b4d16425eaebeaf 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
@@ -85,6 +87,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -94,6 +97,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -126,6 +131,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -163,8 +169,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -190,7 +194,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -512,7 +515,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index cb1f55df69b6c87b03dc2c1598b43fdbfd3fcfa0..1b8739f50cbf77631d0eb5fb25d22941c7233aad 100644 (file)
@@ -25,6 +25,8 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
@@ -83,6 +85,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -92,6 +95,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -124,6 +129,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -161,8 +167,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -188,7 +192,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -470,7 +473,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index e880cfbb62d9e419bcbf4a4d006ffd420939b4f1..6ea4e91f0caabed1a806afbaab092744f23c0c98 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
@@ -82,6 +84,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -91,6 +94,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -123,6 +128,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -160,8 +166,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -187,7 +191,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -487,7 +490,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 4aa4f45e52a8708853925fb678a19d4889f632f3..e5a12739ff2d3535e8d9ed0dc22df8720eb4689e 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
 CONFIG_M68060=y
 CONFIG_VME=y
@@ -81,6 +83,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -90,6 +93,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -122,6 +127,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -159,8 +165,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -186,7 +190,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -463,7 +466,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 7cd9d9f456fb61ab3619641b81ee8dbfbd975acf..8936d7fb0f0f15a983dcd6d0262301a7447e10b9 100644 (file)
@@ -25,6 +25,8 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
@@ -83,6 +85,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -92,6 +95,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -124,6 +129,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -161,8 +167,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -188,7 +192,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -472,7 +475,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 31f5bd061d1466f5367fa0b0dafc1ecaa8f4d521..be5342cca25b36180991f88a49d2bfe80f49ff2f 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
 CONFIG_M68030=y
 CONFIG_M68040=y
@@ -82,6 +84,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -91,6 +94,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -123,6 +128,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -160,8 +166,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -187,7 +191,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -495,7 +498,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 4e5adff326eec88bccff93bf83933f518b94328b..f27194ade167d96fb949b1e2a42e7e74a75f6e79 100644 (file)
@@ -20,6 +20,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68020=y
 CONFIG_M68040=y
 CONFIG_M68060=y
@@ -91,6 +93,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -100,6 +103,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -132,6 +137,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -169,8 +175,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -196,7 +200,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -571,7 +574,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 02cdbac5565e5153f0970409e8247535bf818fd7..c3887603c1db4a49196af9a52c2dc85efdc641be 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68030=y
 CONFIG_VME=y
 CONFIG_MVME147=y
@@ -80,6 +82,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -89,6 +92,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -121,6 +126,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -158,8 +164,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -185,7 +189,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -463,7 +466,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 05a990a9dbd46994c60488c6eefeb807aa19f287..f7ff784d05aca2498040a3b47ea477c5793ac22d 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
 CONFIG_M68060=y
 CONFIG_VME=y
@@ -81,6 +83,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -90,6 +93,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -122,6 +127,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -159,8 +165,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -186,7 +190,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -464,7 +467,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 568e2a98f976ee3c91900734df88e5d675c83811..f0c72ab037be1b268abcc720be5dda97fb128323 100644 (file)
@@ -25,6 +25,8 @@ CONFIG_SUN_PARTITION=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_M68040=y
 CONFIG_M68060=y
 CONFIG_Q40=y
@@ -81,6 +83,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -90,6 +93,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -122,6 +127,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -159,8 +165,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -186,7 +190,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -485,7 +488,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 60b0aeac5742e80f28c8345a856070638252897b..7bca0f464521707451d41686040fac2dfc4b2070 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
@@ -78,6 +80,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -87,6 +90,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -119,6 +124,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -156,8 +162,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -183,7 +187,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -464,7 +467,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 21bda331eebb04cfae90c5b94d869b18c9097d95..317f3e1fec954285880d0268bce5af93cc23be1e 100644 (file)
@@ -24,6 +24,8 @@ CONFIG_UNIXWARE_DISKLABEL=y
 # CONFIG_EFI_PARTITION is not set
 CONFIG_SYSV68_PARTITION=y
 CONFIG_IOSCHED_DEADLINE=m
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
 CONFIG_SUN3X=y
 # CONFIG_COMPACTION is not set
 CONFIG_CLEANCACHE=y
@@ -78,6 +80,7 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -87,6 +90,8 @@ CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -119,6 +124,7 @@ CONFIG_NETFILTER_XT_MATCH_DSCP=m
 CONFIG_NETFILTER_XT_MATCH_ESP=m
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
 CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
 CONFIG_NETFILTER_XT_MATCH_LENGTH=m
 CONFIG_NETFILTER_XT_MATCH_LIMIT=m
@@ -156,8 +162,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -183,7 +187,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -464,7 +467,6 @@ CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 # CONFIG_CRYPTO_HW is not set
-CONFIG_CRC_T10DIF=y
 CONFIG_XZ_DEC_X86=y
 CONFIG_XZ_DEC_POWERPC=y
 CONFIG_XZ_DEC_IA64=y
index 6fb9e813a91074cb8ae41205bb8f899bcc35cf38..c67c94a2d67229a461239a4335def5d90d597abe 100644 (file)
@@ -14,8 +14,9 @@ generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kvm_para.h
-generic-y += local64.h
 generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += mutex.h
 generic-y += percpu.h
index 4c99bab7e6647c0709ebacbff38b42f01ab24449..3ab329b8852173694bac06c2559841fed73d95ac 100644 (file)
 
 #ifdef CONFIG_FRAMEBUFFER_CONSOLE
 #define CONSOLE
-#define CONSOLE_PENGUIN
 #endif
 
 #ifdef CONFIG_EARLY_PRINTK
@@ -658,27 +657,6 @@ ENTRY(__start)
        movel   %a0@,%a1@
 #endif
 
-#if 0
-       /*
-        * Clear the screen
-        */
-       lea     %pc@(L(mac_videobase)),%a0
-       movel   %a0@,%a1
-       lea     %pc@(L(mac_dimensions)),%a0
-       movel   %a0@,%d1
-       swap    %d1             /* #rows is high bytes */
-       andl    #0xFFFF,%d1     /* rows */
-       subl    #10,%d1
-       lea     %pc@(L(mac_rowbytes)),%a0
-loopy2:
-       movel   %a0@,%d0
-       subql   #1,%d0
-loopx2:
-       moveb   #0x55, %a1@+
-       dbra    %d0,loopx2
-       dbra    %d1,loopy2
-#endif
-
 L(test_notmac):
 #endif /* CONFIG_MAC */
 
@@ -907,15 +885,15 @@ L(nothp):
  */
 #ifdef CONFIG_MAC
        is_not_mac(L(nocon))
-#ifdef CONSOLE
+#  ifdef CONSOLE
        console_init
-#ifdef CONSOLE_PENGUIN
+#    ifdef CONFIG_LOGO
        console_put_penguin
-#endif /* CONSOLE_PENGUIN */
+#    endif /* CONFIG_LOGO */
        console_put_stats
-#endif /* CONSOLE */
+#  endif /* CONSOLE */
 L(nocon):
-#endif /* CONFIG_MAC */
+#endif /* CONFIG_MAC */
 
 
        putc    '\n'
@@ -3324,14 +3302,13 @@ func_return     set_leds
 #define Lconsole_struct_num_columns    8
 #define Lconsole_struct_num_rows       12
 #define Lconsole_struct_left_edge      16
-#define Lconsole_struct_penguin_putc   20
 
 func_start     console_init,%a0-%a4/%d0-%d7
        /*
         *      Some of the register usage that follows
         *              a0 = pointer to boot_info
         *              a1 = pointer to screen
-        *              a2 = pointer to Lconsole_globals
+        *              a2 = pointer to console_globals
         *              d3 = pixel width of screen
         *              d4 = pixel height of screen
         *              (d3,d4) ~= (x,y) of a point just below
@@ -3456,7 +3433,7 @@ func_start        console_put_stats,%a0/%d7
 
 func_return    console_put_stats
 
-#ifdef CONSOLE_PENGUIN
+#ifdef CONFIG_LOGO
 func_start     console_put_penguin,%a0-%a1/%d0-%d7
        /*
         *      Get 'that_penguin' onto the screen in the upper right corner
@@ -3799,38 +3776,6 @@ L(console_plot_pixel_exit):
 func_return    console_plot_pixel
 #endif /* CONSOLE */
 
-#if 0
-/*
- * This is some old code lying around.  I don't believe
- * it's used or important anymore.  My guess is it contributed
- * to getting to this point, but it's done for now.
- * It was still in the 2.1.77 head.S, so it's still here.
- * (And still not used!)
- */
-L(showtest):
-       moveml  %a0/%d7,%sp@-
-       puts    "A="
-       putn    %a1
-
-       .long   0xf0119f15              | ptestr        #5,%a1@,#7,%a0
-
-       puts    "DA="
-       putn    %a0
-
-       puts    "D="
-       putn    %a0@
-
-       puts    "S="
-       lea     %pc@(L(mmu)),%a0
-       .long   0xf0106200              | pmove         %psr,%a0@
-       clrl    %d7
-       movew   %a0@,%d7
-       putn    %d7
-
-       putc    '\n'
-       moveml  %sp@+,%a0/%d7
-       rts
-#endif /* 0 */
 
 __INITDATA
        .align  4
@@ -3849,7 +3794,6 @@ L(console_globals):
        .long   0               /* max num columns */
        .long   0               /* max num rows */
        .long   0               /* left edge */
-       .long   0               /* mac putc */
 L(console_font):
        .long   0               /* pointer to console font (struct font_desc) */
 L(console_font_data):
index 077d3a70fed1995611f89e0f5795c40da07378df..5b8d66fbf3832a2c70d95d7841f4c5e38e9171cf 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 
 #include <asm/setup.h>
 #include <asm/irq.h>
index b716d807c2ece0b222da935b6796f789b2486c97..c29ead89a31778da9d5f217b6cc984c1176a6772 100644 (file)
@@ -13,6 +13,7 @@ generic-y += fb.h
 generic-y += fcntl.h
 generic-y += futex.h
 generic-y += hardirq.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
@@ -23,6 +24,7 @@ generic-y += kmap_types.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
@@ -30,6 +32,7 @@ generic-y += pci.h
 generic-y += percpu.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += sembuf.h
@@ -52,5 +55,3 @@ generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 2b98bc73642afb4cd07c7c412c416c9a87cb69de..c98ed95c054185913aa3fb63dbf641d6984ba2d8 100644 (file)
@@ -1,8 +1,10 @@
 
 generic-y += barrier.h
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
 generic-y += hash.h
-generic-y += trace_clock.h
-generic-y += syscalls.h
+generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += syscalls.h
+generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/cputime.h b/arch/microblaze/include/asm/cputime.h
deleted file mode 100644 (file)
index 6d68ad7..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>
index 2d7f65052c1f3ffe9c9f191539a196dff97339f0..05439187891dfa02e582dc1caa2b6abbbfb8395b 100644 (file)
@@ -2,16 +2,17 @@
 generic-y += cputime.h
 generic-y += current.h
 generic-y += emergency-restart.h
+generic-y += hash.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += mutex.h
 generic-y += parport.h
 generic-y += percpu.h
+generic-y += preempt.h
 generic-y += scatterlist.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
-generic-y += preempt.h
 generic-y += ucontext.h
 generic-y += xor.h
-generic-y += hash.h
index 12609a17dc8b5893faacf8df47b954b10a140664..20ea4859c82259534425ceb3833aa098f7400cf2 100644 (file)
@@ -10,8 +10,4 @@
 
 #include <topology.h>
 
-#ifdef CONFIG_SMP
-#define smt_capable()  (smp_num_siblings > 1)
-#endif
-
 #endif /* __ASM_TOPOLOGY_H */
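
smt_capable() is deleted from this MIPS header (and from the powerpc one later in the diff) because nothing in the core kernel consumes it any more; the scheduler gets SMT information from the topology sibling masks instead. If equivalent information is wanted, something along these lines works; the helper below is purely illustrative and not part of the diff:

    #include <linux/topology.h>
    #include <linux/cpumask.h>
    #include <linux/types.h>

    /* Does this CPU share its core with at least one other hardware thread? */
    static bool cpu_has_smt_sibling(int cpu)
    {
            return cpumask_weight(topology_thread_cpumask(cpu)) > 1;
    }
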
index 4d3b92886665799d2ca2b746d8efc310cc0c1168..413d6c612bec059e0e5579d3167d67e6bb8e7ad0 100644 (file)
@@ -24,7 +24,6 @@
 
 #ifndef __ASSEMBLY__
 
-#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
 #define __ARCH_WANT_SYS_GETHOSTNAME
index dfc1b911be04b7480cb30809ba59583cc76598b6..c1681d65dd5c8e580cc26dbf896caa90ad857df9 100644 (file)
@@ -1007,7 +1007,7 @@ static void __irq_entry smtc_clock_tick_interrupt(void)
        int irq = MIPS_CPU_IRQ_BASE + 1;
 
        irq_enter();
-       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+       kstat_incr_irq_this_cpu(irq);
        cd = &per_cpu(mips_clockevent_device, cpu);
        cd->event_handler(cd);
        irq_exit();
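
The interrupt-accounting calls in these MIPS and mn10300 files switch from kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)) to the newer kstat_incr_irq_this_cpu(irq), which performs the descriptor lookup internally and keeps irq_to_desc() out of architecture code. Usage reduces to a single call; the surrounding handler below is invented for illustration:

    #include <linux/kernel_stat.h>

    static void example_demux(int irq)
    {
            /* Account the interrupt on this CPU without touching irq_desc. */
            kstat_incr_irq_this_cpu(irq);
            /* ... decode and handle the event ... */
    }
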
index 3db64d51798ddb4afd1fd3893a270c954db0a921..58b40ae5933535bc869f76f4bfacc52fb5f705d4 100644 (file)
@@ -148,7 +148,7 @@ static void __irq_entry indy_buserror_irq(void)
        int irq = SGI_BUSERR_IRQ;
 
        irq_enter();
-       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+       kstat_incr_irq_this_cpu(irq);
        ip22_be_interrupt(irq);
        irq_exit();
 }
index 6071924493353c96c3c69560f90ab2488d49c617..045aa89f28d8c79fc8ffed9217f9e1a62e56e229 100644 (file)
@@ -123,7 +123,7 @@ void __irq_entry indy_8254timer_irq(void)
        char c;
 
        irq_enter();
-       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+       kstat_incr_irq_this_cpu(irq);
        printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
        ArcRead(0, &c, 1, &cnt);
        ArcEnterInteractiveMode();
index 09d6e16a70f14f0b294f99a4b193862cfde8dcca..59cfe2659771e058bf08823c8b6b83fa22a44fa7 100644 (file)
@@ -95,7 +95,7 @@ static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
        u64 cur_ints;
        unsigned long flags;
 
-       i = cpumask_first(mask);
+       i = cpumask_first_and(mask, cpu_online_mask);
 
        /* Convert logical CPU to physical CPU */
        cpu = cpu_logical_map(i);
index 54e2c4de15c1638706879b6356c85c13c10165a0..70d9182b26f155703bbf7eb74a781a61681f2dc3 100644 (file)
@@ -182,7 +182,7 @@ void bcm1480_mailbox_interrupt(void)
        int irq = K_BCM1480_INT_MBOX_0_0;
        unsigned int action;
 
-       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+       kstat_incr_irq_this_cpu(irq);
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
 
index fca0cdb99509a3484e11c022a9e4ec00bfda8dd8..6d8dba5cf3480af010e2f1807238a19a468884de 100644 (file)
@@ -88,7 +88,7 @@ static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
        u64 cur_ints;
        unsigned long flags;
 
-       i = cpumask_first(mask);
+       i = cpumask_first_and(mask, cpu_online_mask);
 
        /* Convert logical CPU to physical CPU */
        cpu = cpu_logical_map(i);
index d7b942db0ea519e0c5f6f8b39c69fa21589a5eaa..db976117dd4d77948547b2f34a7a1b7ddde2a15b 100644 (file)
@@ -170,7 +170,7 @@ void sb1250_mailbox_interrupt(void)
        int irq = K_INT_MBOX_0;
        unsigned int action;
 
-       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+       kstat_incr_irq_this_cpu(irq);
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
 
index 992e989ab7853d3f3e1be4bbb432cf4c2de8c7a2..654d5ba6e31077354f130f22a198c7b6adb45743 100644 (file)
@@ -1,7 +1,9 @@
 
 generic-y += barrier.h
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
 generic-y += hash.h
-generic-y += trace_clock.h
+generic-y += mcs_spinlock.h
 generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/cputime.h b/arch/mn10300/include/asm/cputime.h
deleted file mode 100644 (file)
index 6d68ad7..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>
index ccce35e3e179150c87f5f94af2a2463333236a68..60f64ca1752aff964ee6274ed066ccf9744d5034 100644 (file)
@@ -113,7 +113,7 @@ int __init init_clockevents(void)
        cd->set_next_event      = next_event;
 
        iact = &per_cpu(timer_irq, cpu);
-       iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+       iact->flags = IRQF_SHARED | IRQF_TIMER;
        iact->handler = timer_interrupt;
 
        clockevents_register_device(cd);
index bf6e949a2f87a63f701f3a23a468574da4561cfe..7ecf69879e2d6ff077296a79605d8f367b70fdd4 100644 (file)
@@ -985,17 +985,17 @@ static int mn10300_serial_startup(struct uart_port *_port)
        irq_set_chip(port->tm_irq, &mn10300_serial_pic);
 
        if (request_irq(port->rx_irq, mn10300_serial_interrupt,
-                       IRQF_DISABLED | IRQF_NOBALANCING,
+                       IRQF_NOBALANCING,
                        port->rx_name, port) < 0)
                goto error;
 
        if (request_irq(port->tx_irq, mn10300_serial_interrupt,
-                       IRQF_DISABLED | IRQF_NOBALANCING,
+                       IRQF_NOBALANCING,
                        port->tx_name, port) < 0)
                goto error2;
 
        if (request_irq(port->tm_irq, mn10300_serial_interrupt,
-                       IRQF_DISABLED | IRQF_NOBALANCING,
+                       IRQF_NOBALANCING,
                        port->tm_name, port) < 0)
                goto error3;
        mn10300_serial_mask_ack(port->tm_irq);
index db64a7166c095850c6bf18fcadcdaa3a444859fe..a2d8e6938d6716bca53162a00775291bca58d227 100644 (file)
@@ -142,7 +142,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
        NMICR = NMICR_WDIF;
 
        nmi_count(smp_processor_id())++;
-       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+       kstat_incr_irq_this_cpu(irq);
 
        for_each_online_cpu(cpu) {
 
index a17f9c9c14c9f3b7115d41f4ad765ee70de3ba2a..f984193718b1003f03861e79cd137b4a8c23badd 100644 (file)
@@ -143,7 +143,7 @@ static struct irqaction call_function_ipi = {
 static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
 static struct irqaction local_timer_ipi = {
        .handler        = smp_ipi_timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_NOBALANCING,
+       .flags          = IRQF_NOBALANCING,
        .name           = "smp local timer IPI"
 };
 #endif
index e16c216f31dcdd3ce0c4279f6e921ce13db1f41e..073e2ccc4a441b419bc58c2207eba4f5e7eae81c 100644 (file)
@@ -76,7 +76,7 @@ static irqreturn_t fpga_interrupt(int irq, void *_mask)
 static struct irqaction fpga_irq[]  = {
        [0] = {
                .handler        = fpga_interrupt,
-               .flags          = IRQF_DISABLED | IRQF_SHARED,
+               .flags          = IRQF_SHARED,
                .name           = "fpga",
        },
 };
index 2e40f1ca86677ecff7f232beb909128aa509e9d3..480af0d9c2f5dbe20fb39dc6331648dacb043a97 100644 (file)
@@ -10,8 +10,8 @@ generic-y += bugs.h
 generic-y += cacheflush.h
 generic-y += checksum.h
 generic-y += clkdev.h
-generic-y += cmpxchg.h
 generic-y += cmpxchg-local.h
+generic-y += cmpxchg.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
@@ -25,6 +25,7 @@ generic-y += fcntl.h
 generic-y += ftrace.h
 generic-y += futex.h
 generic-y += hardirq.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
@@ -34,6 +35,7 @@ generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kvm_para.h
 generic-y += local.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += module.h
 generic-y += msgbuf.h
@@ -41,6 +43,7 @@ generic-y += pci.h
 generic-y += percpu.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
@@ -53,11 +56,11 @@ generic-y += siginfo.h
 generic-y += signal.h
 generic-y += socket.h
 generic-y += sockios.h
-generic-y += statfs.h
 generic-y += stat.h
+generic-y += statfs.h
 generic-y += string.h
-generic-y += switch_to.h
 generic-y += swab.h
+generic-y += switch_to.h
 generic-y += termbits.h
 generic-y += termios.h
 generic-y += topology.h
@@ -68,5 +71,3 @@ generic-y += user.h
 generic-y += vga.h
 generic-y += word-at-a-time.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 752c981bc3c7fc36dbd0fb01705f4c691e7ee6bd..ecf25e6678ad5a2f0e2bd63761699e85c9d2d759 100644 (file)
@@ -1,9 +1,29 @@
 
+generic-y += auxvec.h
 generic-y += barrier.h
-generic-y += word-at-a-time.h auxvec.h user.h cputime.h emergency-restart.h \
-         segment.h topology.h vga.h device.h percpu.h hw_irq.h mutex.h \
-         div64.h irq_regs.h kdebug.h kvm_para.h local64.h local.h param.h \
-         poll.h xor.h clkdev.h exec.h
-generic-y += trace_clock.h
-generic-y += preempt.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += exec.h
 generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += preempt.h
+generic-y += segment.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
index 8ceac4785609279079e321384ec8e6b59a2b4cc7..cfe056fe7f5c636f45f642950a01e0ce4cb7e536 100644 (file)
@@ -117,7 +117,7 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
                return -EINVAL;
 
        /* whatever mask they set, we just allow one CPU */
-       cpu_dest = first_cpu(*dest);
+       cpu_dest = cpumask_first_and(dest, cpu_online_mask);
 
        return cpu_dest;
 }
index 6c0a955a1b06277d05b7bf159da202279b2038e9..3fb1bc432f4f6106f1a892ce02113ec68970af6a 100644 (file)
@@ -1,7 +1,8 @@
 
 generic-y += clkdev.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += trace_clock.h
-generic-y += preempt.h
 generic-y += vtime.h
-generic-y += hash.h
index d0b5fca6b0776fc8ef220483b91b1557c73a0835..c9202151079f2b1d6e99af4569b82ce68c785573 100644 (file)
@@ -99,7 +99,6 @@ static inline int prrn_is_enabled(void)
 
 #ifdef CONFIG_SMP
 #include <asm/cputable.h>
-#define smt_capable()          (cpu_has_feature(CPU_FTR_SMT))
 
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
index fdc679d309ec4c030d6f407d35675fb1b2527473..bb61ca58ca6d8272955d7023852f97cbe180c676 100644 (file)
@@ -143,13 +143,30 @@ static void eeh_disable_irq(struct pci_dev *dev)
 static void eeh_enable_irq(struct pci_dev *dev)
 {
        struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
-       struct irq_desc *desc;
 
        if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
                edev->mode &= ~EEH_DEV_IRQ_DISABLED;
-
-               desc = irq_to_desc(dev->irq);
-               if (desc && desc->depth > 0)
+               /*
+                * FIXME !!!!!
+                *
+                * This is just ass backwards. This maze has
+                * unbalanced irq_enable/disable calls. So instead of
+                * finding the root cause it works around the warning
+                * in the irq_enable code by conditionally calling
+                * into it.
+                *
+                * That's just wrong.The warning in the core code is
+                * there to tell people to fix their assymetries in
+                * their own code, not by abusing the core information
+                * to avoid it.
+                *
+                * I so wish that the assymetry would be the other way
+                * round and a few more irq_disable calls render that
+                * shit unusable forever.
+                *
+                *      tglx
+                */
+               if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
                        enable_irq(dev->irq);
        }
 }
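
Functionally, eeh_enable_irq() stops peeking at irq_desc->depth and instead asks the genirq core, via irqd_irq_disabled(irq_get_irq_data(dev->irq)), whether the line is currently disabled before re-enabling it; the FIXME block above is the core maintainer's objection to working around the unbalanced enable/disable calls rather than fixing them. The check in isolation, as a sketch:

    #include <linux/irq.h>
    #include <linux/interrupt.h>

    /* Re-enable an interrupt line only if the core currently sees it disabled. */
    static void maybe_reenable_irq(unsigned int irq)
    {
            if (irqd_irq_disabled(irq_get_irq_data(irq)))
                    enable_irq(irq);
    }
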
index 1d0848bba049bf2c97bd09d9d86864857af128a7..ca1cd7459c4a3e60c73de7f4169484b03804cdbc 100644 (file)
@@ -465,7 +465,6 @@ static inline void check_stack_overflow(void)
 
 void __do_irq(struct pt_regs *regs)
 {
-       struct irq_desc *desc;
        unsigned int irq;
 
        irq_enter();
@@ -487,11 +486,8 @@ void __do_irq(struct pt_regs *regs)
        /* And finally process it */
        if (unlikely(irq == NO_IRQ))
                __get_cpu_var(irq_stat).spurious_irqs++;
-       else {
-               desc = irq_to_desc(irq);
-               if (likely(desc))
-                       desc->handle_irq(irq, desc);
-       }
+       else
+               generic_handle_irq(irq);
 
        trace_irq_exit(regs);
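
__do_irq() no longer open-codes the irq_to_desc()/handle_irq() pair; generic_handle_irq() performs the same descriptor lookup and dispatch, and quietly returns an error when the number has no descriptor, matching the old NULL-desc check. The replacement in isolation, as a sketch:

    #include <linux/irqdesc.h>
    #include <linux/printk.h>

    static void dispatch_one_irq(unsigned int irq)
    {
            /* Looks up the descriptor and runs its flow handler; returns
             * -EINVAL if no descriptor exists for this irq number. */
            if (generic_handle_irq(irq) < 0)
                    pr_debug("no handler for irq %u\n", irq);
    }
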
 
index 1f0ebdeea5f77cdd63a844680912242d8b0a7574..863d89386f607dd86a3edb08d594448b3b5fe260 100644 (file)
@@ -1121,8 +1121,7 @@ oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
        int ret = 0;
        struct cpufreq_freqs *frq = data;
        if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
-           (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
-           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
+           (val == CPUFREQ_POSTCHANGE && frq->old > frq->new))
                set_spu_profiling_frequency(frq->new, spu_cycle_reset);
        return ret;
 }
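
The oprofile notifier drops its CPUFREQ_RESUMECHANGE/CPUFREQ_SUSPENDCHANGE cases as part of the cpufreq cleanup retiring those transition values; transition notifiers are expected to act only on CPUFREQ_PRECHANGE and CPUFREQ_POSTCHANGE. A minimal transition notifier in that style; the logging is a placeholder and the block is a sketch, not this driver's code:

    #include <linux/cpufreq.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int example_freq_notify(struct notifier_block *nb,
                                   unsigned long val, void *data)
    {
            struct cpufreq_freqs *freq = data;

            if (val == CPUFREQ_POSTCHANGE)   /* only PRE/POSTCHANGE remain */
                    pr_info("cpu%u now at %u kHz\n", freq->cpu, freq->new);
            return NOTIFY_OK;
    }

    static struct notifier_block example_freq_nb = {
            .notifier_call = example_freq_notify,
    };

    /* Registered elsewhere with:
     * cpufreq_register_notifier(&example_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
     */
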
index 49318385d4fac71e6122588da95c895a18d9198d..4a0a64fe25df274a16051b668a3497b4632e8bf1 100644 (file)
@@ -83,7 +83,6 @@ static struct timer_list spuloadavg_timer;
 #define MIN_SPU_TIMESLICE      max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
 #define DEF_SPU_TIMESLICE      (100 * HZ / (1000 * SPUSCHED_TICK))
 
-#define MAX_USER_PRIO          (MAX_PRIO - MAX_RT_PRIO)
 #define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
 
index 110f4fbd319f628373522e99672a95dcffda6688..81a7a0a79be75e0c8035baa992a6a674546c94eb 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/of_fdt.h>
 #include <linux/interrupt.h>
 #include <linux/bug.h>
-#include <linux/cpuidle.h>
 #include <linux/pci.h>
 
 #include <asm/machdep.h>
@@ -225,16 +224,6 @@ static int __init pnv_probe(void)
        return 1;
 }
 
-void powernv_idle(void)
-{
-       /* Hook to cpuidle framework if available, else
-        * call on default platform idle code
-        */
-       if (cpuidle_idle_call()) {
-               power7_idle();
-       }
-}
-
 define_machine(powernv) {
        .name                   = "PowerNV",
        .probe                  = pnv_probe,
@@ -244,7 +233,7 @@ define_machine(powernv) {
        .show_cpuinfo           = pnv_show_cpuinfo,
        .progress               = pnv_progress,
        .machine_shutdown       = pnv_shutdown,
-       .power_save             = powernv_idle,
+       .power_save             = power7_idle,
        .calibrate_decr         = generic_calibrate_decr,
        .dma_set_mask           = pnv_dma_set_mask,
 #ifdef CONFIG_KEXEC
index 972df0ffd4dcc4dff154fe84ada40705b173b9f7..2db8cc691bf49dcd5b72b150c48bde8ec440f841 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/irq.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
-#include <linux/cpuidle.h>
 #include <linux/of.h>
 #include <linux/kexec.h>
 
@@ -356,29 +355,24 @@ early_initcall(alloc_dispatch_log_kmem_cache);
 
 static void pseries_lpar_idle(void)
 {
-       /* This would call on the cpuidle framework, and the back-end pseries
-        * driver to  go to idle states
+       /*
+        * Default handler to go into low thread priority and possibly
+        * low power mode by cedeing processor to hypervisor
         */
-       if (cpuidle_idle_call()) {
-               /* On error, execute default handler
-                * to go into low thread priority and possibly
-                * low power mode by cedeing processor to hypervisor
-                */
 
-               /* Indicate to hypervisor that we are idle. */
-               get_lppaca()->idle = 1;
+       /* Indicate to hypervisor that we are idle. */
+       get_lppaca()->idle = 1;
 
-               /*
-                * Yield the processor to the hypervisor.  We return if
-                * an external interrupt occurs (which are driven prior
-                * to returning here) or if a prod occurs from another
-                * processor. When returning here, external interrupts
-                * are enabled.
-                */
-               cede_processor();
+       /*
+        * Yield the processor to the hypervisor.  We return if
+        * an external interrupt occurs (which are driven prior
+        * to returning here) or if a prod occurs from another
+        * processor. When returning here, external interrupts
+        * are enabled.
+        */
+       cede_processor();
 
-               get_lppaca()->idle = 0;
-       }
+       get_lppaca()->idle = 0;
 }
 
 /*
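
Both the powernv and pseries setup code stop calling cpuidle_idle_call() themselves: powernv points ppc_md.power_save straight at power7_idle, and pseries_lpar_idle() keeps only the default cede-to-hypervisor path. Choosing between a cpuidle driver and the platform fallback is now the generic idle loop's job rather than each platform's. Very schematically, and only as a sketch of the intent rather than the actual idle-loop code:

    #include <linux/cpuidle.h>
    #include <linux/cpu.h>

    /* Sketch: the generic loop tries cpuidle first, then the arch fallback. */
    static void idle_step(void)
    {
            if (cpuidle_idle_call())   /* no cpuidle driver or state available */
                    arch_cpu_idle();   /* on powerpc this ends in ppc_md.power_save() */
    }
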
index b74085cea1af2565f3aea17f127943eb03ba7fec..2d20f10a42030394581c001e9f19793ebe4667bd 100644 (file)
@@ -28,8 +28,6 @@
 #include <asm/ehv_pic.h>
 #include <asm/fsl_hcalls.h>
 
-#include "../../../kernel/irq/settings.h"
-
 static struct ehv_pic *global_ehv_pic;
 static DEFINE_SPINLOCK(ehv_pic_lock);
 
@@ -113,17 +111,13 @@ static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
 int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
        unsigned int src = virq_to_hw(d->irq);
-       struct irq_desc *desc = irq_to_desc(d->irq);
        unsigned int vecpri, vold, vnew, prio, cpu_dest;
        unsigned long flags;
 
        if (flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_LEVEL_LOW;
 
-       irq_settings_clr_level(desc);
-       irq_settings_set_trigger_mask(desc, flow_type);
-       if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
-               irq_settings_set_level(desc);
+       irqd_set_trigger_type(d, flow_type);
 
        vecpri = ehv_pic_type_to_vecpri(flow_type);
 
@@ -144,7 +138,7 @@ int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
        ev_int_set_config(src, vecpri, prio, cpu_dest);
 
        spin_unlock_irqrestore(&ehv_pic_lock, flags);
-       return 0;
+       return IRQ_SET_MASK_OK_NOCOPY;
 }
 
 static struct irq_chip ehv_pic_irq_chip = {
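
ehv_pic_set_irq_type() used to reach into kernel/irq/settings.h, a core-internal header, to record the trigger type; it now uses the public irqd_set_trigger_type() and returns IRQ_SET_MASK_OK_NOCOPY so the genirq core keeps the value the callback wrote instead of copying flow_type over it again. The callback skeleton this converges on, with the hardware programming elided (sketch only):

    #include <linux/irq.h>

    static int example_set_irq_type(struct irq_data *d, unsigned int flow_type)
    {
            if (flow_type == IRQ_TYPE_NONE)
                    flow_type = IRQ_TYPE_LEVEL_LOW;

            irqd_set_trigger_type(d, flow_type);  /* record it in irq_data */

            /* ... program the interrupt controller for flow_type here ... */

            return IRQ_SET_MASK_OK_NOCOPY;        /* core must not overwrite it */
    }
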
index 65a07750f4f946f038bb2e2a4e13f9f6d05dce83..953f17c8d17cde75f73ef78ebc59aa0235e9fde0 100644 (file)
@@ -117,6 +117,7 @@ config S390
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZ4
@@ -140,6 +141,7 @@ config S390
        select OLD_SIGACTION
        select OLD_SIGSUSPEND3
        select SYSCTL_EXCEPTION_TRACE
+       select TTY
        select VIRT_CPU_ACCOUNTING
        select VIRT_TO_BUS
 
@@ -415,6 +417,10 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
 config ARCH_ENABLE_MEMORY_HOTREMOVE
        def_bool y
 
+config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+       def_bool y
+       depends on 64BIT
+
 config FORCE_MAX_ZONEORDER
        int
        default "9"
index de8e2b3b0180e0650dd5a100f9e57a9b89e99359..69b23b25ac34a4f3f6c6faeb305648ef5a7e5b63 100644 (file)
@@ -171,7 +171,7 @@ static int __init appldata_os_init(void)
        int rc, max_size;
 
        max_size = sizeof(struct appldata_os_data) +
-                  (NR_CPUS * sizeof(struct appldata_os_per_cpu));
+                  (num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
        if (max_size > APPLDATA_MAX_REC_SIZE) {
                pr_err("Maximum OS record size %i exceeds the maximum "
                       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
index e0af2ee587511d023abf72a35b3353668da921ca..ddaae2f5c9137d0155ef5d5e943b40d097471881 100644 (file)
@@ -46,6 +46,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_MARCH_Z9_109=y
+CONFIG_NR_CPUS=256
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -58,7 +59,6 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
 CONFIG_CRASH_DUMP=y
-CONFIG_ZFCPDUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -101,7 +101,6 @@ CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -111,6 +110,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=m
 CONFIG_INET6_XFRM_MODE_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_BEET=m
 CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_VTI=m
 CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
@@ -135,7 +135,17 @@ CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
-CONFIG_NETFILTER_TPROXY=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -204,7 +214,9 @@ CONFIG_IP_SET_HASH_IP=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
 CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
@@ -227,6 +239,11 @@ CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -249,6 +266,9 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -268,6 +288,7 @@ CONFIG_IP6_NF_SECURITY=m
 CONFIG_NF_NAT_IPV6=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
@@ -314,6 +335,7 @@ CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_BPF=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
@@ -381,8 +403,8 @@ CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
-CONFIG_DM_RAID=m
 CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
@@ -434,7 +456,6 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
-CONFIG_ZVM_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
@@ -534,13 +555,23 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_SELFTEST=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
 CONFIG_SLUB_DEBUG_ON=y
 CONFIG_SLUB_STATS=y
+CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_VM=y
 CONFIG_DEBUG_VM_RB=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_RT_MUTEX_TESTER=y
@@ -573,9 +604,11 @@ CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
 CONFIG_LKDTM=m
+CONFIG_TEST_LIST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
-CONFIG_RBTREE_TEST=m
+CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_DMA_API_DEBUG=y
 # CONFIG_STRICT_DEVMEM is not set
@@ -638,7 +671,6 @@ CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_ASYMMETRIC_KEY_TYPE=m
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_PUBLIC_KEY_ALGO_RSA=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
index b9f6b4cab927e79618600f806351441083e89428..c81a74e3e25a698340a3fae730ec3a5c27324508 100644 (file)
@@ -46,6 +46,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_MARCH_Z9_109=y
+CONFIG_NR_CPUS=256
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
@@ -56,7 +57,6 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
 CONFIG_CRASH_DUMP=y
-CONFIG_ZFCPDUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -99,7 +99,6 @@ CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -109,6 +108,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=m
 CONFIG_INET6_XFRM_MODE_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_BEET=m
 CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_VTI=m
 CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
@@ -133,7 +133,17 @@ CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
-CONFIG_NETFILTER_TPROXY=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -202,7 +212,9 @@ CONFIG_IP_SET_HASH_IP=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
 CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
@@ -225,6 +237,11 @@ CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -247,6 +264,9 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -266,6 +286,7 @@ CONFIG_IP6_NF_SECURITY=m
 CONFIG_NF_NAT_IPV6=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
@@ -311,6 +332,7 @@ CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_BPF=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
@@ -378,8 +400,8 @@ CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
-CONFIG_DM_RAID=m
 CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
@@ -431,7 +453,6 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
-CONFIG_ZVM_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
@@ -540,6 +561,7 @@ CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_LKDTM=m
 CONFIG_RBTREE_TEST=m
 CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
@@ -601,7 +623,6 @@ CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_ASYMMETRIC_KEY_TYPE=m
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_PUBLIC_KEY_ALGO_RSA=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
index 91087b43e8fa3d1016f5cd27fb5e10caa03483e2..b5ba8fe1cc6487c7cb67a6948b3d23895888950d 100644 (file)
@@ -44,6 +44,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_MARCH_Z9_109=y
+CONFIG_NR_CPUS=256
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
@@ -54,7 +55,6 @@ CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
 CONFIG_CRASH_DUMP=y
-CONFIG_ZFCPDUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -97,7 +97,6 @@ CONFIG_TCP_CONG_VENO=m
 CONFIG_TCP_CONG_YEAH=m
 CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
@@ -107,6 +106,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=m
 CONFIG_INET6_XFRM_MODE_TUNNEL=m
 CONFIG_INET6_XFRM_MODE_BEET=m
 CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_VTI=m
 CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
@@ -131,7 +131,17 @@ CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
-CONFIG_NETFILTER_TPROXY=m
+CONFIG_NF_TABLES=m
+CONFIG_NFT_EXTHDR=m
+CONFIG_NFT_META=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_RBTREE=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_COMPAT=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -200,7 +210,9 @@ CONFIG_IP_SET_HASH_IP=m
 CONFIG_IP_SET_HASH_IPPORT=m
 CONFIG_IP_SET_HASH_IPPORTIP=m
 CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
 CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
 CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
@@ -223,6 +235,11 @@ CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_NF_TABLES_IPV4=m
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_CHAIN_ROUTE_IPV4=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NF_TABLES_ARP=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -245,6 +262,9 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_NF_TABLES_IPV6=m
+CONFIG_NFT_CHAIN_ROUTE_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -264,6 +284,7 @@ CONFIG_IP6_NF_SECURITY=m
 CONFIG_NF_NAT_IPV6=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_NET_SCTPPROBE=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
@@ -309,6 +330,7 @@ CONFIG_NET_CLS_RSVP=m
 CONFIG_NET_CLS_RSVP6=m
 CONFIG_NET_CLS_FLOW=m
 CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_BPF=m
 CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=m
 CONFIG_NET_ACT_GACT=m
@@ -376,8 +398,8 @@ CONFIG_BLK_DEV_DM=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_MIRROR=m
-CONFIG_DM_RAID=m
 CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
 CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
@@ -429,7 +451,6 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
-CONFIG_ZVM_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
@@ -532,6 +553,7 @@ CONFIG_LATENCYTOP=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
 CONFIG_LKDTM=m
+CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_S390_PTDUMP=y
@@ -593,7 +615,6 @@ CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_ASYMMETRIC_KEY_TYPE=m
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_PUBLIC_KEY_ALGO_RSA=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
index d725c4d956e4551aecfdba78d0c02e857e7ad7b4..cef073ca1f07f2aca39ece9e52f4ce16468ee0b4 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_HZ_100=y
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
 CONFIG_CRASH_DUMP=y
-CONFIG_ZFCPDUMP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SECCOMP is not set
 # CONFIG_IUCV is not set
index 33f57514f4245a3835438801c99e11c749ac33a3..4557cb7ffddf80bda691e9254a831f275220473c 100644 (file)
@@ -40,6 +40,7 @@ CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
 CONFIG_MARCH_Z196=y
+CONFIG_NR_CPUS=256
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
@@ -122,22 +123,31 @@ CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DETECT_HUNG_TASK=y
 CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_PROVE_RCU=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_KPROBES_SANITY_TEST=y
 # CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
index 24908ce149f15c89f1fbf5f4c8cc0b09b8a9ec13..32040ace00ea2431a18428dca5c34c0c4ebde10c 100644 (file)
@@ -32,7 +32,7 @@ struct diag2fc_data {
        __u32 pcpus;
        __u32 lcpus;
        __u32 vcpus;
-       __u32 cpu_min;
+       __u32 ocpus;
        __u32 cpu_max;
        __u32 cpu_shares;
        __u32 cpu_use_samp;
@@ -142,7 +142,12 @@ static int hpyfs_vm_create_guest(struct dentry *systems_dir,
        ATTRIBUTE(cpus_dir, "capped", capped_value);
        ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
        ATTRIBUTE(cpus_dir, "count", data->vcpus);
-       ATTRIBUTE(cpus_dir, "weight_min", data->cpu_min);
+       /*
+        * Note: The "weight_min" attribute got the wrong name.
+        * The value represents the number of non-stopped (operating)
+        * CPUS.
+        */
+       ATTRIBUTE(cpus_dir, "weight_min", data->ocpus);
        ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
        ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
 
index 8386a4a1f19a35ba4cf1e93259b3bca911928408..57892a8a905584d79fd112a7c2e67a319a3f9e45 100644 (file)
@@ -1,6 +1,7 @@
 
 
 generic-y += clkdev.h
-generic-y += trace_clock.h
-generic-y += preempt.h
 generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
index 4bbb5957ed1b6db504cec7328100af49d70607e3..bd93ff6661b809643e4dcd3e94747c4464a763a2 100644 (file)
@@ -44,11 +44,21 @@ struct airq_iv {
 
 struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
 void airq_iv_release(struct airq_iv *iv);
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
 unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
                           unsigned long end);
 
+static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+{
+       return airq_iv_alloc(iv, 1);
+}
+
+static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+{
+       airq_iv_free(iv, bit, 1);
+}
+
 static inline unsigned long airq_iv_end(struct airq_iv *iv)
 {
        return iv->end;
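
Hedged usage sketch for the widened interval-vector interface above — not part of the patch; the AIRQ_IV_ALLOC flag, the 256-bit vector size and the -1UL failure value are assumptions carried over from the old single-bit helpers:

	static int airq_iv_range_example(void)
	{
		struct airq_iv *iv;
		unsigned long bit;

		iv = airq_iv_create(256, AIRQ_IV_ALLOC);	/* assumed flag */
		if (!iv)
			return -ENOMEM;
		/* four consecutive bits instead of four airq_iv_alloc_bit() calls */
		bit = airq_iv_alloc(iv, 4);
		if (bit != -1UL)
			airq_iv_free(iv, bit, 4);
		airq_iv_release(iv);
		return 0;
	}
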
index 6e6ad06808293b7e88949351f647e516af8f16b2..ec5ef891db6bb8f159bc25a8c98679ea27fb4cb8 100644 (file)
@@ -13,9 +13,9 @@
  *
  * The bitop functions are defined to work on unsigned longs, so for an
  * s390x system the bits end up numbered:
- *   |63..............0|127............64|191...........128|255...........196|
+ *   |63..............0|127............64|191...........128|255...........192|
  * and on s390:
- *   |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
  *
  * There are a few little-endian macros used mostly for filesystem
  * bitmaps, these work on similar bit arrays layouts, but
@@ -30,7 +30,7 @@
  * on an s390x system the bits are numbered:
  *   |0..............63|64............127|128...........191|192...........255|
  * and on s390:
- *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  *
  * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
  * number field needs to be reversed compared to the LSB0 encoded bit
@@ -304,7 +304,7 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
  * On an s390x system the bits are numbered:
  *   |0..............63|64............127|128...........191|192...........255|
  * and on s390:
- *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  */
 unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
 unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
index f201af8be580ddc876de2f5de96952aaaf493cb5..a9c2c06861772f63b52988cc98afa80058fc0f22 100644 (file)
@@ -219,7 +219,9 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
 #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
 
-extern struct ccw_device *ccw_device_probe_console(void);
+extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
+extern void ccw_device_destroy_console(struct ccw_device *);
+extern int ccw_device_enable_console(struct ccw_device *);
 extern void ccw_device_wait_idle(struct ccw_device *);
 extern int ccw_device_force_console(struct ccw_device *);
 
index 4f57a4f3909a1682822a61976cbc4e01f417dda5..7403648563554604e19b6cfbd0873bee868c7644 100644 (file)
@@ -44,22 +44,15 @@ csum_partial(const void *buff, int len, __wsum sum)
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  *
- * Copy from userspace and compute checksum.  If we catch an exception
- * then zero the rest of the buffer.
+ * Copy from userspace and compute checksum.
  */
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src, void *dst,
                                           int len, __wsum sum,
                                           int *err_ptr)
 {
-       int missing;
-
-       missing = copy_from_user(dst, src, len);
-       if (missing) {
-               memset(dst + len - missing, 0, missing);
+       if (unlikely(copy_from_user(dst, src, len)))
                *err_ptr = -EFAULT;
-       }
-               
        return csum_partial(dst, len, sum);
 }
 
index 5d7e8cf83bd6c7d53b5f2a34fc2120add4cfa2d8..d350ed9d0fbb2e2955cade45aea5d7632716bfb9 100644 (file)
@@ -8,7 +8,11 @@
 #include <linux/thread_info.h>
 
 #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
-#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v))
+
+#define __SC_DELOUSE(t,v) ({ \
+       BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
+       (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
+})
 
 #define PSW32_MASK_PER         0x40000000UL
 #define PSW32_MASK_DAT         0x04000000UL
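
As a userspace model of the __SC_DELOUSE change above (names and the sample register value are invented for illustration), the macro truncates a compat task's 64-bit register image down to a 31-bit address when the argument type is a pointer; the added BUILD_BUG_ON only rejects non-pointer types wider than 4 bytes:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* upper half is junk from the 64-bit register image; a 31-bit
		 * task can only pass addresses below 2 GiB */
		uint64_t reg = 0xdeadbeef00401000ULL;
		uint32_t user_ptr = (uint32_t)(reg & 0x7fffffff);

		printf("deloused pointer: %#x\n", user_ptr);	/* 0x401000 */
		return 0;
	}
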
index 51bcaa0fdeefaa33b289a521b6cd95be411ada27..fda46bd38c99a7b529ce71925e862efdc3c88a5d 100644 (file)
@@ -5,7 +5,10 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
+int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
@@ -17,7 +20,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
                oparg = 1 << oparg;
 
        pagefault_disable();
-       ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
+       ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
        pagefault_enable();
 
        if (!ret) {
@@ -34,10 +37,4 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
        return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-                                               u32 oldval, u32 newval)
-{
-       return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
-}
-
 #endif /* _ASM_S390_FUTEX_H */
index eef3dd3fd9a9f76d105b2c92d266dc5079c7833f..9bf95bb30f1a6cf27d0494396168f87e23cf8a65 100644 (file)
@@ -106,7 +106,9 @@ struct kvm_s390_sie_block {
        __u64   gbea;                   /* 0x0180 */
        __u8    reserved188[24];        /* 0x0188 */
        __u32   fac;                    /* 0x01a0 */
-       __u8    reserved1a4[68];        /* 0x01a4 */
+       __u8    reserved1a4[20];        /* 0x01a4 */
+       __u64   cbrlo;                  /* 0x01b8 */
+       __u8    reserved1c0[40];        /* 0x01c0 */
        __u64   itdba;                  /* 0x01e8 */
        __u8    reserved1f0[16];        /* 0x01f0 */
 } __attribute__((packed));
@@ -155,6 +157,7 @@ struct kvm_vcpu_stat {
        u32 instruction_stsi;
        u32 instruction_stfl;
        u32 instruction_tprot;
+       u32 instruction_essa;
        u32 instruction_sigp_sense;
        u32 instruction_sigp_sense_running;
        u32 instruction_sigp_external_call;
index 5d1f950704dc6272ec368279b2a01f82274fe220..38149b63dc44a360ab3b0db2dfca4f23e64cc63a 100644 (file)
@@ -48,13 +48,42 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
 {
-       cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-       update_mm(next, tsk);
+       int cpu = smp_processor_id();
+
+       if (prev == next)
+               return;
+       if (atomic_inc_return(&next->context.attach_count) >> 16) {
+               /* Delay update_mm until all TLB flushes are done. */
+               set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+       } else {
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+               update_mm(next, tsk);
+               if (next->context.flush_mm)
+                       /* Flush pending TLBs */
+                       __tlb_flush_mm(next);
+       }
        atomic_dec(&prev->context.attach_count);
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
-       atomic_inc(&next->context.attach_count);
-       /* Check for TLBs not flushed yet */
-       __tlb_flush_mm_lazy(next);
+}
+
+#define finish_arch_post_lock_switch finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+
+       if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
+               return;
+       preempt_disable();
+       clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+       while (atomic_read(&mm->context.attach_count) >> 16)
+               cpu_relax();
+
+       cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+       update_mm(mm, tsk);
+       if (mm->context.flush_mm)
+               __tlb_flush_mm(mm);
+       preempt_enable();
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
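
A minimal userspace model of the attach_count encoding used above (the helper names are mine; only the 0x10000 increment and the 16-bit split come from the hunk): the low half counts CPUs that have the mm attached, the high half counts flush sections currently in flight, so one atomic read tells switch_mm() whether update_mm() must be deferred to finish_arch_post_lock_switch().

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int attach_count;

	static void flush_begin(void) { atomic_fetch_add(&attach_count, 0x10000); }
	static void flush_end(void)   { atomic_fetch_sub(&attach_count, 0x10000); }
	static void cpu_attach(void)  { atomic_fetch_add(&attach_count, 1); }

	int main(void)
	{
		cpu_attach();
		flush_begin();
		int count = atomic_load(&attach_count);

		printf("attached CPUs: %d, flushes in flight: %d\n",
		       count & 0xffff, count >> 16);	/* 1 and 1 */
		flush_end();
		return 0;
	}
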
index e1408ddb94f8d6d5107561374bb759a37c5d8eb1..884017cbfa9fade412372f7f781e503b3f39513b 100644 (file)
@@ -22,6 +22,7 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
+void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq);
 
@@ -91,11 +92,22 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
        unsigned long *table = crst_table_alloc(mm);
-       if (table)
-               crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+
+       if (!table)
+               return NULL;
+       crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+       if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+               crst_table_free(mm, table);
+               return NULL;
+       }
        return (pmd_t *) table;
 }
-#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+       pgtable_pmd_page_dtor(virt_to_page(pmd));
+       crst_table_free(mm, (unsigned long *) pmd);
+}
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
index 2204400d0bd58d4a1e45c82394ff5cbd100aa2cc..1ab75eaacbd417079d3e6de25e8401c87e24a1d7 100644 (file)
@@ -229,6 +229,7 @@ extern unsigned long MODULES_END;
 #define _PAGE_READ     0x010           /* SW pte read bit */
 #define _PAGE_WRITE    0x020           /* SW pte write bit */
 #define _PAGE_SPECIAL  0x040           /* SW associated with special page */
+#define _PAGE_UNUSED   0x080           /* SW bit for pgste usage state */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
@@ -394,6 +395,12 @@ extern unsigned long MODULES_END;
 
 #endif /* CONFIG_64BIT */
 
+/* Guest Page State used for virtualization */
+#define _PGSTE_GPS_ZERO                0x0000000080000000UL
+#define _PGSTE_GPS_USAGE_MASK  0x0000000003000000UL
+#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
+#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
+
 /*
  * A user page table pointer has the space-switch-event bit, the
  * private-space-control bit and the storage-alteration-event-control
@@ -617,6 +624,14 @@ static inline int pte_none(pte_t pte)
        return pte_val(pte) == _PAGE_INVALID;
 }
 
+static inline int pte_swap(pte_t pte)
+{
+       /* Bit pattern: (pte & 0x603) == 0x402 */
+       return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
+                               _PAGE_TYPE | _PAGE_PRESENT))
+               == (_PAGE_INVALID | _PAGE_TYPE);
+}
+
 static inline int pte_file(pte_t pte)
 {
        /* Bit pattern: (pte & 0x601) == 0x600 */
@@ -821,20 +836,20 @@ unsigned long gmap_translate(unsigned long address, struct gmap *);
 unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
 void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
+void __gmap_zap(unsigned long address, struct gmap *);
 
 void gmap_register_ipte_notifier(struct gmap_notifier *);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
 int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
-void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
+void gmap_do_ipte_notify(struct mm_struct *, pte_t *);
 
 static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
-                                       unsigned long addr,
                                        pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
        if (pgste_val(pgste) & PGSTE_IN_BIT) {
                pgste_val(pgste) &= ~PGSTE_IN_BIT;
-               gmap_do_ipte_notify(mm, addr, ptep);
+               gmap_do_ipte_notify(mm, ptep);
        }
 #endif
        return pgste;
@@ -852,6 +867,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
+               pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
                pgste_set_key(ptep, pgste, entry);
                pgste_set_pte(ptep, entry);
                pgste_set_unlock(ptep, pgste);
@@ -881,6 +897,12 @@ static inline int pte_young(pte_t pte)
        return (pte_val(pte) & _PAGE_YOUNG) != 0;
 }
 
+#define __HAVE_ARCH_PTE_UNUSED
+static inline int pte_unused(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_UNUSED;
+}
+
 /*
  * pgd/pmd/pte modification functions
  */
@@ -1034,30 +1056,41 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
-       if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+       unsigned long pto = (unsigned long) ptep;
+
 #ifndef CONFIG_64BIT
-               /* pto must point to the start of the segment table */
-               pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-#else
-               /* ipte in zarch mode can do the math */
-               pte_t *pto = ptep;
+       /* pto in ESA mode must point to the start of the segment table */
+       pto &= 0x7ffffc00;
 #endif
-               asm volatile(
-                       "       ipte    %2,%3"
-                       : "=m" (*ptep) : "m" (*ptep),
-                         "a" (pto), "a" (address));
-       }
+       /* Invalidation + global TLB flush for the pte */
+       asm volatile(
+               "       ipte    %2,%3"
+               : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
+static inline void ptep_flush_direct(struct mm_struct *mm,
+                                    unsigned long address, pte_t *ptep)
+{
+       if (pte_val(*ptep) & _PAGE_INVALID)
+               return;
+       __ptep_ipte(address, ptep);
 }
 
 static inline void ptep_flush_lazy(struct mm_struct *mm,
                                   unsigned long address, pte_t *ptep)
 {
-       int active = (mm == current->active_mm) ? 1 : 0;
+       int active, count;
 
-       if (atomic_read(&mm->context.attach_count) > active)
-               __ptep_ipte(address, ptep);
-       else
+       if (pte_val(*ptep) & _PAGE_INVALID)
+               return;
+       active = (mm == current->active_mm) ? 1 : 0;
+       count = atomic_add_return(0x10000, &mm->context.attach_count);
+       if ((count & 0xffff) <= active) {
+               pte_val(*ptep) |= _PAGE_INVALID;
                mm->context.flush_mm = 1;
+       } else
+               __ptep_ipte(address, ptep);
+       atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -1070,11 +1103,11 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 
        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
-               pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
+               pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
        }
 
        pte = *ptep;
-       __ptep_ipte(addr, ptep);
+       ptep_flush_direct(vma->vm_mm, addr, ptep);
        young = pte_young(pte);
        pte = pte_mkold(pte);
 
@@ -1116,7 +1149,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
-               pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+               pgste = pgste_ipte_notify(mm, ptep, pgste);
        }
 
        pte = *ptep;
@@ -1140,12 +1173,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
-               pgste_ipte_notify(mm, address, ptep, pgste);
+               pgste_ipte_notify(mm, ptep, pgste);
        }
 
        pte = *ptep;
        ptep_flush_lazy(mm, address, ptep);
-       pte_val(*ptep) |= _PAGE_INVALID;
 
        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(&pte, pgste);
@@ -1178,14 +1210,17 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 
        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
-               pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+               pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
        }
 
        pte = *ptep;
-       __ptep_ipte(address, ptep);
+       ptep_flush_direct(vma->vm_mm, address, ptep);
        pte_val(*ptep) = _PAGE_INVALID;
 
        if (mm_has_pgste(vma->vm_mm)) {
+               if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
+                   _PGSTE_GPS_USAGE_UNUSED)
+                       pte_val(pte) |= _PAGE_UNUSED;
                pgste = pgste_update_all(&pte, pgste);
                pgste_set_unlock(ptep, pgste);
        }
@@ -1209,7 +1244,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
        if (!full && mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
-               pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+               pgste = pgste_ipte_notify(mm, ptep, pgste);
        }
 
        pte = *ptep;
@@ -1234,7 +1269,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
        if (pte_write(pte)) {
                if (mm_has_pgste(mm)) {
                        pgste = pgste_get_lock(ptep);
-                       pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+                       pgste = pgste_ipte_notify(mm, ptep, pgste);
                }
 
                ptep_flush_lazy(mm, address, ptep);
@@ -1260,10 +1295,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                return 0;
        if (mm_has_pgste(vma->vm_mm)) {
                pgste = pgste_get_lock(ptep);
-               pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+               pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
        }
 
-       __ptep_ipte(address, ptep);
+       ptep_flush_direct(vma->vm_mm, address, ptep);
 
        if (mm_has_pgste(vma->vm_mm)) {
                pgste_set_pte(ptep, entry);
@@ -1447,12 +1482,16 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 static inline void pmdp_flush_lazy(struct mm_struct *mm,
                                   unsigned long address, pmd_t *pmdp)
 {
-       int active = (mm == current->active_mm) ? 1 : 0;
+       int active, count;
 
-       if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
-               __pmd_idte(address, pmdp);
-       else
+       active = (mm == current->active_mm) ? 1 : 0;
+       count = atomic_add_return(0x10000, &mm->context.attach_count);
+       if ((count & 0xffff) <= active) {
+               pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                mm->context.flush_mm = 1;
+       } else
+               __pmd_idte(address, pmdp);
+       atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
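
One cross-check on the pte_swap() hunk earlier in this file: the individual bit values below are assumptions about the s390 pte layout (only the combined 0x603/0x402 pattern appears in the diff comment), but they reproduce the documented masks exactly:

	#include <assert.h>

	#define _PAGE_PRESENT	0x001	/* assumed SW present bit */
	#define _PAGE_TYPE	0x002	/* assumed swap/file type bit */
	#define _PAGE_PROTECT	0x200	/* assumed HW protection bit */
	#define _PAGE_INVALID	0x400	/* assumed HW invalid bit */

	int main(void)
	{
		assert((_PAGE_INVALID | _PAGE_PROTECT |
			_PAGE_TYPE | _PAGE_PRESENT) == 0x603);
		assert((_PAGE_INVALID | _PAGE_TYPE) == 0x402);	/* swap pte */
		return 0;
	}
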
index 9c82cebddabd78938b1d66baef004c74f3bd97ec..f4783c0b7b43cfd4e58c2c4bfed19013f465fb1b 100644 (file)
@@ -83,6 +83,7 @@ struct per_struct_kernel {
  * These are defined as per linux/ptrace.h, which see.
  */
 #define arch_has_single_step() (1)
+#define arch_has_block_step()  (1)
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
index abaca2275c7a5acb3f0f730539e4d8d5fd6cd19f..2f5e9932b4defddda4587c6593492712f2fb85c1 100644 (file)
@@ -46,6 +46,7 @@ int sclp_cpu_configure(u8 cpu);
 int sclp_cpu_deconfigure(u8 cpu);
 unsigned long long sclp_get_rnmax(void);
 unsigned long long sclp_get_rzm(void);
+unsigned int sclp_get_max_cpu(void);
 int sclp_sdias_blk_count(void);
 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
 int sclp_chp_configure(struct chp_id chpid);
index 94cfbe442f124cc720f2436c7e3c34d485cf0139..406f3a1e63efcce54ccbe6c409d6246bbdb7a887 100644 (file)
@@ -59,7 +59,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_FLAG_DIAG44    (1UL << 4)
 #define MACHINE_FLAG_IDTE      (1UL << 5)
 #define MACHINE_FLAG_DIAG9C    (1UL << 6)
-#define MACHINE_FLAG_MVCOS     (1UL << 7)
 #define MACHINE_FLAG_KVM       (1UL << 8)
 #define MACHINE_FLAG_ESOP      (1UL << 9)
 #define MACHINE_FLAG_EDAT1     (1UL << 10)
@@ -85,7 +84,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_IDTE       (0)
 #define MACHINE_HAS_DIAG44     (1)
 #define MACHINE_HAS_MVPG       (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
-#define MACHINE_HAS_MVCOS      (0)
 #define MACHINE_HAS_EDAT1      (0)
 #define MACHINE_HAS_EDAT2      (0)
 #define MACHINE_HAS_LPP                (0)
@@ -98,7 +96,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_IDTE       (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
 #define MACHINE_HAS_DIAG44     (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
 #define MACHINE_HAS_MVPG       (1)
-#define MACHINE_HAS_MVCOS      (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
 #define MACHINE_HAS_EDAT1      (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
 #define MACHINE_HAS_EDAT2      (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
 #define MACHINE_HAS_LPP                (S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
index 10e0fcd3633d178f25b299a709b9c80706f05818..3ccd71b903454a667ec116a21fa0678a6f80dafe 100644 (file)
@@ -81,6 +81,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME      1       /* callback before returning to user */
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
+#define TIF_TLB_WAIT           4       /* wait for TLB flush completion */
 #define TIF_PER_TRAP           6       /* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING       7       /* machine check handling is pending */
 #define TIF_SYSCALL_TRACE      8       /* syscall trace active */
@@ -91,11 +92,13 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    19      /* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP                20      /* This task is single stepped */
+#define TIF_BLOCK_STEP         21      /* This task is block stepped */
 
 #define _TIF_SYSCALL           (1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
+#define _TIF_TLB_WAIT          (1<<TIF_TLB_WAIT)
 #define _TIF_PER_TRAP          (1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING      (1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
index 79330af9a5f85442745110001defbaa2a1964bb8..4133b3f72fb09a04c9f640cd214ef4a21a69c9db 100644 (file)
@@ -92,33 +92,58 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-struct uaccess_ops {
-       size_t (*copy_from_user)(size_t, const void __user *, void *);
-       size_t (*copy_to_user)(size_t, void __user *, const void *);
-       size_t (*copy_in_user)(size_t, void __user *, const void __user *);
-       size_t (*clear_user)(size_t, void __user *);
-       size_t (*strnlen_user)(size_t, const char __user *);
-       size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-       int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
-       int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
-};
+int __handle_fault(unsigned long, unsigned long, int);
 
-extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_pt;
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:   Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from user space to kernel space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long __must_check __copy_from_user(void *to, const void __user *from,
+                                           unsigned long n);
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:   Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from kernel space to user space.  Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+                                         unsigned long n);
 
-extern int __handle_fault(unsigned long, unsigned long, int);
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
 
-static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
-       size = uaccess.copy_to_user(size, ptr, x);
-       return size ? -EFAULT : size;
+       size = __copy_to_user(ptr, x, size);
+       return size ? -EFAULT : 0;
 }
 
-static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
-       size = uaccess.copy_from_user(size, ptr, x);
-       return size ? -EFAULT : size;
+       size = __copy_from_user(x, ptr, size);
+       return size ? -EFAULT : 0;
 }
 
 /*
@@ -135,8 +160,8 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
        case 2:                                                 \
        case 4:                                                 \
        case 8:                                                 \
-               __pu_err = __put_user_fn(sizeof (*(ptr)),       \
-                                        ptr, &__x);            \
+               __pu_err = __put_user_fn(&__x, ptr,             \
+                                        sizeof(*(ptr)));       \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
@@ -152,7 +177,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 })
 
 
-extern int __put_user_bad(void) __attribute__((noreturn));
+int __put_user_bad(void) __attribute__((noreturn));
 
 #define __get_user(x, ptr)                                     \
 ({                                                             \
@@ -161,29 +186,29 @@ extern int __put_user_bad(void) __attribute__((noreturn));
        switch (sizeof(*(ptr))) {                               \
        case 1: {                                               \
                unsigned char __x;                              \
-               __gu_err = __get_user_fn(sizeof (*(ptr)),       \
-                                        ptr, &__x);            \
+               __gu_err = __get_user_fn(&__x, ptr,             \
+                                        sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 2: {                                               \
                unsigned short __x;                             \
-               __gu_err = __get_user_fn(sizeof (*(ptr)),       \
-                                        ptr, &__x);            \
+               __gu_err = __get_user_fn(&__x, ptr,             \
+                                        sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 4: {                                               \
                unsigned int __x;                               \
-               __gu_err = __get_user_fn(sizeof (*(ptr)),       \
-                                        ptr, &__x);            \
+               __gu_err = __get_user_fn(&__x, ptr,             \
+                                        sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 8: {                                               \
                unsigned long long __x;                         \
-               __gu_err = __get_user_fn(sizeof (*(ptr)),       \
-                                        ptr, &__x);            \
+               __gu_err = __get_user_fn(&__x, ptr,             \
+                                        sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
@@ -200,34 +225,11 @@ extern int __put_user_bad(void) __attribute__((noreturn));
        __get_user(x, ptr);                                     \
 })
 
-extern int __get_user_bad(void) __attribute__((noreturn));
+int __get_user_bad(void) __attribute__((noreturn));
 
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
-/**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from kernel space to user space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-       return uaccess.copy_to_user(n, to, from);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
@@ -248,30 +250,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        return __copy_to_user(to, from, n);
 }
 
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only.  This function may sleep.
- *
- * Copy data from user space to kernel space.  Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-       return uaccess.copy_from_user(n, from, to);
-}
-
-extern void copy_from_user_overflow(void)
+void copy_from_user_overflow(void)
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 __compiletime_warning("copy_from_user() buffer size is not provably correct")
 #endif
@@ -306,11 +285,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
        return __copy_from_user(to, from, n);
 }
 
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-       return uaccess.copy_in_user(n, to, from);
-}
+unsigned long __must_check
+__copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -322,18 +298,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 /*
  * Copy a null terminated string from userspace.
  */
+
+long __strncpy_from_user(char *dst, const char __user *src, long count);
+
 static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
        might_fault();
-       return uaccess.strncpy_from_user(count, src, dst);
+       return __strncpy_from_user(dst, src, count);
 }
 
-static inline unsigned long
-strnlen_user(const char __user * src, unsigned long n)
+unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
+
+static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
 {
        might_fault();
-       return uaccess.strnlen_user(n, src);
+       return __strnlen_user(src, n);
 }
 
 /**
@@ -355,21 +335,14 @@ strnlen_user(const char __user * src, unsigned long n)
 /*
  * Zero Userspace
  */
+unsigned long __must_check __clear_user(void __user *to, unsigned long size);
 
-static inline unsigned long __must_check
-__clear_user(void __user *to, unsigned long n)
-{
-       return uaccess.clear_user(n, to);
-}
-
-static inline unsigned long __must_check
-clear_user(void __user *to, unsigned long n)
+static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {
        might_fault();
-       return uaccess.clear_user(n, to);
+       return __clear_user(to, n);
 }
 
-extern int copy_to_user_real(void __user *dest, void *src, size_t count);
-extern int copy_from_user_real(void *dest, void __user *src, size_t count);
+int copy_to_user_real(void __user *dest, void *src, unsigned long count);
 
 #endif /* __S390_UACCESS_H */
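
A small userspace model (the fake_ helper and its faulting_at parameter are invented) of the return convention documented in the kernel-doc comments above: the copy primitives report how many bytes could not be copied, and callers treat anything non-zero as -EFAULT.

	#include <stdio.h>
	#include <string.h>

	static unsigned long fake_copy_from_user(void *to, const void *from,
						 unsigned long n,
						 unsigned long faulting_at)
	{
		unsigned long ok = n < faulting_at ? n : faulting_at;

		memcpy(to, from, ok);
		return n - ok;			/* bytes NOT copied */
	}

	int main(void)
	{
		char src[16] = "hello, uaccess!";
		char dst[16];
		unsigned long left = fake_copy_from_user(dst, src, sizeof(src), 8);

		printf("bytes not copied: %lu -> %s\n",
		       left, left ? "-EFAULT" : "ok");
		return 0;
	}
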
index 7e0b498a2c2ba95c8ca56537e673b18c4a0065d3..a150f4fabe437a5f39ddcafb88c598895c909518 100644 (file)
@@ -402,6 +402,12 @@ typedef struct
 #define PTRACE_DISABLE_TE            0x5010
 #define PTRACE_TE_ABORT_RAND         0x5011
 
+/*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK     12      /* resume execution until next branch */
+
 /*
  * PT_PROT definition is loosely based on hppa bsd definition in
  * gdb/hppab-nat.c
index 1b3ac09c11b6df2981ca3dbde3cc6df3fba5d2e9..a95c4ca99617e43360fb3b867ac3aefb92b374d6 100644 (file)
@@ -47,9 +47,8 @@ obj-$(CONFIG_SCHED_BOOK)      += topology.o
 obj-$(CONFIG_HIBERNATION)      += suspend.o swsusp_asm64.o
 obj-$(CONFIG_AUDIT)            += audit.o
 compat-obj-$(CONFIG_AUDIT)     += compat_audit.o
-obj-$(CONFIG_COMPAT)           += compat_linux.o compat_signal.o \
-                                       compat_wrapper.o compat_exec_domain.o \
-                                       $(compat-obj-y)
+obj-$(CONFIG_COMPAT)           += compat_linux.o compat_signal.o
+obj-$(CONFIG_COMPAT)           += compat_wrapper.o $(compat-obj-y)
 
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
 obj-$(CONFIG_KPROBES)          += kprobes.o
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
deleted file mode 100644 (file)
index 765fabd..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Support for 32-bit Linux for S390 personality.
- *
- * Copyright IBM Corp. 2000
- * Author(s): Gerhard Tonn (ton@de.ibm.com)
- *
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/personality.h>
-#include <linux/sched.h>
-
-static struct exec_domain s390_exec_domain;
-
-static int __init s390_init (void)
-{
-       s390_exec_domain.name = "Linux/s390";
-       s390_exec_domain.handler = NULL;
-       s390_exec_domain.pers_low = PER_LINUX32;
-       s390_exec_domain.pers_high = PER_LINUX32;
-       s390_exec_domain.signal_map = default_exec_domain.signal_map;
-       s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
-       register_exec_domain(&s390_exec_domain);
-       return 0;
-}
-
-__initcall(s390_init);
index db02052bd137254c1c6d46314005ed4fce1a6204..ca38139423ae7f22e3a4d5e33d4990d9f4bc5daa 100644 (file)
 #define SET_STAT_UID(stat, uid)                (stat).st_uid = high2lowuid(uid)
 #define SET_STAT_GID(stat, gid)                (stat).st_gid = high2lowgid(gid)
 
-asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
+COMPAT_SYSCALL_DEFINE3(s390_chown16, const char __user *, filename,
+                      u16, user, u16, group)
 {
        return sys_chown(filename, low2highuid(user), low2highgid(group));
 }
 
-asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
+COMPAT_SYSCALL_DEFINE3(s390_lchown16, const char __user *,
+                      filename, u16, user, u16, group)
 {
        return sys_lchown(filename, low2highuid(user), low2highgid(group));
 }
 
-asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
+COMPAT_SYSCALL_DEFINE3(s390_fchown16, unsigned int, fd, u16, user, u16, group)
 {
        return sys_fchown(fd, low2highuid(user), low2highgid(group));
 }
 
-asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
+COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
 {
        return sys_setregid(low2highgid(rgid), low2highgid(egid));
 }
 
-asmlinkage long sys32_setgid16(u16 gid)
+COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
 {
        return sys_setgid((gid_t)gid);
 }
 
-asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
+COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
 {
        return sys_setreuid(low2highuid(ruid), low2highuid(euid));
 }
 
-asmlinkage long sys32_setuid16(u16 uid)
+COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
 {
        return sys_setuid((uid_t)uid);
 }
 
-asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
+COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
 {
        return sys_setresuid(low2highuid(ruid), low2highuid(euid),
-               low2highuid(suid));
+                            low2highuid(suid));
 }
 
-asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __user *suidp)
+COMPAT_SYSCALL_DEFINE3(s390_getresuid16, u16 __user *, ruidp,
+                      u16 __user *, euidp, u16 __user *, suidp)
 {
        const struct cred *cred = current_cred();
        int retval;
@@ -144,13 +147,14 @@ asmlinkage long sys32_getresuid16(u16 __user *ruidp, u16 __user *euidp, u16 __us
        return retval;
 }
 
-asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
+COMPAT_SYSCALL_DEFINE3(s390_setresgid16, u16, rgid, u16, egid, u16, sgid)
 {
        return sys_setresgid(low2highgid(rgid), low2highgid(egid),
-               low2highgid(sgid));
+                            low2highgid(sgid));
 }
 
-asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __user *sgidp)
+COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
+                      u16 __user *, egidp, u16 __user *, sgidp)
 {
        const struct cred *cred = current_cred();
        int retval;
@@ -167,12 +171,12 @@ asmlinkage long sys32_getresgid16(u16 __user *rgidp, u16 __user *egidp, u16 __us
        return retval;
 }
 
-asmlinkage long sys32_setfsuid16(u16 uid)
+COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
 {
        return sys_setfsuid((uid_t)uid);
 }
 
-asmlinkage long sys32_setfsgid16(u16 gid)
+COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
 {
        return sys_setfsgid((gid_t)gid);
 }
@@ -215,7 +219,7 @@ static int groups16_from_user(struct group_info *group_info, u16 __user *groupli
        return 0;
 }
 
-asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
+COMPAT_SYSCALL_DEFINE2(s390_getgroups16, int, gidsetsize, u16 __user *, grouplist)
 {
        const struct cred *cred = current_cred();
        int i;
@@ -240,7 +244,7 @@ out:
        return i;
 }
 
-asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
+COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplist)
 {
        struct group_info *group_info;
        int retval;
@@ -265,22 +269,22 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
        return retval;
 }
 
-asmlinkage long sys32_getuid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_getuid16)
 {
        return high2lowuid(from_kuid_munged(current_user_ns(), current_uid()));
 }
 
-asmlinkage long sys32_geteuid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_geteuid16)
 {
        return high2lowuid(from_kuid_munged(current_user_ns(), current_euid()));
 }
 
-asmlinkage long sys32_getgid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_getgid16)
 {
        return high2lowgid(from_kgid_munged(current_user_ns(), current_gid()));
 }
 
-asmlinkage long sys32_getegid16(void)
+COMPAT_SYSCALL_DEFINE0(s390_getegid16)
 {
        return high2lowgid(from_kgid_munged(current_user_ns(), current_egid()));
 }
@@ -295,41 +299,35 @@ COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second,
 }
 #endif
 
-asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
+COMPAT_SYSCALL_DEFINE3(s390_truncate64, const char __user *, path, u32, high, u32, low)
 {
-       if ((int)high < 0)
-               return -EINVAL;
-       else
-               return sys_truncate(path, (high << 32) | low);
+       return sys_truncate(path, (unsigned long)high << 32 | low);
 }
 
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+COMPAT_SYSCALL_DEFINE3(s390_ftruncate64, unsigned int, fd, u32, high, u32, low)
 {
-       if ((int)high < 0)
-               return -EINVAL;
-       else
-               return sys_ftruncate(fd, (high << 32) | low);
+       return sys_ftruncate(fd, (unsigned long)high << 32 | low);
 }
 
-asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
-                               size_t count, u32 poshi, u32 poslo)
+COMPAT_SYSCALL_DEFINE5(s390_pread64, unsigned int, fd, char __user *, ubuf,
+                      compat_size_t, count, u32, high, u32, low)
 {
        if ((compat_ssize_t) count < 0)
                return -EINVAL;
-       return sys_pread64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+       return sys_pread64(fd, ubuf, count, (unsigned long)high << 32 | low);
 }
 
-asmlinkage long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
-                               size_t count, u32 poshi, u32 poslo)
+COMPAT_SYSCALL_DEFINE5(s390_pwrite64, unsigned int, fd, const char __user *, ubuf,
+                      compat_size_t, count, u32, high, u32, low)
 {
        if ((compat_ssize_t) count < 0)
                return -EINVAL;
-       return sys_pwrite64(fd, ubuf, count, ((loff_t)AA(poshi) << 32) | AA(poslo));
+       return sys_pwrite64(fd, ubuf, count, (unsigned long)high << 32 | low);
 }
 
-asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
+COMPAT_SYSCALL_DEFINE4(s390_readahead, int, fd, u32, high, u32, low, s32, count)
 {
-       return sys_readahead(fd, ((loff_t)AA(offhi) << 32) | AA(offlo), count);
+       return sys_readahead(fd, (unsigned long)high << 32 | low, count);
 }
 
 struct stat64_emu31 {
@@ -381,7 +379,7 @@ static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
        return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; 
 }
 
-asmlinkage long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(s390_stat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
 {
        struct kstat stat;
        int ret = vfs_stat(filename, &stat);
@@ -390,7 +388,7 @@ asmlinkage long sys32_stat64(const char __user * filename, struct stat64_emu31 _
        return ret;
 }
 
-asmlinkage long sys32_lstat64(const char __user * filename, struct stat64_emu31 __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(s390_lstat64, const char __user *, filename, struct stat64_emu31 __user *, statbuf)
 {
        struct kstat stat;
        int ret = vfs_lstat(filename, &stat);
@@ -399,7 +397,7 @@ asmlinkage long sys32_lstat64(const char __user * filename, struct stat64_emu31
        return ret;
 }
 
-asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(s390_fstat64, unsigned int, fd, struct stat64_emu31 __user *, statbuf)
 {
        struct kstat stat;
        int ret = vfs_fstat(fd, &stat);
@@ -408,8 +406,8 @@ asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * sta
        return ret;
 }
 
-asmlinkage long sys32_fstatat64(unsigned int dfd, const char __user *filename,
-                               struct stat64_emu31 __user* statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(s390_fstatat64, unsigned int, dfd, const char __user *, filename,
+                      struct stat64_emu31 __user *, statbuf, int, flag)
 {
        struct kstat stat;
        int error;
@@ -435,7 +433,7 @@ struct mmap_arg_struct_emu31 {
        compat_ulong_t offset;
 };
 
-asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
+COMPAT_SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct_emu31 __user *, arg)
 {
        struct mmap_arg_struct_emu31 a;
 
@@ -447,7 +445,7 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
                              a.offset >> PAGE_SHIFT);
 }
 
-asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
+COMPAT_SYSCALL_DEFINE1(s390_mmap2, struct mmap_arg_struct_emu31 __user *, arg)
 {
        struct mmap_arg_struct_emu31 a;
 
@@ -456,7 +454,7 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
 }
 
-asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
+COMPAT_SYSCALL_DEFINE3(s390_read, unsigned int, fd, char __user *, buf, compat_size_t, count)
 {
        if ((compat_ssize_t) count < 0)
                return -EINVAL; 
@@ -464,7 +462,7 @@ asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
        return sys_read(fd, buf, count);
 }
 
-asmlinkage long sys32_write(unsigned int fd, const char __user * buf, size_t count)
+COMPAT_SYSCALL_DEFINE3(s390_write, unsigned int, fd, const char __user *, buf, compat_size_t, count)
 {
        if ((compat_ssize_t) count < 0)
                return -EINVAL; 
@@ -478,14 +476,13 @@ asmlinkage long sys32_write(unsigned int fd, const char __user * buf, size_t cou
  * because the 31 bit values differ from the 64 bit values.
  */
 
-asmlinkage long
-sys32_fadvise64(int fd, loff_t offset, size_t len, int advise)
+COMPAT_SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, high, u32, low, compat_size_t, len, int, advise)
 {
        if (advise == 4)
                advise = POSIX_FADV_DONTNEED;
        else if (advise == 5)
                advise = POSIX_FADV_NOREUSE;
-       return sys_fadvise64(fd, offset, len, advise);
+       return sys_fadvise64(fd, (unsigned long)high << 32 | low, len, advise);
 }
 
 struct fadvise64_64_args {
@@ -495,8 +492,7 @@ struct fadvise64_64_args {
        int advice;
 };
 
-asmlinkage long
-sys32_fadvise64_64(struct fadvise64_64_args __user *args)
+COMPAT_SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
 {
        struct fadvise64_64_args a;
 
@@ -508,3 +504,17 @@ sys32_fadvise64_64(struct fadvise64_64_args __user *args)
                a.advice = POSIX_FADV_NOREUSE;
        return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
 }
+
+COMPAT_SYSCALL_DEFINE6(s390_sync_file_range, int, fd, u32, offhigh, u32, offlow,
+                      u32, nhigh, u32, nlow, unsigned int, flags)
+{
+       return sys_sync_file_range(fd, ((loff_t)offhigh << 32) + offlow,
+                                  ((u64)nhigh << 32) + nlow, flags);
+}
+
+COMPAT_SYSCALL_DEFINE6(s390_fallocate, int, fd, int, mode, u32, offhigh, u32, offlow,
+                      u32, lenhigh, u32, lenlow)
+{
+       return sys_fallocate(fd, mode, ((loff_t)offhigh << 32) + offlow,
+                            ((u64)lenhigh << 32) + lenlow);
+}
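
All of the converted entry points above rebuild a 64-bit offset from the two 32-bit register halves a 31-bit task passes in: either (unsigned long)high << 32 | low, or ((loff_t)offhigh << 32) + offlow for sync_file_range/fallocate (the + variant is equivalent, since the shifted high word has zero low bits, so adding a 32-bit low word can never carry). A minimal user-space C sketch of that arithmetic, for illustration only and not part of the patch:

    /*
     * Illustration only -- not kernel code.  Shows the high/low word
     * reassembly used by the converted compat handlers above.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t rebuild64(uint32_t high, uint32_t low)
    {
            /* high word -> bits 63..32, low word -> bits 31..0 */
            return ((uint64_t)high << 32) | low;
    }

    int main(void)
    {
            /* e.g. offset 0x123456789a arrives as high=0x12, low=0x3456789a */
            printf("%#llx\n", (unsigned long long)rebuild64(0x12, 0x3456789a));
            return 0;
    }
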
index 1bfda3eca37909988c26564b11398b6b75302c50..39ddfdb40ae86228c89f126b02e6f488ca1ae57e 100644 (file)
@@ -76,46 +76,43 @@ struct stat64_emu31;
 struct mmap_arg_struct_emu31;
 struct fadvise64_64_args;
 
-long sys32_chown16(const char __user * filename, u16 user, u16 group);
-long sys32_lchown16(const char __user * filename, u16 user, u16 group);
-long sys32_fchown16(unsigned int fd, u16 user, u16 group);
-long sys32_setregid16(u16 rgid, u16 egid);
-long sys32_setgid16(u16 gid);
-long sys32_setreuid16(u16 ruid, u16 euid);
-long sys32_setuid16(u16 uid);
-long sys32_setresuid16(u16 ruid, u16 euid, u16 suid);
-long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
-long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid);
-long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
-long sys32_setfsuid16(u16 uid);
-long sys32_setfsgid16(u16 gid);
-long sys32_getgroups16(int gidsetsize, u16 __user *grouplist);
-long sys32_setgroups16(int gidsetsize, u16 __user *grouplist);
-long sys32_getuid16(void);
-long sys32_geteuid16(void);
-long sys32_getgid16(void);
-long sys32_getegid16(void);
-long sys32_truncate64(const char __user * path, unsigned long high,
-                     unsigned long low);
-long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
-long sys32_init_module(void __user *umod, unsigned long len,
-                      const char __user *uargs);
-long sys32_delete_module(const char __user *name_user, unsigned int flags);
-long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
-                  u32 poshi, u32 poslo);
-long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
-                   size_t count, u32 poshi, u32 poslo);
-compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
-long sys32_stat64(const char __user * filename, struct stat64_emu31 __user * statbuf);
-long sys32_lstat64(const char __user * filename,
-                  struct stat64_emu31 __user * statbuf);
-long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf);
-long sys32_fstatat64(unsigned int dfd, const char __user *filename,
-                    struct stat64_emu31 __user* statbuf, int flag);
-unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
-long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
-long sys32_read(unsigned int fd, char __user * buf, size_t count);
-long sys32_write(unsigned int fd, const char __user * buf, size_t count);
-long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
-long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
+long compat_sys_s390_chown16(const char __user *filename, u16 user, u16 group);
+long compat_sys_s390_lchown16(const char __user *filename, u16 user, u16 group);
+long compat_sys_s390_fchown16(unsigned int fd, u16 user, u16 group);
+long compat_sys_s390_setregid16(u16 rgid, u16 egid);
+long compat_sys_s390_setgid16(u16 gid);
+long compat_sys_s390_setreuid16(u16 ruid, u16 euid);
+long compat_sys_s390_setuid16(u16 uid);
+long compat_sys_s390_setresuid16(u16 ruid, u16 euid, u16 suid);
+long compat_sys_s390_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
+long compat_sys_s390_setresgid16(u16 rgid, u16 egid, u16 sgid);
+long compat_sys_s390_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
+long compat_sys_s390_setfsuid16(u16 uid);
+long compat_sys_s390_setfsgid16(u16 gid);
+long compat_sys_s390_getgroups16(int gidsetsize, u16 __user *grouplist);
+long compat_sys_s390_setgroups16(int gidsetsize, u16 __user *grouplist);
+long compat_sys_s390_getuid16(void);
+long compat_sys_s390_geteuid16(void);
+long compat_sys_s390_getgid16(void);
+long compat_sys_s390_getegid16(void);
+long compat_sys_s390_truncate64(const char __user *path, u32 high, u32 low);
+long compat_sys_s390_ftruncate64(unsigned int fd, u32 high, u32 low);
+long compat_sys_s390_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, u32 high, u32 low);
+long compat_sys_s390_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count, u32 high, u32 low);
+long compat_sys_s390_readahead(int fd, u32 high, u32 low, s32 count);
+long compat_sys_s390_stat64(const char __user *filename, struct stat64_emu31 __user *statbuf);
+long compat_sys_s390_lstat64(const char __user *filename, struct stat64_emu31 __user *statbuf);
+long compat_sys_s390_fstat64(unsigned int fd, struct stat64_emu31 __user *statbuf);
+long compat_sys_s390_fstatat64(unsigned int dfd, const char __user *filename, struct stat64_emu31 __user *statbuf, int flag);
+long compat_sys_s390_old_mmap(struct mmap_arg_struct_emu31 __user *arg);
+long compat_sys_s390_mmap2(struct mmap_arg_struct_emu31 __user *arg);
+long compat_sys_s390_read(unsigned int fd, char __user * buf, compat_size_t count);
+long compat_sys_s390_write(unsigned int fd, const char __user * buf, compat_size_t count);
+long compat_sys_s390_fadvise64(int fd, u32 high, u32 low, compat_size_t len, int advise);
+long compat_sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
+long compat_sys_s390_sync_file_range(int fd, u32 offhigh, u32 offlow, u32 nhigh, u32 nlow, unsigned int flags);
+long compat_sys_s390_fallocate(int fd, int mode, u32 offhigh, u32 offlow, u32 lenhigh, u32 lenlow);
+long compat_sys_sigreturn(void);
+long compat_sys_rt_sigreturn(void);
+
 #endif /* _ASM_S390X_S390_H */
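
The prototypes above replace the old sys32_* names. The argument widening that the deleted compat_wrapper.S below hand-coded per call (lgfr to sign-extend a signed 32-bit value, llgfr to zero-extend an unsigned one, llgtr to clear the upper bits of a 31-bit user pointer) is instead derived from the parameter types once the handlers are declared with COMPAT_SYSCALL_DEFINEx. A hedged C sketch of the two widening rules, illustrative only and not the macro's actual expansion:

    /*
     * Illustration only -- not the kernel macro.  The two widening rules
     * the old assembly wrappers implemented by hand: sign-extension for
     * signed 32-bit arguments (lgfr) and zero-extension for unsigned
     * ones (llgfr).
     */
    #include <stdint.h>
    #include <stdio.h>

    static int64_t  widen_signed(int32_t v)    { return (int64_t)v;  } /* lgfr  */
    static uint64_t widen_unsigned(uint32_t v) { return (uint64_t)v; } /* llgfr */

    int main(void)
    {
            printf("%lld\n", (long long)widen_signed(-1));              /* -1 */
            printf("%llu\n", (unsigned long long)widen_unsigned(~0u));  /* 4294967295 */
            return 0;
    }
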
index 8b84bc373e945bbb2edaba876addbbf5a850fcdd..7df5ed9f44d7c2d471e99bd3f21195d958638c3d 100644 (file)
@@ -241,7 +241,7 @@ static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
        return 0;
 }
 
-asmlinkage long sys32_sigreturn(void)
+COMPAT_SYSCALL_DEFINE0(sigreturn)
 {
        struct pt_regs *regs = task_pt_regs(current);
        sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
@@ -260,7 +260,7 @@ badframe:
        return 0;
 }
 
-asmlinkage long sys32_rt_sigreturn(void)
+COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
 {
        struct pt_regs *regs = task_pt_regs(current);
        rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
deleted file mode 100644 (file)
index 0248949..0000000
+++ /dev/null
@@ -1,1425 +0,0 @@
-/*
-*    wrapper for 31 bit compatible system calls.
-*
-*    Copyright IBM Corp. 2000, 2006
-*    Author(s): Gerhard Tonn (ton@de.ibm.com),
-*              Thomas Spatzier (tspat@de.ibm.com)
-*/
-
-#include <linux/linkage.h>
-
-ENTRY(sys32_exit_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_exit                # branch to sys_exit
-
-ENTRY(sys32_read_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # char *
-       llgfr   %r4,%r4                 # size_t
-       jg      sys32_read              # branch to sys_read
-
-ENTRY(sys32_write_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # const char *
-       llgfr   %r4,%r4                 # size_t
-       jg      sys32_write             # branch to system call
-
-ENTRY(sys32_close_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_close               # branch to system call
-
-ENTRY(sys32_creat_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # int
-       jg      sys_creat               # branch to system call
-
-ENTRY(sys32_link_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgtr   %r3,%r3                 # const char *
-       jg      sys_link                # branch to system call
-
-ENTRY(sys32_unlink_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       jg      sys_unlink              # branch to system call
-
-ENTRY(sys32_chdir_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       jg      sys_chdir               # branch to system call
-
-ENTRY(sys32_time_wrapper)
-       llgtr   %r2,%r2                 # int *
-       jg      compat_sys_time         # branch to system call
-
-ENTRY(sys32_mknod_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # int
-       llgfr   %r4,%r4                 # dev
-       jg      sys_mknod               # branch to system call
-
-ENTRY(sys32_chmod_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # mode_t
-       jg      sys_chmod               # branch to system call
-
-ENTRY(sys32_lchown16_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # __kernel_old_uid_emu31_t
-       llgfr   %r4,%r4                 # __kernel_old_uid_emu31_t
-       jg      sys32_lchown16          # branch to system call
-
-#sys32_getpid_wrapper                          # void
-
-ENTRY(sys32_mount_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # char *
-       llgfr   %r5,%r5                 # unsigned long
-       llgtr   %r6,%r6                 # void *
-       jg      compat_sys_mount        # branch to system call
-
-ENTRY(sys32_oldumount_wrapper)
-       llgtr   %r2,%r2                 # char *
-       jg      sys_oldumount           # branch to system call
-
-ENTRY(sys32_setuid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_uid_emu31_t
-       jg      sys32_setuid16          # branch to system call
-
-#sys32_getuid16_wrapper                        # void
-
-ENTRY(sys32_ptrace_wrapper)
-       lgfr    %r2,%r2                 # long
-       lgfr    %r3,%r3                 # long
-       llgtr   %r4,%r4                 # long
-       llgfr   %r5,%r5                 # long
-       jg      compat_sys_ptrace       # branch to system call
-
-ENTRY(sys32_alarm_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_alarm               # branch to system call
-
-ENTRY(compat_sys_utime_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # struct compat_utimbuf *
-       jg      compat_sys_utime        # branch to system call
-
-ENTRY(sys32_access_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # int
-       jg      sys_access              # branch to system call
-
-ENTRY(sys32_nice_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_nice                # branch to system call
-
-#sys32_sync_wrapper                    # void
-
-ENTRY(sys32_kill_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       jg      sys_kill                # branch to system call
-
-ENTRY(sys32_rename_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgtr   %r3,%r3                 # const char *
-       jg      sys_rename              # branch to system call
-
-ENTRY(sys32_mkdir_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # int
-       jg      sys_mkdir               # branch to system call
-
-ENTRY(sys32_rmdir_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       jg      sys_rmdir               # branch to system call
-
-ENTRY(sys32_dup_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_dup                 # branch to system call
-
-ENTRY(sys32_pipe_wrapper)
-       llgtr   %r2,%r2                 # u32 *
-       jg      sys_pipe                # branch to system call
-
-ENTRY(compat_sys_times_wrapper)
-       llgtr   %r2,%r2                 # struct compat_tms *
-       jg      compat_sys_times        # branch to system call
-
-ENTRY(sys32_brk_wrapper)
-       llgtr   %r2,%r2                 # unsigned long
-       jg      sys_brk                 # branch to system call
-
-ENTRY(sys32_setgid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_gid_emu31_t
-       jg      sys32_setgid16          # branch to system call
-
-#sys32_getgid16_wrapper                        # void
-
-ENTRY(sys32_signal_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # __sighandler_t
-       jg      sys_signal
-
-#sys32_geteuid16_wrapper               # void
-
-#sys32_getegid16_wrapper               # void
-
-ENTRY(sys32_acct_wrapper)
-       llgtr   %r2,%r2                 # char *
-       jg      sys_acct                # branch to system call
-
-ENTRY(sys32_umount_wrapper)
-       llgtr   %r2,%r2                 # char *
-       lgfr    %r3,%r3                 # int
-       jg      sys_umount              # branch to system call
-
-ENTRY(compat_sys_ioctl_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned int
-       llgfr   %r4,%r4                 # unsigned int
-       jg      compat_sys_ioctl        # branch to system call
-
-ENTRY(compat_sys_fcntl_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned int
-       llgfr   %r4,%r4                 # unsigned long
-       jg      compat_sys_fcntl        # branch to system call
-
-ENTRY(sys32_setpgid_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       lgfr    %r3,%r3                 # pid_t
-       jg      sys_setpgid             # branch to system call
-
-ENTRY(sys32_umask_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_umask               # branch to system call
-
-ENTRY(sys32_chroot_wrapper)
-       llgtr   %r2,%r2                 # char *
-       jg      sys_chroot              # branch to system call
-
-ENTRY(sys32_ustat_wrapper)
-       llgfr   %r2,%r2                 # dev_t
-       llgtr   %r3,%r3                 # struct ustat *
-       jg      compat_sys_ustat
-
-ENTRY(sys32_dup2_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned int
-       jg      sys_dup2                # branch to system call
-
-#sys32_getppid_wrapper                 # void
-
-#sys32_getpgrp_wrapper                 # void
-
-#sys32_setsid_wrapper                  # void
-
-ENTRY(sys32_setreuid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_uid_emu31_t
-       llgfr   %r3,%r3                 # __kernel_old_uid_emu31_t
-       jg      sys32_setreuid16        # branch to system call
-
-ENTRY(sys32_setregid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_gid_emu31_t
-       llgfr   %r3,%r3                 # __kernel_old_gid_emu31_t
-       jg      sys32_setregid16        # branch to system call
-
-ENTRY(sys_sigsuspend_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       llgfr   %r4,%r4                 # old_sigset_t
-       jg      sys_sigsuspend
-
-ENTRY(compat_sys_sigpending_wrapper)
-       llgtr   %r2,%r2                 # compat_old_sigset_t *
-       jg      compat_sys_sigpending   # branch to system call
-
-ENTRY(sys32_sethostname_wrapper)
-       llgtr   %r2,%r2                 # char *
-       lgfr    %r3,%r3                 # int
-       jg      sys_sethostname         # branch to system call
-
-ENTRY(compat_sys_setrlimit_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # struct rlimit_emu31 *
-       jg      compat_sys_setrlimit    # branch to system call
-
-ENTRY(compat_sys_old_getrlimit_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # struct rlimit_emu31 *
-       jg      compat_sys_old_getrlimit # branch to system call
-
-ENTRY(compat_sys_getrlimit_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # struct rlimit_emu31 *
-       jg      compat_sys_getrlimit    # branch to system call
-
-ENTRY(sys32_mmap2_wrapper)
-       llgtr   %r2,%r2                 # struct mmap_arg_struct_emu31 *
-       jg      sys32_mmap2                     # branch to system call
-
-ENTRY(compat_sys_gettimeofday_wrapper)
-       llgtr   %r2,%r2                 # struct timeval_emu31 *
-       llgtr   %r3,%r3                 # struct timezone *
-       jg      compat_sys_gettimeofday # branch to system call
-
-ENTRY(compat_sys_settimeofday_wrapper)
-       llgtr   %r2,%r2                 # struct timeval_emu31 *
-       llgtr   %r3,%r3                 # struct timezone *
-       jg      compat_sys_settimeofday # branch to system call
-
-ENTRY(sys32_getgroups16_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # __kernel_old_gid_emu31_t *
-       jg      sys32_getgroups16       # branch to system call
-
-ENTRY(sys32_setgroups16_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # __kernel_old_gid_emu31_t *
-       jg      sys32_setgroups16       # branch to system call
-
-ENTRY(sys32_symlink_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgtr   %r3,%r3                 # const char *
-       jg      sys_symlink             # branch to system call
-
-ENTRY(sys32_readlink_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgtr   %r3,%r3                 # char *
-       lgfr    %r4,%r4                 # int
-       jg      sys_readlink            # branch to system call
-
-ENTRY(sys32_uselib_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       jg      sys_uselib              # branch to system call
-
-ENTRY(sys32_swapon_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # int
-       jg      sys_swapon              # branch to system call
-
-ENTRY(sys32_reboot_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       llgfr   %r4,%r4                 # unsigned int
-       llgtr   %r5,%r5                 # void *
-       jg      sys_reboot              # branch to system call
-
-ENTRY(old32_readdir_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # void *
-       llgfr   %r4,%r4                 # unsigned int
-       jg      compat_sys_old_readdir  # branch to system call
-
-ENTRY(old32_mmap_wrapper)
-       llgtr   %r2,%r2                 # struct mmap_arg_struct_emu31 *
-       jg      old32_mmap              # branch to system call
-
-ENTRY(sys32_munmap_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # size_t
-       jg      sys_munmap              # branch to system call
-
-ENTRY(sys32_fchmod_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # mode_t
-       jg      sys_fchmod              # branch to system call
-
-ENTRY(sys32_fchown16_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # compat_uid_t
-       llgfr   %r4,%r4                 # compat_uid_t
-       jg      sys32_fchown16          # branch to system call
-
-ENTRY(sys32_getpriority_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       jg      sys_getpriority         # branch to system call
-
-ENTRY(sys32_setpriority_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       lgfr    %r4,%r4                 # int
-       jg      sys_setpriority         # branch to system call
-
-ENTRY(compat_sys_statfs_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # struct compat_statfs *
-       jg      compat_sys_statfs       # branch to system call
-
-ENTRY(compat_sys_fstatfs_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # struct compat_statfs *
-       jg      compat_sys_fstatfs      # branch to system call
-
-ENTRY(compat_sys_socketcall_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # u32 *
-       jg      compat_sys_socketcall   # branch to system call
-
-ENTRY(sys32_syslog_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # char *
-       lgfr    %r4,%r4                 # int
-       jg      sys_syslog              # branch to system call
-
-ENTRY(compat_sys_newstat_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # struct stat_emu31 *
-       jg      compat_sys_newstat      # branch to system call
-
-ENTRY(compat_sys_newlstat_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # struct stat_emu31 *
-       jg      compat_sys_newlstat     # branch to system call
-
-ENTRY(compat_sys_newfstat_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # struct stat_emu31 *
-       jg      compat_sys_newfstat     # branch to system call
-
-#sys32_vhangup_wrapper                 # void
-
-ENTRY(sys32_swapoff_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       jg      sys_swapoff             # branch to system call
-
-ENTRY(compat_sys_sysinfo_wrapper)
-       llgtr   %r2,%r2                 # struct sysinfo_emu31 *
-       jg      compat_sys_sysinfo      # branch to system call
-
-ENTRY(sys32_fsync_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_fsync               # branch to system call
-
-#sys32_sigreturn_wrapper               # done in sigreturn_glue
-
-#sys32_clone_wrapper                   # done in clone_glue
-
-ENTRY(sys32_setdomainname_wrapper)
-       llgtr   %r2,%r2                 # char *
-       lgfr    %r3,%r3                 # int
-       jg      sys_setdomainname       # branch to system call
-
-ENTRY(sys32_newuname_wrapper)
-       llgtr   %r2,%r2                 # struct new_utsname *
-       jg      sys_newuname            # branch to system call
-
-ENTRY(compat_sys_adjtimex_wrapper)
-       llgtr   %r2,%r2                 # struct compat_timex *
-       jg      compat_sys_adjtimex     # branch to system call
-
-ENTRY(sys32_mprotect_wrapper)
-       llgtr   %r2,%r2                 # unsigned long (actually pointer
-       llgfr   %r3,%r3                 # size_t
-       llgfr   %r4,%r4                 # unsigned long
-       jg      sys_mprotect            # branch to system call
-
-ENTRY(sys_init_module_wrapper)
-       llgtr   %r2,%r2                 # void *
-       llgfr   %r3,%r3                 # unsigned long
-       llgtr   %r4,%r4                 # char *
-       jg      sys_init_module         # branch to system call
-
-ENTRY(sys_delete_module_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # unsigned int
-       jg      sys_delete_module       # branch to system call
-
-ENTRY(sys32_quotactl_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # const char *
-       llgfr   %r4,%r4                 # qid_t
-       llgtr   %r5,%r5                 # caddr_t
-       jg      sys_quotactl            # branch to system call
-
-ENTRY(sys32_getpgid_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       jg      sys_getpgid             # branch to system call
-
-ENTRY(sys32_fchdir_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_fchdir              # branch to system call
-
-ENTRY(sys32_bdflush_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # long
-       jg      sys_bdflush             # branch to system call
-
-ENTRY(sys32_sysfs_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # unsigned long
-       llgfr   %r4,%r4                 # unsigned long
-       jg      sys_sysfs               # branch to system call
-
-ENTRY(sys32_personality_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_s390_personality    # branch to system call
-
-ENTRY(sys32_setfsuid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_uid_emu31_t
-       jg      sys32_setfsuid16        # branch to system call
-
-ENTRY(sys32_setfsgid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_gid_emu31_t
-       jg      sys32_setfsgid16        # branch to system call
-
-ENTRY(sys32_llseek_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned long
-       llgfr   %r4,%r4                 # unsigned long
-       llgtr   %r5,%r5                 # loff_t *
-       llgfr   %r6,%r6                 # unsigned int
-       jg      sys_llseek              # branch to system call
-
-ENTRY(sys32_getdents_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # void *
-       llgfr   %r4,%r4                 # unsigned int
-       jg      compat_sys_getdents     # branch to system call
-
-ENTRY(compat_sys_select_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # compat_fd_set *
-       llgtr   %r4,%r4                 # compat_fd_set *
-       llgtr   %r5,%r5                 # compat_fd_set *
-       llgtr   %r6,%r6                 # struct compat_timeval *
-       jg      compat_sys_select       # branch to system call
-
-ENTRY(sys32_flock_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned int
-       jg      sys_flock               # branch to system call
-
-ENTRY(sys32_msync_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # size_t
-       lgfr    %r4,%r4                 # int
-       jg      sys_msync               # branch to system call
-
-ENTRY(compat_sys_readv_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const struct compat_iovec *
-       llgfr   %r4,%r4                 # unsigned long
-       jg      compat_sys_readv        # branch to system call
-
-ENTRY(compat_sys_writev_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const struct compat_iovec *
-       llgfr   %r4,%r4                 # unsigned long
-       jg      compat_sys_writev       # branch to system call
-
-ENTRY(sys32_getsid_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       jg      sys_getsid              # branch to system call
-
-ENTRY(sys32_fdatasync_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_fdatasync           # branch to system call
-
-ENTRY(sys32_mlock_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # size_t
-       jg      sys_mlock               # branch to system call
-
-ENTRY(sys32_munlock_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # size_t
-       jg      sys_munlock             # branch to system call
-
-ENTRY(sys32_mlockall_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_mlockall            # branch to system call
-
-#sys32_munlockall_wrapper              # void
-
-ENTRY(sys32_sched_setparam_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       llgtr   %r3,%r3                 # struct sched_param *
-       jg      sys_sched_setparam      # branch to system call
-
-ENTRY(sys32_sched_getparam_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       llgtr   %r3,%r3                 # struct sched_param *
-       jg      sys_sched_getparam      # branch to system call
-
-ENTRY(sys32_sched_setscheduler_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       lgfr    %r3,%r3                 # int
-       llgtr   %r4,%r4                 # struct sched_param *
-       jg      sys_sched_setscheduler  # branch to system call
-
-ENTRY(sys32_sched_getscheduler_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       jg      sys_sched_getscheduler  # branch to system call
-
-#sys32_sched_yield_wrapper             # void
-
-ENTRY(sys32_sched_get_priority_max_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_sched_get_priority_max      # branch to system call
-
-ENTRY(sys32_sched_get_priority_min_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_sched_get_priority_min      # branch to system call
-
-ENTRY(compat_sys_nanosleep_wrapper)
-       llgtr   %r2,%r2                 # struct compat_timespec *
-       llgtr   %r3,%r3                 # struct compat_timespec *
-       jg      compat_sys_nanosleep            # branch to system call
-
-ENTRY(sys32_mremap_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # unsigned long
-       llgfr   %r4,%r4                 # unsigned long
-       llgfr   %r5,%r5                 # unsigned long
-       llgfr   %r6,%r6                 # unsigned long
-       jg      sys_mremap              # branch to system call
-
-ENTRY(sys32_setresuid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_uid_emu31_t
-       llgfr   %r3,%r3                 # __kernel_old_uid_emu31_t
-       llgfr   %r4,%r4                 # __kernel_old_uid_emu31_t
-       jg      sys32_setresuid16       # branch to system call
-
-ENTRY(sys32_getresuid16_wrapper)
-       llgtr   %r2,%r2                 # __kernel_old_uid_emu31_t *
-       llgtr   %r3,%r3                 # __kernel_old_uid_emu31_t *
-       llgtr   %r4,%r4                 # __kernel_old_uid_emu31_t *
-       jg      sys32_getresuid16       # branch to system call
-
-ENTRY(sys32_poll_wrapper)
-       llgtr   %r2,%r2                 # struct pollfd *
-       llgfr   %r3,%r3                 # unsigned int
-       lgfr    %r4,%r4                 # int
-       jg      sys_poll                # branch to system call
-
-ENTRY(sys32_setresgid16_wrapper)
-       llgfr   %r2,%r2                 # __kernel_old_gid_emu31_t
-       llgfr   %r3,%r3                 # __kernel_old_gid_emu31_t
-       llgfr   %r4,%r4                 # __kernel_old_gid_emu31_t
-       jg      sys32_setresgid16       # branch to system call
-
-ENTRY(sys32_getresgid16_wrapper)
-       llgtr   %r2,%r2                 # __kernel_old_gid_emu31_t *
-       llgtr   %r3,%r3                 # __kernel_old_gid_emu31_t *
-       llgtr   %r4,%r4                 # __kernel_old_gid_emu31_t *
-       jg      sys32_getresgid16       # branch to system call
-
-ENTRY(sys32_prctl_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # unsigned long
-       llgfr   %r4,%r4                 # unsigned long
-       llgfr   %r5,%r5                 # unsigned long
-       llgfr   %r6,%r6                 # unsigned long
-       jg      sys_prctl               # branch to system call
-
-#sys32_rt_sigreturn_wrapper            # done in rt_sigreturn_glue
-
-ENTRY(sys32_pread64_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # char *
-       llgfr   %r4,%r4                 # size_t
-       llgfr   %r5,%r5                 # u32
-       llgfr   %r6,%r6                 # u32
-       jg      sys32_pread64           # branch to system call
-
-ENTRY(sys32_pwrite64_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # const char *
-       llgfr   %r4,%r4                 # size_t
-       llgfr   %r5,%r5                 # u32
-       llgfr   %r6,%r6                 # u32
-       jg      sys32_pwrite64          # branch to system call
-
-ENTRY(sys32_chown16_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # __kernel_old_uid_emu31_t
-       llgfr   %r4,%r4                 # __kernel_old_gid_emu31_t
-       jg      sys32_chown16           # branch to system call
-
-ENTRY(sys32_getcwd_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgfr   %r3,%r3                 # unsigned long
-       jg      sys_getcwd              # branch to system call
-
-ENTRY(sys32_capget_wrapper)
-       llgtr   %r2,%r2                 # cap_user_header_t
-       llgtr   %r3,%r3                 # cap_user_data_t
-       jg      sys_capget              # branch to system call
-
-ENTRY(sys32_capset_wrapper)
-       llgtr   %r2,%r2                 # cap_user_header_t
-       llgtr   %r3,%r3                 # const cap_user_data_t
-       jg      sys_capset              # branch to system call
-
-#sys32_vfork_wrapper                   # done in vfork_glue
-
-ENTRY(sys32_truncate64_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # unsigned long
-       llgfr   %r4,%r4                 # unsigned long
-       jg      sys32_truncate64        # branch to system call
-
-ENTRY(sys32_ftruncate64_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned long
-       llgfr   %r4,%r4                 # unsigned long
-       jg      sys32_ftruncate64       # branch to system call
-
-ENTRY(sys32_lchown_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # uid_t
-       llgfr   %r4,%r4                 # gid_t
-       jg      sys_lchown              # branch to system call
-
-#sys32_getuid_wrapper                  # void
-#sys32_getgid_wrapper                  # void
-#sys32_geteuid_wrapper                 # void
-#sys32_getegid_wrapper                 # void
-
-ENTRY(sys32_setreuid_wrapper)
-       llgfr   %r2,%r2                 # uid_t
-       llgfr   %r3,%r3                 # uid_t
-       jg      sys_setreuid            # branch to system call
-
-ENTRY(sys32_setregid_wrapper)
-       llgfr   %r2,%r2                 # gid_t
-       llgfr   %r3,%r3                 # gid_t
-       jg      sys_setregid            # branch to system call
-
-ENTRY(sys32_getgroups_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # gid_t *
-       jg      sys_getgroups           # branch to system call
-
-ENTRY(sys32_setgroups_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # gid_t *
-       jg      sys_setgroups           # branch to system call
-
-ENTRY(sys32_fchown_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # uid_t
-       llgfr   %r4,%r4                 # gid_t
-       jg      sys_fchown              # branch to system call
-
-ENTRY(sys32_setresuid_wrapper)
-       llgfr   %r2,%r2                 # uid_t
-       llgfr   %r3,%r3                 # uid_t
-       llgfr   %r4,%r4                 # uid_t
-       jg      sys_setresuid           # branch to system call
-
-ENTRY(sys32_getresuid_wrapper)
-       llgtr   %r2,%r2                 # uid_t *
-       llgtr   %r3,%r3                 # uid_t *
-       llgtr   %r4,%r4                 # uid_t *
-       jg      sys_getresuid           # branch to system call
-
-ENTRY(sys32_setresgid_wrapper)
-       llgfr   %r2,%r2                 # gid_t
-       llgfr   %r3,%r3                 # gid_t
-       llgfr   %r4,%r4                 # gid_t
-       jg      sys_setresgid           # branch to system call
-
-ENTRY(sys32_getresgid_wrapper)
-       llgtr   %r2,%r2                 # gid_t *
-       llgtr   %r3,%r3                 # gid_t *
-       llgtr   %r4,%r4                 # gid_t *
-       jg      sys_getresgid           # branch to system call
-
-ENTRY(sys32_chown_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # uid_t
-       llgfr   %r4,%r4                 # gid_t
-       jg      sys_chown               # branch to system call
-
-ENTRY(sys32_setuid_wrapper)
-       llgfr   %r2,%r2                 # uid_t
-       jg      sys_setuid              # branch to system call
-
-ENTRY(sys32_setgid_wrapper)
-       llgfr   %r2,%r2                 # gid_t
-       jg      sys_setgid              # branch to system call
-
-ENTRY(sys32_setfsuid_wrapper)
-       llgfr   %r2,%r2                 # uid_t
-       jg      sys_setfsuid            # branch to system call
-
-ENTRY(sys32_setfsgid_wrapper)
-       llgfr   %r2,%r2                 # gid_t
-       jg      sys_setfsgid            # branch to system call
-
-ENTRY(sys32_pivot_root_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgtr   %r3,%r3                 # const char *
-       jg      sys_pivot_root          # branch to system call
-
-ENTRY(sys32_mincore_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # size_t
-       llgtr   %r4,%r4                 # unsigned char *
-       jg      sys_mincore             # branch to system call
-
-ENTRY(sys32_madvise_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # size_t
-       lgfr    %r4,%r4                 # int
-       jg      sys_madvise             # branch to system call
-
-ENTRY(sys32_getdents64_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # void *
-       llgfr   %r4,%r4                 # unsigned int
-       jg      sys_getdents64          # branch to system call
-
-ENTRY(compat_sys_fcntl64_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned int
-       llgfr   %r4,%r4                 # unsigned long
-       jg      compat_sys_fcntl64      # branch to system call
-
-ENTRY(sys32_stat64_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # struct stat64 *
-       jg      sys32_stat64            # branch to system call
-
-ENTRY(sys32_lstat64_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # struct stat64 *
-       jg      sys32_lstat64           # branch to system call
-
-ENTRY(sys32_stime_wrapper)
-       llgtr   %r2,%r2                 # long *
-       jg      compat_sys_stime        # branch to system call
-
-ENTRY(sys32_fstat64_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgtr   %r3,%r3                 # struct stat64 *
-       jg      sys32_fstat64           # branch to system call
-
-ENTRY(sys32_setxattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # void *
-       llgfr   %r5,%r5                 # size_t
-       lgfr    %r6,%r6                 # int
-       jg      sys_setxattr
-
-ENTRY(sys32_lsetxattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # void *
-       llgfr   %r5,%r5                 # size_t
-       lgfr    %r6,%r6                 # int
-       jg      sys_lsetxattr
-
-ENTRY(sys32_fsetxattr_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # void *
-       llgfr   %r5,%r5                 # size_t
-       lgfr    %r6,%r6                 # int
-       jg      sys_fsetxattr
-
-ENTRY(sys32_getxattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # void *
-       llgfr   %r5,%r5                 # size_t
-       jg      sys_getxattr
-
-ENTRY(sys32_lgetxattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # void *
-       llgfr   %r5,%r5                 # size_t
-       jg      sys_lgetxattr
-
-ENTRY(sys32_fgetxattr_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # void *
-       llgfr   %r5,%r5                 # size_t
-       jg      sys_fgetxattr
-
-ENTRY(sys32_listxattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       llgfr   %r4,%r4                 # size_t
-       jg      sys_listxattr
-
-ENTRY(sys32_llistxattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       llgfr   %r4,%r4                 # size_t
-       jg      sys_llistxattr
-
-ENTRY(sys32_flistxattr_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # char *
-       llgfr   %r4,%r4                 # size_t
-       jg      sys_flistxattr
-
-ENTRY(sys32_removexattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       jg      sys_removexattr
-
-ENTRY(sys32_lremovexattr_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # char *
-       jg      sys_lremovexattr
-
-ENTRY(sys32_fremovexattr_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # char *
-       jg      sys_fremovexattr
-
-ENTRY(sys32_sched_setaffinity_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # unsigned int
-       llgtr   %r4,%r4                 # unsigned long *
-       jg      compat_sys_sched_setaffinity
-
-ENTRY(sys32_sched_getaffinity_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # unsigned int
-       llgtr   %r4,%r4                 # unsigned long *
-       jg      compat_sys_sched_getaffinity
-
-ENTRY(sys32_exit_group_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_exit_group          # branch to system call
-
-ENTRY(sys32_set_tid_address_wrapper)
-       llgtr   %r2,%r2                 # int *
-       jg      sys_set_tid_address     # branch to system call
-
-ENTRY(sys_epoll_create_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_epoll_create        # branch to system call
-
-ENTRY(sys_epoll_ctl_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       lgfr    %r4,%r4                 # int
-       llgtr   %r5,%r5                 # struct epoll_event *
-       jg      sys_epoll_ctl           # branch to system call
-
-ENTRY(sys_epoll_wait_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # struct epoll_event *
-       lgfr    %r4,%r4                 # int
-       lgfr    %r5,%r5                 # int
-       jg      sys_epoll_wait          # branch to system call
-
-ENTRY(sys32_fadvise64_wrapper)
-       lgfr    %r2,%r2                 # int
-       sllg    %r3,%r3,32              # get high word of 64bit loff_t
-       or      %r3,%r4                 # get low word of 64bit loff_t
-       llgfr   %r4,%r5                 # size_t (unsigned long)
-       lgfr    %r5,%r6                 # int
-       jg      sys32_fadvise64
-
-ENTRY(sys32_fadvise64_64_wrapper)
-       llgtr   %r2,%r2                 # struct fadvise64_64_args *
-       jg      sys32_fadvise64_64
-
-ENTRY(sys32_clock_settime_wrapper)
-       lgfr    %r2,%r2                 # clockid_t (int)
-       llgtr   %r3,%r3                 # struct compat_timespec *
-       jg      compat_sys_clock_settime
-
-ENTRY(sys32_clock_gettime_wrapper)
-       lgfr    %r2,%r2                 # clockid_t (int)
-       llgtr   %r3,%r3                 # struct compat_timespec *
-       jg      compat_sys_clock_gettime
-
-ENTRY(sys32_clock_getres_wrapper)
-       lgfr    %r2,%r2                 # clockid_t (int)
-       llgtr   %r3,%r3                 # struct compat_timespec *
-       jg      compat_sys_clock_getres
-
-ENTRY(sys32_clock_nanosleep_wrapper)
-       lgfr    %r2,%r2                 # clockid_t (int)
-       lgfr    %r3,%r3                 # int
-       llgtr   %r4,%r4                 # struct compat_timespec *
-       llgtr   %r5,%r5                 # struct compat_timespec *
-       jg      compat_sys_clock_nanosleep
-
-ENTRY(sys32_timer_create_wrapper)
-       lgfr    %r2,%r2                 # timer_t (int)
-       llgtr   %r3,%r3                 # struct compat_sigevent *
-       llgtr   %r4,%r4                 # timer_t *
-       jg      compat_sys_timer_create
-
-ENTRY(sys32_timer_settime_wrapper)
-       lgfr    %r2,%r2                 # timer_t (int)
-       lgfr    %r3,%r3                 # int
-       llgtr   %r4,%r4                 # struct compat_itimerspec *
-       llgtr   %r5,%r5                 # struct compat_itimerspec *
-       jg      compat_sys_timer_settime
-
-ENTRY(sys32_timer_gettime_wrapper)
-       lgfr    %r2,%r2                 # timer_t (int)
-       llgtr   %r3,%r3                 # struct compat_itimerspec *
-       jg      compat_sys_timer_gettime
-
-ENTRY(sys32_timer_getoverrun_wrapper)
-       lgfr    %r2,%r2                 # timer_t (int)
-       jg      sys_timer_getoverrun
-
-ENTRY(sys32_timer_delete_wrapper)
-       lgfr    %r2,%r2                 # timer_t (int)
-       jg      sys_timer_delete
-
-ENTRY(sys32_io_setup_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # u32 *
-       jg      compat_sys_io_setup
-
-ENTRY(sys32_io_destroy_wrapper)
-       llgfr   %r2,%r2                 # (aio_context_t) u32
-       jg      sys_io_destroy
-
-ENTRY(sys32_io_getevents_wrapper)
-       llgfr   %r2,%r2                 # (aio_context_t) u32
-       lgfr    %r3,%r3                 # long
-       lgfr    %r4,%r4                 # long
-       llgtr   %r5,%r5                 # struct io_event *
-       llgtr   %r6,%r6                 # struct compat_timespec *
-       jg      compat_sys_io_getevents
-
-ENTRY(sys32_io_submit_wrapper)
-       llgfr   %r2,%r2                 # (aio_context_t) u32
-       lgfr    %r3,%r3                 # long
-       llgtr   %r4,%r4                 # struct iocb **
-       jg      compat_sys_io_submit
-
-ENTRY(sys32_io_cancel_wrapper)
-       llgfr   %r2,%r2                 # (aio_context_t) u32
-       llgtr   %r3,%r3                 # struct iocb *
-       llgtr   %r4,%r4                 # struct io_event *
-       jg      sys_io_cancel
-
-ENTRY(compat_sys_statfs64_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgfr   %r3,%r3                 # compat_size_t
-       llgtr   %r4,%r4                 # struct compat_statfs64 *
-       jg      compat_sys_statfs64
-
-ENTRY(compat_sys_fstatfs64_wrapper)
-       llgfr   %r2,%r2                 # unsigned int fd
-       llgfr   %r3,%r3                 # compat_size_t
-       llgtr   %r4,%r4                 # struct compat_statfs64 *
-       jg      compat_sys_fstatfs64
-
-ENTRY(compat_sys_mq_open_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # int
-       llgfr   %r4,%r4                 # mode_t
-       llgtr   %r5,%r5                 # struct compat_mq_attr *
-       jg      compat_sys_mq_open
-
-ENTRY(sys32_mq_unlink_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       jg      sys_mq_unlink
-
-ENTRY(compat_sys_mq_timedsend_wrapper)
-       lgfr    %r2,%r2                 # mqd_t
-       llgtr   %r3,%r3                 # const char *
-       llgfr   %r4,%r4                 # size_t
-       llgfr   %r5,%r5                 # unsigned int
-       llgtr   %r6,%r6                 # const struct compat_timespec *
-       jg      compat_sys_mq_timedsend
-
-ENTRY(compat_sys_mq_timedreceive_wrapper)
-       lgfr    %r2,%r2                 # mqd_t
-       llgtr   %r3,%r3                 # char *
-       llgfr   %r4,%r4                 # size_t
-       llgtr   %r5,%r5                 # unsigned int *
-       llgtr   %r6,%r6                 # const struct compat_timespec *
-       jg      compat_sys_mq_timedreceive
-
-ENTRY(compat_sys_mq_notify_wrapper)
-       lgfr    %r2,%r2                 # mqd_t
-       llgtr   %r3,%r3                 # struct compat_sigevent *
-       jg      compat_sys_mq_notify
-
-ENTRY(compat_sys_mq_getsetattr_wrapper)
-       lgfr    %r2,%r2                 # mqd_t
-       llgtr   %r3,%r3                 # struct compat_mq_attr *
-       llgtr   %r4,%r4                 # struct compat_mq_attr *
-       jg      compat_sys_mq_getsetattr
-
-ENTRY(compat_sys_add_key_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgtr   %r3,%r3                 # const char *
-       llgtr   %r4,%r4                 # const void *
-       llgfr   %r5,%r5                 # size_t
-       llgfr   %r6,%r6                 # (key_serial_t) u32
-       jg      sys_add_key
-
-ENTRY(compat_sys_request_key_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       llgtr   %r3,%r3                 # const char *
-       llgtr   %r4,%r4                 # const void *
-       llgfr   %r5,%r5                 # (key_serial_t) u32
-       jg      sys_request_key
-
-ENTRY(sys32_remap_file_pages_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # unsigned long
-       llgfr   %r4,%r4                 # unsigned long
-       llgfr   %r5,%r5                 # unsigned long
-       llgfr   %r6,%r6                 # unsigned long
-       jg      sys_remap_file_pages
-
-ENTRY(compat_sys_kexec_load_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # unsigned long
-       llgtr   %r4,%r4                 # struct kexec_segment *
-       llgfr   %r5,%r5                 # unsigned long
-       jg      compat_sys_kexec_load
-
-ENTRY(sys_ioprio_set_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       lgfr    %r4,%r4                 # int
-       jg      sys_ioprio_set
-
-ENTRY(sys_ioprio_get_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       jg      sys_ioprio_get
-
-ENTRY(sys_inotify_add_watch_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       llgfr   %r4,%r4                 # u32
-       jg      sys_inotify_add_watch
-
-ENTRY(sys_inotify_rm_watch_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # u32
-       jg      sys_inotify_rm_watch
-
-ENTRY(sys_mkdirat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       lgfr    %r4,%r4                 # int
-       jg      sys_mkdirat
-
-ENTRY(sys_mknodat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       lgfr    %r4,%r4                 # int
-       llgfr   %r5,%r5                 # unsigned int
-       jg      sys_mknodat
-
-ENTRY(sys_fchownat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       llgfr   %r4,%r4                 # uid_t
-       llgfr   %r5,%r5                 # gid_t
-       lgfr    %r6,%r6                 # int
-       jg      sys_fchownat
-
-ENTRY(compat_sys_futimesat_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # struct timeval *
-       jg      compat_sys_futimesat
-
-ENTRY(sys32_fstatat64_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # struct stat64 *
-       lgfr    %r5,%r5                 # int
-       jg      sys32_fstatat64
-
-ENTRY(sys_unlinkat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       lgfr    %r4,%r4                 # int
-       jg      sys_unlinkat
-
-ENTRY(sys_renameat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       lgfr    %r4,%r4                 # int
-       llgtr   %r5,%r5                 # const char *
-       jg      sys_renameat
-
-ENTRY(sys_linkat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       lgfr    %r4,%r4                 # int
-       llgtr   %r5,%r5                 # const char *
-       lgfr    %r6,%r6                 # int
-       jg      sys_linkat
-
-ENTRY(sys_symlinkat_wrapper)
-       llgtr   %r2,%r2                 # const char *
-       lgfr    %r3,%r3                 # int
-       llgtr   %r4,%r4                 # const char *
-       jg      sys_symlinkat
-
-ENTRY(sys_readlinkat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       llgtr   %r4,%r4                 # char *
-       lgfr    %r5,%r5                 # int
-       jg      sys_readlinkat
-
-ENTRY(sys_fchmodat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       llgfr   %r4,%r4                 # mode_t
-       jg      sys_fchmodat
-
-ENTRY(sys_faccessat_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char *
-       lgfr    %r4,%r4                 # int
-       jg      sys_faccessat
-
-ENTRY(compat_sys_pselect6_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # fd_set *
-       llgtr   %r4,%r4                 # fd_set *
-       llgtr   %r5,%r5                 # fd_set *
-       llgtr   %r6,%r6                 # struct timespec *
-       llgt    %r0,164(%r15)           # void *
-       stg     %r0,160(%r15)
-       jg      compat_sys_pselect6
-
-ENTRY(compat_sys_ppoll_wrapper)
-       llgtr   %r2,%r2                 # struct pollfd *
-       llgfr   %r3,%r3                 # unsigned int
-       llgtr   %r4,%r4                 # struct timespec *
-       llgtr   %r5,%r5                 # const sigset_t *
-       llgfr   %r6,%r6                 # size_t
-       jg      compat_sys_ppoll
-
-ENTRY(sys_unshare_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       jg      sys_unshare
-
-ENTRY(sys_splice_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # loff_t *
-       lgfr    %r4,%r4                 # int
-       llgtr   %r5,%r5                 # loff_t *
-       llgfr   %r6,%r6                 # size_t
-       llgf    %r0,164(%r15)           # unsigned int
-       stg     %r0,160(%r15)
-       jg      sys_splice
-
-ENTRY(sys_sync_file_range_wrapper)
-       lgfr    %r2,%r2                 # int
-       sllg    %r3,%r3,32              # get high word of 64bit loff_t
-       or      %r3,%r4                 # get low word of 64bit loff_t
-       sllg    %r4,%r5,32              # get high word of 64bit loff_t
-       or      %r4,%r6                 # get low word of 64bit loff_t
-       llgf    %r5,164(%r15)           # unsigned int
-       jg      sys_sync_file_range
-
-ENTRY(sys_tee_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       llgfr   %r4,%r4                 # size_t
-       llgfr   %r5,%r5                 # unsigned int
-       jg      sys_tee
-
-ENTRY(sys_getcpu_wrapper)
-       llgtr   %r2,%r2                 # unsigned *
-       llgtr   %r3,%r3                 # unsigned *
-       llgtr   %r4,%r4                 # struct getcpu_cache *
-       jg      sys_getcpu
-
-ENTRY(compat_sys_utimes_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # struct compat_timeval *
-       jg      compat_sys_utimes
-
-ENTRY(compat_sys_utimensat_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgtr   %r3,%r3                 # char *
-       llgtr   %r4,%r4                 # struct compat_timespec *
-       lgfr    %r5,%r5                 # int
-       jg      compat_sys_utimensat
-
-ENTRY(sys_eventfd_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       jg      sys_eventfd
-
-ENTRY(sys_fallocate_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       sllg    %r4,%r4,32              # get high word of 64bit loff_t
-       lr      %r4,%r5                 # get low word of 64bit loff_t
-       sllg    %r5,%r6,32              # get high word of 64bit loff_t
-       l       %r5,164(%r15)           # get low word of 64bit loff_t
-       jg      sys_fallocate
-
-ENTRY(sys_timerfd_create_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       jg      sys_timerfd_create
-
-ENTRY(sys_eventfd2_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       lgfr    %r3,%r3                 # int
-       jg      sys_eventfd2
-
-ENTRY(sys_inotify_init1_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_inotify_init1
-
-ENTRY(sys_pipe2_wrapper)
-       llgtr   %r2,%r2                 # u32 *
-       lgfr    %r3,%r3                 # int
-       jg      sys_pipe2               # branch to system call
-
-ENTRY(sys_dup3_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned int
-       lgfr    %r4,%r4                 # int
-       jg      sys_dup3                # branch to system call
-
-ENTRY(sys_epoll_create1_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_epoll_create1       # branch to system call
-
-ENTRY(sys32_readahead_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgfr   %r3,%r3                 # u32
-       llgfr   %r4,%r4                 # u32
-       lgfr    %r5,%r5                 # s32
-       jg      sys32_readahead         # branch to system call
-
-ENTRY(sys_tkill_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       lgfr    %r3,%r3                 # int
-       jg      sys_tkill               # branch to system call
-
-ENTRY(sys_tgkill_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       lgfr    %r3,%r3                 # pid_t
-       lgfr    %r4,%r4                 # int
-       jg      sys_tgkill              # branch to system call
-
-ENTRY(compat_sys_keyctl_wrapper)
-       llgfr   %r2,%r2                 # u32
-       llgfr   %r3,%r3                 # u32
-       llgfr   %r4,%r4                 # u32
-       llgfr   %r5,%r5                 # u32
-       llgfr   %r6,%r6                 # u32
-       jg      compat_sys_keyctl       # branch to system call
-
-ENTRY(sys_perf_event_open_wrapper)
-       llgtr   %r2,%r2                 # const struct perf_event_attr *
-       lgfr    %r3,%r3                 # pid_t
-       lgfr    %r4,%r4                 # int
-       lgfr    %r5,%r5                 # int
-       llgfr   %r6,%r6                 # unsigned long
-       jg      sys_perf_event_open     # branch to system call
-
-ENTRY(sys_clone_wrapper)
-       llgfr   %r2,%r2                 # unsigned long
-       llgfr   %r3,%r3                 # unsigned long
-       llgtr   %r4,%r4                 # int *
-       llgtr   %r5,%r5                 # int *
-       jg      sys_clone               # branch to system call
-
-ENTRY(sys32_execve_wrapper)
-       llgtr   %r2,%r2                 # char *
-       llgtr   %r3,%r3                 # compat_uptr_t *
-       llgtr   %r4,%r4                 # compat_uptr_t *
-       jg      compat_sys_execve       # branch to system call
-
-ENTRY(sys_fanotify_init_wrapper)
-       llgfr   %r2,%r2                 # unsigned int
-       llgfr   %r3,%r3                 # unsigned int
-       jg      sys_fanotify_init       # branch to system call
-
-ENTRY(sys_prlimit64_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       llgfr   %r3,%r3                 # unsigned int
-       llgtr   %r4,%r4                 # const struct rlimit64 __user *
-       llgtr   %r5,%r5                 # struct rlimit64 __user *
-       jg      sys_prlimit64           # branch to system call
-
-ENTRY(sys_name_to_handle_at_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char __user *
-       llgtr   %r4,%r4                 # struct file_handle __user *
-       llgtr   %r5,%r5                 # int __user *
-       lgfr    %r6,%r6                 # int
-       jg      sys_name_to_handle_at
-
-ENTRY(compat_sys_clock_adjtime_wrapper)
-       lgfr    %r2,%r2                 # clockid_t (int)
-       llgtr   %r3,%r3                 # struct compat_timex __user *
-       jg      compat_sys_clock_adjtime
-
-ENTRY(sys_syncfs_wrapper)
-       lgfr    %r2,%r2                 # int
-       jg      sys_syncfs
-
-ENTRY(sys_setns_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       jg      sys_setns
-
-ENTRY(compat_sys_process_vm_readv_wrapper)
-       lgfr    %r2,%r2                 # compat_pid_t
-       llgtr   %r3,%r3                 # struct compat_iovec __user *
-       llgfr   %r4,%r4                 # unsigned long
-       llgtr   %r5,%r5                 # struct compat_iovec __user *
-       llgfr   %r6,%r6                 # unsigned long
-       llgf    %r0,164(%r15)           # unsigned long
-       stg     %r0,160(%r15)
-       jg      compat_sys_process_vm_readv
-
-ENTRY(compat_sys_process_vm_writev_wrapper)
-       lgfr    %r2,%r2                 # compat_pid_t
-       llgtr   %r3,%r3                 # struct compat_iovec __user *
-       llgfr   %r4,%r4                 # unsigned long
-       llgtr   %r5,%r5                 # struct compat_iovec __user *
-       llgfr   %r6,%r6                 # unsigned long
-       llgf    %r0,164(%r15)           # unsigned long
-       stg     %r0,160(%r15)
-       jg      compat_sys_process_vm_writev
-
-ENTRY(sys_s390_runtime_instr_wrapper)
-       lgfr    %r2,%r2                 # int
-       lgfr    %r3,%r3                 # int
-       jg      sys_s390_runtime_instr
-
-ENTRY(sys_kcmp_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       lgfr    %r3,%r3                 # pid_t
-       lgfr    %r4,%r4                 # int
-       llgfr   %r5,%r5                 # unsigned long
-       llgfr   %r6,%r6                 # unsigned long
-       jg      sys_kcmp
-
-ENTRY(sys_finit_module_wrapper)
-       lgfr    %r2,%r2                 # int
-       llgtr   %r3,%r3                 # const char __user *
-       lgfr    %r4,%r4                 # int
-       jg      sys_finit_module
-
-ENTRY(sys_sched_setattr_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       llgtr   %r3,%r3                 # struct sched_attr __user *
-       jg      sys_sched_setattr
-
-ENTRY(sys_sched_getattr_wrapper)
-       lgfr    %r2,%r2                 # pid_t
-       llgtr   %r3,%r3                 # const char __user *
-       llgfr   %r4,%r4                 # unsigned int
-       jg      sys_sched_getattr
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
new file mode 100644 (file)
index 0000000..824c39d
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ *  Compat system call wrappers.
+ *
+ *    Copyright IBM Corp. 2014
+ */
+
+#include <linux/syscalls.h>
+#include <linux/compat.h>
+#include "entry.h"
+
+#define COMPAT_SYSCALL_WRAP1(name, ...) \
+       COMPAT_SYSCALL_WRAPx(1, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP2(name, ...) \
+       COMPAT_SYSCALL_WRAPx(2, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP3(name, ...) \
+       COMPAT_SYSCALL_WRAPx(3, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP4(name, ...) \
+       COMPAT_SYSCALL_WRAPx(4, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP5(name, ...) \
+       COMPAT_SYSCALL_WRAPx(5, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_WRAP6(name, ...) \
+       COMPAT_SYSCALL_WRAPx(6, _##name, __VA_ARGS__)
+
+#define __SC_COMPAT_TYPE(t, a) \
+       __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a
+
+#define __SC_COMPAT_CAST(t, a)                                         \
+({                                                                     \
+       long __ReS = a;                                                 \
+                                                                       \
+       BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) &&              \
+                    !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t));            \
+       if (__TYPE_IS_L(t))                                             \
+               __ReS = (s32)a;                                         \
+       if (__TYPE_IS_UL(t))                                            \
+               __ReS = (u32)a;                                         \
+       if (__TYPE_IS_PTR(t))                                           \
+               __ReS = a & 0x7fffffff;                                 \
+       (t)__ReS;                                                       \
+})
+
+/*
+ * The COMPAT_SYSCALL_WRAP macro generates system call wrappers to be used by
+ * compat tasks. These wrappers will only be used for system calls where only
+ * the system call arguments need sign or zero extension or zeroing of the upper
+ * 33 bits of pointers.
+ * Note: since the wrapper function will afterwards call a system call which
+ * again performs zero and sign extension for all system call arguments with
+ * a size of less than eight bytes, these compat wrappers only touch those
+ * system call arguments with a size of eight bytes ((unsigned) long and
+ * pointers). Zero and sign extension for e.g. int parameters will be done by
+ * the regular system call wrappers.
+ */
+#define COMPAT_SYSCALL_WRAPx(x, name, ...)                                     \
+       asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));              \
+       asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\
+       asmlinkage long compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \
+       {                                                                       \
+               return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__));        \
+       }
+
+COMPAT_SYSCALL_WRAP1(exit, int, error_code);
+COMPAT_SYSCALL_WRAP1(close, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode);
+COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname);
+COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname);
+COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename);
+COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev);
+COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode);
+COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name);
+COMPAT_SYSCALL_WRAP1(alarm, unsigned int, seconds);
+COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode);
+COMPAT_SYSCALL_WRAP1(nice, int, increment);
+COMPAT_SYSCALL_WRAP2(kill, int, pid, int, sig);
+COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname);
+COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode);
+COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname);
+COMPAT_SYSCALL_WRAP1(dup, unsigned int, fildes);
+COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes);
+COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk);
+COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler);
+COMPAT_SYSCALL_WRAP1(acct, const char __user *, name);
+COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags);
+COMPAT_SYSCALL_WRAP2(setpgid, pid_t, pid, pid_t, pgid);
+COMPAT_SYSCALL_WRAP1(umask, int, mask);
+COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename);
+COMPAT_SYSCALL_WRAP2(dup2, unsigned int, oldfd, unsigned int, newfd);
+COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask);
+COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len);
+COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new);
+COMPAT_SYSCALL_WRAP3(readlink, const char __user *, path, char __user *, buf, int, bufsiz);
+COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library);
+COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags);
+COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg);
+COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len);
+COMPAT_SYSCALL_WRAP2(fchmod, unsigned int, fd, umode_t, mode);
+COMPAT_SYSCALL_WRAP2(getpriority, int, which, int, who);
+COMPAT_SYSCALL_WRAP3(setpriority, int, which, int, who, int, niceval);
+COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len);
+COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile);
+COMPAT_SYSCALL_WRAP1(fsync, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len);
+COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name);
+COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot);
+COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs);
+COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags);
+COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr);
+COMPAT_SYSCALL_WRAP1(getpgid, pid_t, pid);
+COMPAT_SYSCALL_WRAP1(fchdir, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data);
+COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2);
+COMPAT_SYSCALL_WRAP1(s390_personality, unsigned int, personality);
+COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence);
+COMPAT_SYSCALL_WRAP2(flock, unsigned int, fd, unsigned int, cmd);
+COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags);
+COMPAT_SYSCALL_WRAP1(getsid, pid_t, pid);
+COMPAT_SYSCALL_WRAP1(fdatasync, unsigned int, fd);
+COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len);
+COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len);
+COMPAT_SYSCALL_WRAP1(mlockall, int, flags);
+COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param);
+COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param);
+COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param);
+COMPAT_SYSCALL_WRAP1(sched_getscheduler, pid_t, pid);
+COMPAT_SYSCALL_WRAP1(sched_get_priority_max, int, policy);
+COMPAT_SYSCALL_WRAP1(sched_get_priority_min, int, policy);
+COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr);
+COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout);
+COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5);
+COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size);
+COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr);
+COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data);
+COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group);
+COMPAT_SYSCALL_WRAP2(setreuid, uid_t, ruid, uid_t, euid);
+COMPAT_SYSCALL_WRAP2(setregid, gid_t, rgid, gid_t, egid);
+COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist);
+COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist);
+COMPAT_SYSCALL_WRAP3(fchown, unsigned int, fd, uid_t, user, gid_t, group);
+COMPAT_SYSCALL_WRAP3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid);
+COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid);
+COMPAT_SYSCALL_WRAP3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid);
+COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid);
+COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group);
+COMPAT_SYSCALL_WRAP1(setuid, uid_t, uid);
+COMPAT_SYSCALL_WRAP1(setgid, gid_t, gid);
+COMPAT_SYSCALL_WRAP1(setfsuid, uid_t, uid);
+COMPAT_SYSCALL_WRAP1(setfsgid, gid_t, gid);
+COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old);
+COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec);
+COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior);
+COMPAT_SYSCALL_WRAP5(setxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags);
+COMPAT_SYSCALL_WRAP5(lsetxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags);
+COMPAT_SYSCALL_WRAP5(fsetxattr, int, fd, const char __user *, name, const void __user *, value, size_t, size, int, flags);
+COMPAT_SYSCALL_WRAP3(getdents64, unsigned int, fd, struct linux_dirent64 __user *, dirent, unsigned int, count);
+COMPAT_SYSCALL_WRAP4(getxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size);
+COMPAT_SYSCALL_WRAP4(lgetxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size);
+COMPAT_SYSCALL_WRAP4(fgetxattr, int, fd, const char __user *, name, void __user *, value, size_t, size);
+COMPAT_SYSCALL_WRAP3(listxattr, const char __user *, path, char __user *, list, size_t, size);
+COMPAT_SYSCALL_WRAP3(llistxattr, const char __user *, path, char __user *, list, size_t, size);
+COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size);
+COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name);
+COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name);
+COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name);
+COMPAT_SYSCALL_WRAP1(exit_group, int, error_code);
+COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr);
+COMPAT_SYSCALL_WRAP1(epoll_create, int, size);
+COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event);
+COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout);
+COMPAT_SYSCALL_WRAP1(timer_getoverrun, timer_t, timer_id);
+COMPAT_SYSCALL_WRAP1(timer_delete, compat_timer_t, compat_timer_id);
+COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx);
+COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result);
+COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name);
+COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id);
+COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, const char __user *, info, key_serial_t, id);
+COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags);
+COMPAT_SYSCALL_WRAP3(ioprio_set, int, which, int, who, int, ioprio);
+COMPAT_SYSCALL_WRAP2(ioprio_get, int, which, int, who);
+COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask);
+COMPAT_SYSCALL_WRAP2(inotify_rm_watch, int, fd, __s32, wd);
+COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode);
+COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev);
+COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag);
+COMPAT_SYSCALL_WRAP3(unlinkat, int, dfd, const char __user *, pathname, int, flag);
+COMPAT_SYSCALL_WRAP4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname);
+COMPAT_SYSCALL_WRAP5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags);
+COMPAT_SYSCALL_WRAP3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname);
+COMPAT_SYSCALL_WRAP4(readlinkat, int, dfd, const char __user *, path, char __user *, buf, int, bufsiz);
+COMPAT_SYSCALL_WRAP3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode);
+COMPAT_SYSCALL_WRAP3(faccessat, int, dfd, const char __user *, filename, int, mode);
+COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags);
+COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags);
+COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache);
+COMPAT_SYSCALL_WRAP1(eventfd, unsigned int, count);
+COMPAT_SYSCALL_WRAP2(timerfd_create, int, clockid, int, flags);
+COMPAT_SYSCALL_WRAP2(eventfd2, unsigned int, count, int, flags);
+COMPAT_SYSCALL_WRAP1(inotify_init1, int, flags);
+COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags);
+COMPAT_SYSCALL_WRAP3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags);
+COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags);
+COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig);
+COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig);
+COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
+COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val);
+COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags);
+COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
+COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
+COMPAT_SYSCALL_WRAP1(syncfs, int, fd);
+COMPAT_SYSCALL_WRAP2(setns, int, fd, int, nstype);
+COMPAT_SYSCALL_WRAP2(s390_runtime_instr, int, command, int, signum);
+COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2);
+COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags);
+COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
+COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags);
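For orientation, a rough sketch of what one of these wrappers boils down to after preprocessing: under the macros above, COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags) produces approximately the following on a 64-bit build. The exact tokens emitted by __MAP/__SC_DECL are simplified here, so treat this as illustrative rather than the literal expansion.

    /* Illustrative expansion only -- not the literal preprocessor output. */
    asmlinkage long sys_pipe2(int __user *fildes, int flags);

    /*
     * __SC_COMPAT_TYPE turns the 8-byte pointer parameter into a plain
     * register-width 'long' in the compat prototype; the 4-byte 'int'
     * parameter keeps its type.
     */
    asmlinkage long compat_sys_pipe2(long fildes, int flags)
    {
            /*
             * __SC_COMPAT_CAST clears the upper 33 bits of the 31-bit user
             * pointer before calling the native system call; sign/zero
             * extension of the int argument is left to the regular syscall
             * wrapper, as described in the comment above.
             */
            return sys_pipe2((int __user *)(fildes & 0x7fffffff), flags);
    }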
index fca20b5fe79e085e4795861edf7f4bb5736c67cb..6b594439cca5a68fa3fd8ece0484cc0f8b9d11bd 100644 (file)
@@ -380,8 +380,6 @@ static __init void detect_machine_facilities(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
        if (test_facility(3))
                S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
-       if (test_facility(27))
-               S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
        if (test_facility(40))
                S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
        if (test_facility(50) && test_facility(73))
index 0dc2b6d0a1ec8557f7450d5fbd255d2758bf8512..526d3735ed29050d317ef1327039397a4594d71f 100644 (file)
@@ -43,6 +43,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
                 _TIF_SYSCALL_TRACEPOINT)
+_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
@@ -159,10 +160,12 @@ ENTRY(__switch_to)
        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
        mvc     __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
        l       %r15,__THREAD_ksp(%r3)          # load kernel stack of next
-       tm      __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
+       lhi     %r6,_TIF_TRANSFER               # transfer TIF bits
+       n       %r6,__TI_flags(%r4)             # isolate TIF bits
        jz      0f
-       ni      __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-       oi      __TI_flags+3(%r5),_TIF_MCCK_PENDING     # set it in next
+       o       %r6,__TI_flags(%r5)             # set TIF bits of next
+       st      %r6,__TI_flags(%r5)
+       ni      __TI_flags+3(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
 0:     lm      %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        br      %r14
 
index cb533f78c09ed5795ee365eec00374c5db662a9c..6ac78192455f386aa72a15bdd2b73c55f844a5b5 100644 (file)
@@ -67,9 +67,7 @@ struct s390_mmap_arg_struct;
 struct fadvise64_64_args;
 struct old_sigaction;
 
-long sys_sigreturn(void);
-long sys_rt_sigreturn(void);
-long sys32_sigreturn(void);
-long sys32_rt_sigreturn(void);
+long sys_s390_personality(unsigned int personality);
+long sys_s390_runtime_instr(int command, int signum);
 
 #endif /* _ENTRY_H */
index 384e609b47110dc59c7a96cfa419e864cca86b2a..e09dbe5f29015a9fc794cddb2f7ff57053594cdc 100644 (file)
@@ -48,6 +48,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING)
 _TIF_TRACE    = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
                 _TIF_SYSCALL_TRACEPOINT)
+_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
 
 #define BASED(name) name-system_call(%r13)
 
@@ -189,10 +190,12 @@ ENTRY(__switch_to)
        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
        mvc     __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
        lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
-       tm      __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
+       llill   %r6,_TIF_TRANSFER               # transfer TIF bits
+       ng      %r6,__TI_flags(%r4)             # isolate TIF bits
        jz      0f
-       ni      __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
-       oi      __TI_flags+7(%r5),_TIF_MCCK_PENDING     # set it in next
+       og      %r6,__TI_flags(%r5)             # set TIF bits of next
+       stg     %r6,__TI_flags(%r5)
+       ni      __TI_flags+7(%r4),255-_TIF_TRANSFER # clear TIF bits of prev
 0:     lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        br      %r14
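In plain C, the _TIF_TRANSFER sequence added to both __switch_to variants above amounts to moving any pending transferable TIF bits from the previous task's thread_info to the next one's. A minimal sketch, assuming the usual thread_info 'flags' field; the function name is for illustration only:

    /* Sketch of the TIF-bit handover done in assembly above (illustrative). */
    #define _TIF_TRANSFER  (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)

    static void transfer_tif_bits(struct thread_info *prev, struct thread_info *next)
    {
            unsigned long bits = prev->flags & _TIF_TRANSFER;   /* isolate TIF bits */

            if (bits) {
                    next->flags |= bits;                /* set TIF bits of next */
                    prev->flags &= ~_TIF_TRANSFER;      /* clear TIF bits of prev */
            }
    }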
 
index bb27a262c44aa8076dfe7eb9cb5a243d8c315079..a770be97db4da7c513e7952d0360c85a5d5154bb 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
 #include <asm/lowcore.h>
index 5d2dfa31c4efad44028d41b60b6d43631ecaedc6..61595c1f0a0fe7d502bb6c213519929effe48b02 100644 (file)
@@ -121,7 +121,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
                               : PERF_RECORD_MISC_KERNEL;
 }
 
-void print_debug_cf(void)
+static void print_debug_cf(void)
 {
        struct cpumf_ctr_info cf_info;
        int cpu = smp_processor_id();
index f6be6087a0e98edb3d2917228cb65b4657b61d11..4ac8fafec95fa87d0b680c6222ac3c9e9f06700a 100644 (file)
@@ -85,7 +85,10 @@ void update_cr_regs(struct task_struct *task)
 
        /* merge TIF_SINGLE_STEP into user specified PER registers. */
        if (test_tsk_thread_flag(task, TIF_SINGLE_STEP)) {
-               new.control |= PER_EVENT_IFETCH;
+               if (test_tsk_thread_flag(task, TIF_BLOCK_STEP))
+                       new.control |= PER_EVENT_BRANCH;
+               else
+                       new.control |= PER_EVENT_IFETCH;
 #ifdef CONFIG_64BIT
                new.control |= PER_CONTROL_SUSPENSION;
                new.control |= PER_EVENT_TRANSACTION_END;
@@ -107,14 +110,22 @@ void update_cr_regs(struct task_struct *task)
 
 void user_enable_single_step(struct task_struct *task)
 {
+       clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        set_tsk_thread_flag(task, TIF_SINGLE_STEP);
 }
 
 void user_disable_single_step(struct task_struct *task)
 {
+       clear_tsk_thread_flag(task, TIF_BLOCK_STEP);
        clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
 }
 
+void user_enable_block_step(struct task_struct *task)
+{
+       set_tsk_thread_flag(task, TIF_SINGLE_STEP);
+       set_tsk_thread_flag(task, TIF_BLOCK_STEP);
+}
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *
index 09e2f468f48bc874e61c3d12b9d751b04c5f0126..f70f2489fa5fe241fd107596d2879d0fe331f7bc 100644 (file)
@@ -47,7 +47,6 @@
 #include <linux/compat.h>
 
 #include <asm/ipl.h>
-#include <asm/uaccess.h>
 #include <asm/facility.h>
 #include <asm/smp.h>
 #include <asm/mmu_context.h>
 #include <asm/sclp.h>
 #include "entry.h"
 
-/*
- * User copy operations.
- */
-struct uaccess_ops uaccess;
-EXPORT_SYMBOL(uaccess);
-
 /*
  * Machine setup..
  */
@@ -294,14 +287,6 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-static int __init early_parse_user_mode(char *p)
-{
-       if (!p || strcmp(p, "primary") == 0)
-               return 0;
-       return 1;
-}
-early_param("user_mode", early_parse_user_mode);
-
 void *restart_stack __attribute__((__section__(".data")));
 
 static void __init setup_lowcore(void)
@@ -1009,8 +994,6 @@ void __init setup_arch(char **cmdline_p)
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;
 
-       uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
-
        parse_early_param();
        detect_memory_layout(memory_chunk, memory_end);
        os_info_init();
index a7125b62a9a6c1f77289a0d834358fea1971cb87..8827883310ddbc05c765f17b2125e804dc2cd85e 100644 (file)
@@ -773,11 +773,11 @@ void __noreturn cpu_die(void)
 
 void __init smp_fill_possible_mask(void)
 {
-       unsigned int possible, cpu;
+       unsigned int possible, sclp, cpu;
 
-       possible = setup_possible_cpus;
-       if (!possible)
-               possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
+       sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
+       possible = setup_possible_cpus ?: nr_cpu_ids;
+       possible = min(possible, sclp);
        for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
 }
index 143992152ec95d7c2b96c1978d1bae78da480efb..542ef488bac176fb0b3a1efed9e34e2e2af0c730 100644 (file)
 #define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)
 
 NI_SYSCALL                                                     /* 0 */
-SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
+SYSCALL(sys_exit,sys_exit,compat_sys_exit)
 SYSCALL(sys_fork,sys_fork,sys_fork)
-SYSCALL(sys_read,sys_read,sys32_read_wrapper)
-SYSCALL(sys_write,sys_write,sys32_write_wrapper)
+SYSCALL(sys_read,sys_read,compat_sys_s390_read)
+SYSCALL(sys_write,sys_write,compat_sys_s390_write)
 SYSCALL(sys_open,sys_open,compat_sys_open)                     /* 5 */
-SYSCALL(sys_close,sys_close,sys32_close_wrapper)
+SYSCALL(sys_close,sys_close,compat_sys_close)
 SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
-SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
-SYSCALL(sys_link,sys_link,sys32_link_wrapper)
-SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper)            /* 10 */
-SYSCALL(sys_execve,sys_execve,sys32_execve_wrapper)
-SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
-SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper)            /* old time syscall */
-SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
-SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper)               /* 15 */
-SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper)    /* old lchown16 syscall*/
+SYSCALL(sys_creat,sys_creat,compat_sys_creat)
+SYSCALL(sys_link,sys_link,compat_sys_link)
+SYSCALL(sys_unlink,sys_unlink,compat_sys_unlink)               /* 10 */
+SYSCALL(sys_execve,sys_execve,compat_sys_execve)
+SYSCALL(sys_chdir,sys_chdir,compat_sys_chdir)
+SYSCALL(sys_time,sys_ni_syscall,compat_sys_time)               /* old time syscall */
+SYSCALL(sys_mknod,sys_mknod,compat_sys_mknod)
+SYSCALL(sys_chmod,sys_chmod,compat_sys_chmod)                  /* 15 */
+SYSCALL(sys_lchown16,sys_ni_syscall,compat_sys_s390_lchown16)  /* old lchown16 syscall*/
 NI_SYSCALL                                                     /* old break syscall holder */
 NI_SYSCALL                                                     /* old stat syscall holder */
 SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
 SYSCALL(sys_getpid,sys_getpid,sys_getpid)                      /* 20 */
-SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
-SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
-SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper)    /* old setuid16 syscall*/
-SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16)            /* old getuid16 syscall*/
-SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper)          /* 25 old stime syscall */
-SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper)
-SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper)
+SYSCALL(sys_mount,sys_mount,compat_sys_mount)
+SYSCALL(sys_oldumount,sys_oldumount,compat_sys_oldumount)
+SYSCALL(sys_setuid16,sys_ni_syscall,compat_sys_s390_setuid16)  /* old setuid16 syscall*/
+SYSCALL(sys_getuid16,sys_ni_syscall,compat_sys_s390_getuid16)  /* old getuid16 syscall*/
+SYSCALL(sys_stime,sys_ni_syscall,compat_sys_stime)             /* 25 old stime syscall */
+SYSCALL(sys_ptrace,sys_ptrace,compat_sys_ptrace)
+SYSCALL(sys_alarm,sys_alarm,compat_sys_alarm)
 NI_SYSCALL                                                     /* old fstat syscall */
 SYSCALL(sys_pause,sys_pause,sys_pause)
-SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper)          /* 30 */
+SYSCALL(sys_utime,sys_utime,compat_sys_utime)          /* 30 */
 NI_SYSCALL                                                     /* old stty syscall */
 NI_SYSCALL                                                     /* old gtty syscall */
-SYSCALL(sys_access,sys_access,sys32_access_wrapper)
-SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper)
+SYSCALL(sys_access,sys_access,compat_sys_access)
+SYSCALL(sys_nice,sys_nice,compat_sys_nice)
 NI_SYSCALL                                                     /* 35 old ftime syscall */
 SYSCALL(sys_sync,sys_sync,sys_sync)
-SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper)
-SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper)
-SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper)
-SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper)               /* 40 */
-SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper)
-SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper)
-SYSCALL(sys_times,sys_times,compat_sys_times_wrapper)
+SYSCALL(sys_kill,sys_kill,compat_sys_kill)
+SYSCALL(sys_rename,sys_rename,compat_sys_rename)
+SYSCALL(sys_mkdir,sys_mkdir,compat_sys_mkdir)
+SYSCALL(sys_rmdir,sys_rmdir,compat_sys_rmdir)          /* 40 */
+SYSCALL(sys_dup,sys_dup,compat_sys_dup)
+SYSCALL(sys_pipe,sys_pipe,compat_sys_pipe)
+SYSCALL(sys_times,sys_times,compat_sys_times)
 NI_SYSCALL                                                     /* old prof syscall */
-SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper)                     /* 45 */
-SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper)    /* old setgid16 syscall*/
-SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16)            /* old getgid16 syscall*/
-SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper)
-SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16)          /* old geteuid16 syscall */
-SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16)          /* 50 old getegid16 syscall */
-SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper)
-SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper)
+SYSCALL(sys_brk,sys_brk,compat_sys_brk)                                /* 45 */
+SYSCALL(sys_setgid16,sys_ni_syscall,compat_sys_s390_setgid16)  /* old setgid16 syscall*/
+SYSCALL(sys_getgid16,sys_ni_syscall,compat_sys_s390_getgid16)  /* old getgid16 syscall*/
+SYSCALL(sys_signal,sys_signal,compat_sys_signal)
+SYSCALL(sys_geteuid16,sys_ni_syscall,compat_sys_s390_geteuid16)        /* old geteuid16 syscall */
+SYSCALL(sys_getegid16,sys_ni_syscall,compat_sys_s390_getegid16)        /* 50 old getegid16 syscall */
+SYSCALL(sys_acct,sys_acct,compat_sys_acct)
+SYSCALL(sys_umount,sys_umount,compat_sys_umount)
 NI_SYSCALL                                                     /* old lock syscall */
-SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper)
-SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper)          /* 55 */
+SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl)
+SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl)          /* 55 */
 NI_SYSCALL                                                     /* intel mpx syscall */
-SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper)
+SYSCALL(sys_setpgid,sys_setpgid,compat_sys_setpgid)
 NI_SYSCALL                                                     /* old ulimit syscall */
 NI_SYSCALL                                                     /* old uname syscall */
-SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper)               /* 60 */
-SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper)
-SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper)
-SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper)
+SYSCALL(sys_umask,sys_umask,compat_sys_umask)                  /* 60 */
+SYSCALL(sys_chroot,sys_chroot,compat_sys_chroot)
+SYSCALL(sys_ustat,sys_ustat,compat_sys_ustat)
+SYSCALL(sys_dup2,sys_dup2,compat_sys_dup2)
 SYSCALL(sys_getppid,sys_getppid,sys_getppid)
 SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp)                   /* 65 */
 SYSCALL(sys_setsid,sys_setsid,sys_setsid)
 SYSCALL(sys_sigaction,sys_sigaction,compat_sys_sigaction)
 NI_SYSCALL                                                     /* old sgetmask syscall*/
 NI_SYSCALL                                                     /* old ssetmask syscall*/
-SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper)        /* old setreuid16 syscall */
-SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper)        /* old setregid16 syscall */
-SYSCALL(sys_sigsuspend,sys_sigsuspend,sys_sigsuspend_wrapper)
-SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper)
-SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper)
-SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper)      /* 75 */
-SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper)
+SYSCALL(sys_setreuid16,sys_ni_syscall,compat_sys_s390_setreuid16) /* old setreuid16 syscall */
+SYSCALL(sys_setregid16,sys_ni_syscall,compat_sys_s390_setregid16) /* old setregid16 syscall */
+SYSCALL(sys_sigsuspend,sys_sigsuspend,compat_sys_sigsuspend)
+SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending)
+SYSCALL(sys_sethostname,sys_sethostname,compat_sys_sethostname)
+SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit)      /* 75 */
+SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit)
 SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage)
-SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday_wrapper)
-SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday_wrapper)
-SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper)      /* 80 old getgroups16 syscall */
-SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper)      /* old setgroups16 syscall */
+SYSCALL(sys_gettimeofday,sys_gettimeofday,compat_sys_gettimeofday)
+SYSCALL(sys_settimeofday,sys_settimeofday,compat_sys_settimeofday)
+SYSCALL(sys_getgroups16,sys_ni_syscall,compat_sys_s390_getgroups16)    /* 80 old getgroups16 syscall */
+SYSCALL(sys_setgroups16,sys_ni_syscall,compat_sys_s390_setgroups16)    /* old setgroups16 syscall */
 NI_SYSCALL                                                     /* old select syscall */
-SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper)
+SYSCALL(sys_symlink,sys_symlink,compat_sys_symlink)
 NI_SYSCALL                                                     /* old lstat syscall */
-SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper)      /* 85 */
-SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
-SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
-SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
-SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper)   /* old readdir syscall */
-SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper)          /* 90 */
-SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
+SYSCALL(sys_readlink,sys_readlink,compat_sys_readlink)         /* 85 */
+SYSCALL(sys_uselib,sys_uselib,compat_sys_uselib)
+SYSCALL(sys_swapon,sys_swapon,compat_sys_swapon)
+SYSCALL(sys_reboot,sys_reboot,compat_sys_reboot)
+SYSCALL(sys_ni_syscall,sys_ni_syscall,compat_sys_old_readdir)  /* old readdir syscall */
+SYSCALL(sys_old_mmap,sys_old_mmap,compat_sys_s390_old_mmap)    /* 90 */
+SYSCALL(sys_munmap,sys_munmap,compat_sys_munmap)
 SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
 SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
-SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
-SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper)    /* 95 old fchown16 syscall*/
-SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
-SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper)
+SYSCALL(sys_fchmod,sys_fchmod,compat_sys_fchmod)
+SYSCALL(sys_fchown16,sys_ni_syscall,compat_sys_s390_fchown16)  /* 95 old fchown16 syscall*/
+SYSCALL(sys_getpriority,sys_getpriority,compat_sys_getpriority)
+SYSCALL(sys_setpriority,sys_setpriority,compat_sys_setpriority)
 NI_SYSCALL                                                     /* old profil syscall */
-SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper)
-SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper)    /* 100 */
+SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs)
+SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs)    /* 100 */
 NI_SYSCALL                                                     /* ioperm for i386 */
-SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
-SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
+SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall)
+SYSCALL(sys_syslog,sys_syslog,compat_sys_syslog)
 SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer)
 SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer)      /* 105 */
-SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
-SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
-SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
+SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat)
+SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat)
+SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat)
 NI_SYSCALL                                                     /* old uname syscall */
 SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,compat_sys_lookup_dcookie)       /* 110 */
 SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
 NI_SYSCALL                                                     /* old "idle" system call */
 NI_SYSCALL                                                     /* vm86old for i386 */
 SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4)
-SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper)         /* 115 */
-SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
+SYSCALL(sys_swapoff,sys_swapoff,compat_sys_swapoff)            /* 115 */
+SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo)
 SYSCALL(sys_s390_ipc,sys_s390_ipc,compat_sys_s390_ipc)
-SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
-SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
-SYSCALL(sys_clone,sys_clone,sys_clone_wrapper)                 /* 120 */
-SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
-SYSCALL(sys_newuname,sys_newuname,sys32_newuname_wrapper)
+SYSCALL(sys_fsync,sys_fsync,compat_sys_fsync)
+SYSCALL(sys_sigreturn,sys_sigreturn,compat_sys_sigreturn)
+SYSCALL(sys_clone,sys_clone,compat_sys_clone)                  /* 120 */
+SYSCALL(sys_setdomainname,sys_setdomainname,compat_sys_setdomainname)
+SYSCALL(sys_newuname,sys_newuname,compat_sys_newuname)
 NI_SYSCALL                                                     /* modify_ldt for i386 */
-SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
-SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper)      /* 125 */
+SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex)
+SYSCALL(sys_mprotect,sys_mprotect,compat_sys_mprotect)         /* 125 */
 SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask)
 NI_SYSCALL                                                     /* old "create module" */
-SYSCALL(sys_init_module,sys_init_module,sys_init_module_wrapper)
-SYSCALL(sys_delete_module,sys_delete_module,sys_delete_module_wrapper)
+SYSCALL(sys_init_module,sys_init_module,compat_sys_init_module)
+SYSCALL(sys_delete_module,sys_delete_module,compat_sys_delete_module)
 NI_SYSCALL                                                     /* 130: old get_kernel_syms */
-SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
-SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
-SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
-SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
-SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper)               /* 135 */
-SYSCALL(sys_personality,sys_s390_personality,sys32_personality_wrapper)
+SYSCALL(sys_quotactl,sys_quotactl,compat_sys_quotactl)
+SYSCALL(sys_getpgid,sys_getpgid,compat_sys_getpgid)
+SYSCALL(sys_fchdir,sys_fchdir,compat_sys_fchdir)
+SYSCALL(sys_bdflush,sys_bdflush,compat_sys_bdflush)
+SYSCALL(sys_sysfs,sys_sysfs,compat_sys_sysfs)          /* 135 */
+SYSCALL(sys_personality,sys_s390_personality,compat_sys_s390_personality)
 NI_SYSCALL                                                     /* for afs_syscall */
-SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper)        /* old setfsuid16 syscall */
-SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper)        /* old setfsgid16 syscall */
-SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper)            /* 140 */
-SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper)
-SYSCALL(sys_select,sys_select,compat_sys_select_wrapper)
-SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper)
-SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper)
-SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper)          /* 145 */
-SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
-SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
-SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
+SYSCALL(sys_setfsuid16,sys_ni_syscall,compat_sys_s390_setfsuid16)      /* old setfsuid16 syscall */
+SYSCALL(sys_setfsgid16,sys_ni_syscall,compat_sys_s390_setfsgid16)      /* old setfsgid16 syscall */
+SYSCALL(sys_llseek,sys_llseek,compat_sys_llseek)               /* 140 */
+SYSCALL(sys_getdents,sys_getdents,compat_sys_getdents)
+SYSCALL(sys_select,sys_select,compat_sys_select)
+SYSCALL(sys_flock,sys_flock,compat_sys_flock)
+SYSCALL(sys_msync,sys_msync,compat_sys_msync)
+SYSCALL(sys_readv,sys_readv,compat_sys_readv)          /* 145 */
+SYSCALL(sys_writev,sys_writev,compat_sys_writev)
+SYSCALL(sys_getsid,sys_getsid,compat_sys_getsid)
+SYSCALL(sys_fdatasync,sys_fdatasync,compat_sys_fdatasync)
 SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl)
-SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper)               /* 150 */
-SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
-SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
+SYSCALL(sys_mlock,sys_mlock,compat_sys_mlock)                  /* 150 */
+SYSCALL(sys_munlock,sys_munlock,compat_sys_munlock)
+SYSCALL(sys_mlockall,sys_mlockall,compat_sys_mlockall)
 SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
-SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper)
-SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper)    /* 155 */
-SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
-SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
+SYSCALL(sys_sched_setparam,sys_sched_setparam,compat_sys_sched_setparam)
+SYSCALL(sys_sched_getparam,sys_sched_getparam,compat_sys_sched_getparam)       /* 155 */
+SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,compat_sys_sched_setscheduler)
+SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,compat_sys_sched_getscheduler)
 SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
-SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
-SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper)    /* 160 */
+SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,compat_sys_sched_get_priority_max)
+SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,compat_sys_sched_get_priority_min)       /* 160 */
 SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,compat_sys_sched_rr_get_interval)
-SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
-SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
-SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper)      /* old setresuid16 syscall */
-SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper)      /* 165 old getresuid16 syscall */
+SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep)
+SYSCALL(sys_mremap,sys_mremap,compat_sys_mremap)
+SYSCALL(sys_setresuid16,sys_ni_syscall,compat_sys_s390_setresuid16)    /* old setresuid16 syscall */
+SYSCALL(sys_getresuid16,sys_ni_syscall,compat_sys_s390_getresuid16)    /* 165 old getresuid16 syscall */
 NI_SYSCALL                                                     /* for vm86 */
 NI_SYSCALL                                                     /* old sys_query_module */
-SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
+SYSCALL(sys_poll,sys_poll,compat_sys_poll)
 NI_SYSCALL                                                     /* old nfsservctl */
-SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper)      /* 170 old setresgid16 syscall */
-SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper)      /* old getresgid16 syscall */
-SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
-SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,sys32_rt_sigreturn)
+SYSCALL(sys_setresgid16,sys_ni_syscall,compat_sys_s390_setresgid16)    /* 170 old setresgid16 syscall */
+SYSCALL(sys_getresgid16,sys_ni_syscall,compat_sys_s390_getresgid16)    /* old getresgid16 syscall */
+SYSCALL(sys_prctl,sys_prctl,compat_sys_prctl)
+SYSCALL(sys_rt_sigreturn,sys_rt_sigreturn,compat_sys_rt_sigreturn)
 SYSCALL(sys_rt_sigaction,sys_rt_sigaction,compat_sys_rt_sigaction)
 SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,compat_sys_rt_sigprocmask) /* 175 */
 SYSCALL(sys_rt_sigpending,sys_rt_sigpending,compat_sys_rt_sigpending)
 SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait)
 SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,compat_sys_rt_sigqueueinfo)
 SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend)
-SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper)         /* 180 */
-SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
-SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper)      /* old chown16 syscall */
-SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
-SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
-SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper)            /* 185 */
+SYSCALL(sys_pread64,sys_pread64,compat_sys_s390_pread64)               /* 180 */
+SYSCALL(sys_pwrite64,sys_pwrite64,compat_sys_s390_pwrite64)
+SYSCALL(sys_chown16,sys_ni_syscall,compat_sys_s390_chown16)    /* old chown16 syscall */
+SYSCALL(sys_getcwd,sys_getcwd,compat_sys_getcwd)
+SYSCALL(sys_capget,sys_capget,compat_sys_capget)
+SYSCALL(sys_capset,sys_capset,compat_sys_capset)               /* 185 */
 SYSCALL(sys_sigaltstack,sys_sigaltstack,compat_sys_sigaltstack)
 SYSCALL(sys_sendfile,sys_sendfile64,compat_sys_sendfile)
 NI_SYSCALL                                                     /* streams1 */
 NI_SYSCALL                                                     /* streams2 */
 SYSCALL(sys_vfork,sys_vfork,sys_vfork)                         /* 190 */
-SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
-SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
-SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
-SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper)
-SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper)                /* 195 */
-SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper)
-SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper)
-SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper)
+SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit)
+SYSCALL(sys_mmap2,sys_mmap2,compat_sys_s390_mmap2)
+SYSCALL(sys_truncate64,sys_ni_syscall,compat_sys_s390_truncate64)
+SYSCALL(sys_ftruncate64,sys_ni_syscall,compat_sys_s390_ftruncate64)
+SYSCALL(sys_stat64,sys_ni_syscall,compat_sys_s390_stat64)              /* 195 */
+SYSCALL(sys_lstat64,sys_ni_syscall,compat_sys_s390_lstat64)
+SYSCALL(sys_fstat64,sys_ni_syscall,compat_sys_s390_fstat64)
+SYSCALL(sys_lchown,sys_lchown,compat_sys_lchown)
 SYSCALL(sys_getuid,sys_getuid,sys_getuid)
 SYSCALL(sys_getgid,sys_getgid,sys_getgid)                      /* 200 */
 SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
 SYSCALL(sys_getegid,sys_getegid,sys_getegid)
-SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper)
-SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper)
-SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper)   /* 205 */
-SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper)
-SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper)
-SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper)
-SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper)
-SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper)   /* 210 */
-SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper)
-SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper)
-SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper)
-SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper)
-SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper)      /* 215 */
-SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper)
-SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper)
-SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
-SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
-SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper)        /* 220 */
-SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
-SYSCALL(sys_readahead,sys_readahead,sys32_readahead_wrapper)
+SYSCALL(sys_setreuid,sys_setreuid,compat_sys_setreuid)
+SYSCALL(sys_setregid,sys_setregid,compat_sys_setregid)
+SYSCALL(sys_getgroups,sys_getgroups,compat_sys_getgroups)      /* 205 */
+SYSCALL(sys_setgroups,sys_setgroups,compat_sys_setgroups)
+SYSCALL(sys_fchown,sys_fchown,compat_sys_fchown)
+SYSCALL(sys_setresuid,sys_setresuid,compat_sys_setresuid)
+SYSCALL(sys_getresuid,sys_getresuid,compat_sys_getresuid)
+SYSCALL(sys_setresgid,sys_setresgid,compat_sys_setresgid)      /* 210 */
+SYSCALL(sys_getresgid,sys_getresgid,compat_sys_getresgid)
+SYSCALL(sys_chown,sys_chown,compat_sys_chown)
+SYSCALL(sys_setuid,sys_setuid,compat_sys_setuid)
+SYSCALL(sys_setgid,sys_setgid,compat_sys_setgid)
+SYSCALL(sys_setfsuid,sys_setfsuid,compat_sys_setfsuid) /* 215 */
+SYSCALL(sys_setfsgid,sys_setfsgid,compat_sys_setfsgid)
+SYSCALL(sys_pivot_root,sys_pivot_root,compat_sys_pivot_root)
+SYSCALL(sys_mincore,sys_mincore,compat_sys_mincore)
+SYSCALL(sys_madvise,sys_madvise,compat_sys_madvise)
+SYSCALL(sys_getdents64,sys_getdents64,compat_sys_getdents64)   /* 220 */
+SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64)
+SYSCALL(sys_readahead,sys_readahead,compat_sys_s390_readahead)
 SYSCALL(sys_sendfile64,sys_ni_syscall,compat_sys_sendfile64)
-SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
-SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper)   /* 225 */
-SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
-SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper)
-SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper)
-SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper)
-SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper)   /* 230 */
-SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper)
-SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper)
-SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
-SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
-SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper)  /* 235 */
+SYSCALL(sys_setxattr,sys_setxattr,compat_sys_setxattr)
+SYSCALL(sys_lsetxattr,sys_lsetxattr,compat_sys_lsetxattr)      /* 225 */
+SYSCALL(sys_fsetxattr,sys_fsetxattr,compat_sys_fsetxattr)
+SYSCALL(sys_getxattr,sys_getxattr,compat_sys_getxattr)
+SYSCALL(sys_lgetxattr,sys_lgetxattr,compat_sys_lgetxattr)
+SYSCALL(sys_fgetxattr,sys_fgetxattr,compat_sys_fgetxattr)
+SYSCALL(sys_listxattr,sys_listxattr,compat_sys_listxattr)      /* 230 */
+SYSCALL(sys_llistxattr,sys_llistxattr,compat_sys_llistxattr)
+SYSCALL(sys_flistxattr,sys_flistxattr,compat_sys_flistxattr)
+SYSCALL(sys_removexattr,sys_removexattr,compat_sys_removexattr)
+SYSCALL(sys_lremovexattr,sys_lremovexattr,compat_sys_lremovexattr)
+SYSCALL(sys_fremovexattr,sys_fremovexattr,compat_sys_fremovexattr)     /* 235 */
 SYSCALL(sys_gettid,sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill,sys_tkill_wrapper)
+SYSCALL(sys_tkill,sys_tkill,compat_sys_tkill)
 SYSCALL(sys_futex,sys_futex,compat_sys_futex)
-SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
-SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper)   /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill_wrapper)
+SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,compat_sys_sched_setaffinity)
+SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,compat_sys_sched_getaffinity)      /* 240 */
+SYSCALL(sys_tgkill,sys_tgkill,compat_sys_tgkill)
 NI_SYSCALL                                                     /* reserved for TUX */
-SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
-SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
-SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper)  /* 245 */
-SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper)
-SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper)
-SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper)
-SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
-SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper)     /* 250 */
-SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
-SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
-SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
-SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
-SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper)       /* 255 */
-SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
-SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper)
-SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper)
-SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper)
-SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper)       /* 260 */
-SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
-SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
+SYSCALL(sys_io_setup,sys_io_setup,compat_sys_io_setup)
+SYSCALL(sys_io_destroy,sys_io_destroy,compat_sys_io_destroy)
+SYSCALL(sys_io_getevents,sys_io_getevents,compat_sys_io_getevents)     /* 245 */
+SYSCALL(sys_io_submit,sys_io_submit,compat_sys_io_submit)
+SYSCALL(sys_io_cancel,sys_io_cancel,compat_sys_io_cancel)
+SYSCALL(sys_exit_group,sys_exit_group,compat_sys_exit_group)
+SYSCALL(sys_epoll_create,sys_epoll_create,compat_sys_epoll_create)
+SYSCALL(sys_epoll_ctl,sys_epoll_ctl,compat_sys_epoll_ctl)      /* 250 */
+SYSCALL(sys_epoll_wait,sys_epoll_wait,compat_sys_epoll_wait)
+SYSCALL(sys_set_tid_address,sys_set_tid_address,compat_sys_set_tid_address)
+SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,compat_sys_s390_fadvise64)
+SYSCALL(sys_timer_create,sys_timer_create,compat_sys_timer_create)
+SYSCALL(sys_timer_settime,sys_timer_settime,compat_sys_timer_settime)  /* 255 */
+SYSCALL(sys_timer_gettime,sys_timer_gettime,compat_sys_timer_gettime)
+SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,compat_sys_timer_getoverrun)
+SYSCALL(sys_timer_delete,sys_timer_delete,compat_sys_timer_delete)
+SYSCALL(sys_clock_settime,sys_clock_settime,compat_sys_clock_settime)
+SYSCALL(sys_clock_gettime,sys_clock_gettime,compat_sys_clock_gettime)  /* 260 */
+SYSCALL(sys_clock_getres,sys_clock_getres,compat_sys_clock_getres)
+SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,compat_sys_clock_nanosleep)
 NI_SYSCALL                                                     /* reserved for vserver */
-SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
-SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
-SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
-SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
+SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,compat_sys_s390_fadvise64_64)
+SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64)
+SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64)
+SYSCALL(sys_remap_file_pages,sys_remap_file_pages,compat_sys_remap_file_pages)
 NI_SYSCALL                                                     /* 268 sys_mbind */
 NI_SYSCALL                                                     /* 269 sys_get_mempolicy */
 NI_SYSCALL                                                     /* 270 sys_set_mempolicy */
-SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper)
-SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper)
-SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper)
-SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper)
-SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */
-SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
-SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load_wrapper)
-SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
-SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
-SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl_wrapper)               /* 280 */
+SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open)
+SYSCALL(sys_mq_unlink,sys_mq_unlink,compat_sys_mq_unlink)
+SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend)
+SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive)
+SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify) /* 275 */
+SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr)
+SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load)
+SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key)
+SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key)
+SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl)               /* 280 */
 SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid)
-SYSCALL(sys_ioprio_set,sys_ioprio_set,sys_ioprio_set_wrapper)
-SYSCALL(sys_ioprio_get,sys_ioprio_get,sys_ioprio_get_wrapper)
+SYSCALL(sys_ioprio_set,sys_ioprio_set,compat_sys_ioprio_set)
+SYSCALL(sys_ioprio_get,sys_ioprio_get,compat_sys_ioprio_get)
 SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
-SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,sys_inotify_add_watch_wrapper)     /* 285 */
-SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,sys_inotify_rm_watch_wrapper)
+SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,compat_sys_inotify_add_watch)      /* 285 */
+SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
 NI_SYSCALL                                                     /* 287 sys_migrate_pages */
 SYSCALL(sys_openat,sys_openat,compat_sys_openat)
-SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper)
-SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper)   /* 290 */
-SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper)
-SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper)
-SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat64_wrapper)
-SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper)
-SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper)        /* 295 */
-SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper)
-SYSCALL(sys_symlinkat,sys_symlinkat,sys_symlinkat_wrapper)
-SYSCALL(sys_readlinkat,sys_readlinkat,sys_readlinkat_wrapper)
-SYSCALL(sys_fchmodat,sys_fchmodat,sys_fchmodat_wrapper)
-SYSCALL(sys_faccessat,sys_faccessat,sys_faccessat_wrapper)     /* 300 */
-SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6_wrapper)
-SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll_wrapper)
-SYSCALL(sys_unshare,sys_unshare,sys_unshare_wrapper)
+SYSCALL(sys_mkdirat,sys_mkdirat,compat_sys_mkdirat)
+SYSCALL(sys_mknodat,sys_mknodat,compat_sys_mknodat)    /* 290 */
+SYSCALL(sys_fchownat,sys_fchownat,compat_sys_fchownat)
+SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat)
+SYSCALL(sys_fstatat64,sys_newfstatat,compat_sys_s390_fstatat64)
+SYSCALL(sys_unlinkat,sys_unlinkat,compat_sys_unlinkat)
+SYSCALL(sys_renameat,sys_renameat,compat_sys_renameat) /* 295 */
+SYSCALL(sys_linkat,sys_linkat,compat_sys_linkat)
+SYSCALL(sys_symlinkat,sys_symlinkat,compat_sys_symlinkat)
+SYSCALL(sys_readlinkat,sys_readlinkat,compat_sys_readlinkat)
+SYSCALL(sys_fchmodat,sys_fchmodat,compat_sys_fchmodat)
+SYSCALL(sys_faccessat,sys_faccessat,compat_sys_faccessat)      /* 300 */
+SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6)
+SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll)
+SYSCALL(sys_unshare,sys_unshare,compat_sys_unshare)
 SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list)
 SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list)
-SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
-SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
-SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
+SYSCALL(sys_splice,sys_splice,compat_sys_splice)
+SYSCALL(sys_sync_file_range,sys_sync_file_range,compat_sys_s390_sync_file_range)
+SYSCALL(sys_tee,sys_tee,compat_sys_tee)
 SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice)
 NI_SYSCALL                                                     /* 310 sys_move_pages */
-SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
+SYSCALL(sys_getcpu,sys_getcpu,compat_sys_getcpu)
 SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait)
-SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
-SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper)      /* 315 */
+SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes)
+SYSCALL(sys_s390_fallocate,sys_fallocate,compat_sys_s390_fallocate)
+SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat)      /* 315 */
 SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd)
 NI_SYSCALL                                             /* 317 old sys_timer_fd */
-SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper)
-SYSCALL(sys_timerfd_create,sys_timerfd_create,sys_timerfd_create_wrapper)
+SYSCALL(sys_eventfd,sys_eventfd,compat_sys_eventfd)
+SYSCALL(sys_timerfd_create,sys_timerfd_create,compat_sys_timerfd_create)
 SYSCALL(sys_timerfd_settime,sys_timerfd_settime,compat_sys_timerfd_settime) /* 320 */
 SYSCALL(sys_timerfd_gettime,sys_timerfd_gettime,compat_sys_timerfd_gettime)
 SYSCALL(sys_signalfd4,sys_signalfd4,compat_sys_signalfd4)
-SYSCALL(sys_eventfd2,sys_eventfd2,sys_eventfd2_wrapper)
-SYSCALL(sys_inotify_init1,sys_inotify_init1,sys_inotify_init1_wrapper)
-SYSCALL(sys_pipe2,sys_pipe2,sys_pipe2_wrapper) /* 325 */
-SYSCALL(sys_dup3,sys_dup3,sys_dup3_wrapper)
-SYSCALL(sys_epoll_create1,sys_epoll_create1,sys_epoll_create1_wrapper)
+SYSCALL(sys_eventfd2,sys_eventfd2,compat_sys_eventfd2)
+SYSCALL(sys_inotify_init1,sys_inotify_init1,compat_sys_inotify_init1)
+SYSCALL(sys_pipe2,sys_pipe2,compat_sys_pipe2) /* 325 */
+SYSCALL(sys_dup3,sys_dup3,compat_sys_dup3)
+SYSCALL(sys_epoll_create1,sys_epoll_create1,compat_sys_epoll_create1)
 SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv)
 SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev)
 SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */
-SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
-SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper)
+SYSCALL(sys_perf_event_open,sys_perf_event_open,compat_sys_perf_event_open)
+SYSCALL(sys_fanotify_init,sys_fanotify_init,compat_sys_fanotify_init)
 SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark)
-SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper)
-SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */
+SYSCALL(sys_prlimit64,sys_prlimit64,compat_sys_prlimit64)
+SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,compat_sys_name_to_handle_at) /* 335 */
 SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at)
-SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime_wrapper)
-SYSCALL(sys_syncfs,sys_syncfs,sys_syncfs_wrapper)
-SYSCALL(sys_setns,sys_setns,sys_setns_wrapper)
-SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wrapper) /* 340 */
-SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper)
-SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper)
-SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper)
-SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper)
-SYSCALL(sys_sched_setattr,sys_sched_setattr,sys_sched_setattr_wrapper) /* 345 */
-SYSCALL(sys_sched_getattr,sys_sched_getattr,sys_sched_getattr_wrapper)
+SYSCALL(sys_clock_adjtime,sys_clock_adjtime,compat_sys_clock_adjtime)
+SYSCALL(sys_syncfs,sys_syncfs,compat_sys_syncfs)
+SYSCALL(sys_setns,sys_setns,compat_sys_setns)
+SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv) /* 340 */
+SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev)
+SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,compat_sys_s390_runtime_instr)
+SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp)
+SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
+SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */
+SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
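Each SYSCALL() row above lists the native 31-bit entry point, the native 64-bit entry point, and the compat entry used when a 31-bit task runs on a 64-bit kernel; the hunks replace the old hand-written sys32_*/_wrapper stubs in the compat column with compat_sys_* handlers. A minimal standalone sketch of how such a three-column list can be consumed (hypothetical macros and stubs, not the kernel's own):

/*
 * X-macro sketch: include the same SYSCALL(native31, native64, compat) list
 * several times with SYSCALL redefined, producing one dispatch table per ABI.
 */
#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_getuid_stub(void)        { return 1000; }
static long compat_sys_getuid_stub(void) { return 1000; }
static long sys_ni_stub(void)            { return -38; }  /* stands in for -ENOSYS */

/* One entry per syscall, mirroring the three-column layout of the table. */
#define SYSCALL_LIST \
	SYSCALL(sys_getuid_stub, sys_getuid_stub, compat_sys_getuid_stub) \
	SYSCALL(sys_getuid_stub, sys_ni_stub,     compat_sys_getuid_stub)

/* 64-bit table: keep only the second column. */
#define SYSCALL(native31, native64, compat) native64,
static const syscall_fn table_64[] = { SYSCALL_LIST };
#undef SYSCALL

/* Compat (31-bit task on 64-bit kernel) table: keep only the third column. */
#define SYSCALL(native31, native64, compat) compat,
static const syscall_fn table_compat[] = { SYSCALL_LIST };
#undef SYSCALL

int main(void)
{
	printf("64-bit entry 0: %ld, compat entry 1: %ld\n",
	       table_64[0](), table_compat[1]());
	return 0;
}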
index 4b2e3e317004a3cf5d725887c1136c9cbcdd4045..6298fed11cedf8bcfcb536474d33552a115f2fe2 100644 (file)
@@ -451,7 +451,6 @@ static int __init topology_init(void)
        }
        set_topology_timer();
 out:
-       update_cpu_masks();
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
index 8216c0e0b2e299494ee297540ed0d7015e006858..6f9cfa50037246d8d37bb5dd81a21d643ff3f544 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <asm/pgalloc.h>
 #include <asm/virtio-ccw.h>
 #include "kvm-s390.h"
 #include "trace.h"
@@ -86,9 +87,11 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
        switch (subcode) {
        case 3:
                vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
+               page_table_reset_pgste(current->mm, 0, TASK_SIZE);
                break;
        case 4:
                vcpu->run->s390_reset_flags = 0;
+               page_table_reset_pgste(current->mm, 0, TASK_SIZE);
                break;
        default:
                return -EOPNOTSUPP;
index e0676f390d57d22aeaf991a95c0a7d0e3458d369..10b5db3c9bc4a71d179ed02b994d7fdea6109311 100644 (file)
@@ -68,6 +68,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
+       { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
@@ -283,7 +284,11 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);
 
+       if (vcpu->arch.sie_block->cbrlo)
+               __free_page(__pfn_to_page(
+                               vcpu->arch.sie_block->cbrlo >> PAGE_SHIFT));
        free_page((unsigned long)(vcpu->arch.sie_block));
+
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
@@ -390,6 +395,8 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
+       struct page *cbrl;
+
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
@@ -401,6 +408,14 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
+       if (kvm_enabled_cmma()) {
+               cbrl = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (cbrl) {
+                       vcpu->arch.sie_block->ecb2 |= 0x80;
+                       vcpu->arch.sie_block->ecb2 &= ~0x08;
+                       vcpu->arch.sie_block->cbrlo = page_to_phys(cbrl);
+               }
+       }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
@@ -761,6 +776,16 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
        return rc;
 }
 
+bool kvm_enabled_cmma(void)
+{
+       if (!MACHINE_IS_LPAR)
+               return false;
+       /* only enable for z10 and later */
+       if (!MACHINE_HAS_EDAT1)
+               return false;
+       return true;
+}
+
 static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
        int rc, exit_reason;
index f9559b0bd620962d095851fc5c884a759b49996c..564514f410f45682272bdc5a3e5064306a9e9960 100644 (file)
@@ -156,6 +156,8 @@ void s390_vcpu_block(struct kvm_vcpu *vcpu);
 void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
 void exit_sie_sync(struct kvm_vcpu *vcpu);
+/* are we going to support cmma? */
+bool kvm_enabled_cmma(void);
 /* implemented in diag.c */
 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 
index 75beea632a10ee7c1bba185d344831e11ec684c5..aacb6b129914bc1c7d207d0587fc4fb2efe66ccf 100644 (file)
@@ -636,8 +636,49 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static int handle_essa(struct kvm_vcpu *vcpu)
+{
+       /* entries expected to be 1FF */
+       int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
+       unsigned long *cbrlo, cbrle;
+       struct gmap *gmap;
+       int i;
+
+       VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
+       gmap = vcpu->arch.gmap;
+       vcpu->stat.instruction_essa++;
+       if (!kvm_enabled_cmma() || !vcpu->arch.sie_block->cbrlo)
+               return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
+
+       if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+               return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+
+       if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
+               return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+       /* Rewind PSW to repeat the ESSA instruction */
+       vcpu->arch.sie_block->gpsw.addr =
+               __rewind_psw(vcpu->arch.sie_block->gpsw, 4);
+       vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
+       cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
+       down_read(&gmap->mm->mmap_sem);
+       for (i = 0; i < entries; ++i) {
+               cbrle = cbrlo[i];
+               if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
+                       /* invalid entry */
+                       break;
+               /* try to free backing */
+               __gmap_zap(cbrle, gmap);
+       }
+       up_read(&gmap->mm->mmap_sem);
+       if (i < entries)
+               return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+       return 0;
+}
+
 static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
+       [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
 };
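The new [0xab] entry hooks ESSA (opcode 0xb9ab) into the existing b9_handlers table, so the interception path can dispatch on the low opcode byte. An illustrative standalone sketch of that dispatch shape (names and return codes are placeholders, not the kernel's):

/* Sparse 256-entry handler table: unlisted opcodes stay NULL and fall
 * through to an "unsupported" result. */
#include <stdio.h>

typedef int (*intercept_handler_t)(void);

static int handle_epsw_stub(void) { puts("epsw"); return 0; }
static int handle_essa_stub(void) { puts("essa"); return 0; }

static const intercept_handler_t b9_handlers_sketch[256] = {
	[0x8d] = handle_epsw_stub,
	[0xab] = handle_essa_stub,
};

static int dispatch_b9(unsigned int ipa)
{
	intercept_handler_t handler = b9_handlers_sketch[ipa & 0x00ff];

	return handler ? handler() : -95;   /* stands in for -EOPNOTSUPP */
}

int main(void)
{
	dispatch_b9(0xb9ab);                /* ESSA -> handle_essa_stub */
	return dispatch_b9(0xb900) == -95 ? 0 : 1;
}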
 
index b068729e50ace9711774adab984f03a8a41e9338..e3fffe1dff513a05ec2839114b1952c65801721c 100644 (file)
@@ -2,8 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_pt.o find.o
+lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
-lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 lib-$(CONFIG_SMP) += spinlock.o
index 620d34d6487e5217ce485ab7a6a00e8a19a9171c..922003c1b90d388c96b26c33a19947dc1c8d95d0 100644 (file)
@@ -4,7 +4,7 @@
  * On s390x the bits are numbered:
  *   |0..............63|64............127|128...........191|192...........255|
  * and on s390:
- *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  *
  * The reason for this bit numbering is the fact that the hardware sets bits
  * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
index b1a22173d027b9bde6774194631d635932bc42ff..c7e0e81f4b4ebf73cf15664f048301faaffccabb 100644 (file)
@@ -6,7 +6,11 @@
 #ifndef __ARCH_S390_LIB_UACCESS_H
 #define __ARCH_S390_LIB_UACCESS_H
 
-extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
+unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n);
+unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n);
+unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n);
+unsigned long clear_user_pt(void __user *to, unsigned long n);
+unsigned long strnlen_user_pt(const char __user *src, unsigned long count);
+long strncpy_from_user_pt(char *dst, const char __user *src, long count);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */
index 4b7993bf69b96bd42f503e8772f29caedf5dd4b0..ae97b8df11aa12e154e3403fc080a603395d5346 100644 (file)
@@ -6,8 +6,11 @@
  *              Gerald Schaefer (gerald.schaefer@de.ibm.com)
  */
 
+#include <linux/jump_label.h>
 #include <linux/errno.h>
+#include <linux/init.h>
 #include <linux/mm.h>
+#include <asm/facility.h>
 #include <asm/uaccess.h>
 #include <asm/futex.h>
 #include "uaccess.h"
 #define SLR    "slgr"
 #endif
 
-static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
+static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;
+
+static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
+                                                unsigned long size)
 {
        register unsigned long reg0 asm("0") = 0x81UL;
        unsigned long tmp1, tmp2;
@@ -65,7 +71,16 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
        return size;
 }
 
-static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
+unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       if (static_key_true(&have_mvcos))
+               return copy_from_user_mvcos(to, from, n);
+       return copy_from_user_pt(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+                                              unsigned long size)
 {
        register unsigned long reg0 asm("0") = 0x810000UL;
        unsigned long tmp1, tmp2;
@@ -94,8 +109,16 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
        return size;
 }
 
-static size_t copy_in_user_mvcos(size_t size, void __user *to,
-                                const void __user *from)
+unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       if (static_key_true(&have_mvcos))
+               return copy_to_user_mvcos(to, from, n);
+       return copy_to_user_pt(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
+
+static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
+                                              unsigned long size)
 {
        register unsigned long reg0 asm("0") = 0x810081UL;
        unsigned long tmp1, tmp2;
@@ -117,7 +140,15 @@ static size_t copy_in_user_mvcos(size_t size, void __user *to,
        return size;
 }
 
-static size_t clear_user_mvcos(size_t size, void __user *to)
+unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+       if (static_key_true(&have_mvcos))
+               return copy_in_user_mvcos(to, from, n);
+       return copy_in_user_pt(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
 {
        register unsigned long reg0 asm("0") = 0x810000UL;
        unsigned long tmp1, tmp2;
@@ -145,17 +176,26 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
        return size;
 }
 
-static size_t strnlen_user_mvcos(size_t count, const char __user *src)
+unsigned long __clear_user(void __user *to, unsigned long size)
 {
-       size_t done, len, offset, len_str;
+       if (static_key_true(&have_mvcos))
+               return clear_user_mvcos(to, size);
+       return clear_user_pt(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline unsigned long strnlen_user_mvcos(const char __user *src,
+                                              unsigned long count)
+{
+       unsigned long done, len, offset, len_str;
        char buf[256];
 
        done = 0;
        do {
-               offset = (size_t)src & ~PAGE_MASK;
+               offset = (unsigned long)src & ~PAGE_MASK;
                len = min(256UL, PAGE_SIZE - offset);
                len = min(count - done, len);
-               if (copy_from_user_mvcos(len, src, buf))
+               if (copy_from_user_mvcos(buf, src, len))
                        return 0;
                len_str = strnlen(buf, len);
                done += len_str;
@@ -164,18 +204,26 @@ static size_t strnlen_user_mvcos(size_t count, const char __user *src)
        return done + 1;
 }
 
-static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
-                                     char *dst)
+unsigned long __strnlen_user(const char __user *src, unsigned long count)
 {
-       size_t done, len, offset, len_str;
+       if (static_key_true(&have_mvcos))
+               return strnlen_user_mvcos(src, count);
+       return strnlen_user_pt(src, count);
+}
+EXPORT_SYMBOL(__strnlen_user);
 
-       if (unlikely(!count))
+static inline long strncpy_from_user_mvcos(char *dst, const char __user *src,
+                                          long count)
+{
+       unsigned long done, len, offset, len_str;
+
+       if (unlikely(count <= 0))
                return 0;
        done = 0;
        do {
-               offset = (size_t)src & ~PAGE_MASK;
+               offset = (unsigned long)src & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
-               if (copy_from_user_mvcos(len, src, dst))
+               if (copy_from_user_mvcos(dst, src, len))
                        return -EFAULT;
                len_str = strnlen(dst, len);
                done += len_str;
@@ -185,13 +233,31 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
        return done;
 }
 
-struct uaccess_ops uaccess_mvcos = {
-       .copy_from_user = copy_from_user_mvcos,
-       .copy_to_user = copy_to_user_mvcos,
-       .copy_in_user = copy_in_user_mvcos,
-       .clear_user = clear_user_mvcos,
-       .strnlen_user = strnlen_user_mvcos,
-       .strncpy_from_user = strncpy_from_user_mvcos,
-       .futex_atomic_op = futex_atomic_op_pt,
-       .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
-};
+long __strncpy_from_user(char *dst, const char __user *src, long count)
+{
+       if (static_key_true(&have_mvcos))
+               return strncpy_from_user_mvcos(dst, src, count);
+       return strncpy_from_user_pt(dst, src, count);
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/*
+ * The uaccess page table walk variant can be enforced with the "uaccesspt"
+ * kernel parameter. This is mainly for debugging purposes.
+ */
+static int force_uaccess_pt __initdata;
+
+static int __init parse_uaccess_pt(char *__unused)
+{
+       force_uaccess_pt = 1;
+       return 0;
+}
+early_param("uaccesspt", parse_uaccess_pt);
+
+static int __init uaccess_init(void)
+{
+       if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27))
+               static_key_slow_dec(&have_mvcos);
+       return 0;
+}
+early_initcall(uaccess_init);
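The hunks above drop the uaccess_ops indirection: __copy_from_user() and friends now branch on a static key that uaccess_init() clears once at early boot when CONFIG_32BIT is set, test_facility(27) fails, or "uaccesspt" is passed on the command line. A userspace sketch of the same probe-once-then-branch shape, with a plain flag standing in for the static key and all names hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool have_fast_copy = true;      /* counterpart of the have_mvcos key */

/* Placeholder for the MVCOS-based fast path. */
static size_t copy_from_user_fast(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;                       /* bytes left uncopied */
}

/* Placeholder for the page-table-walk fallback. */
static size_t copy_from_user_fallback(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

static size_t copy_from_user_sketch(void *to, const void *from, size_t n)
{
	if (have_fast_copy)
		return copy_from_user_fast(to, from, n);
	return copy_from_user_fallback(to, from, n);
}

/* Mirrors uaccess_init(): fall back if the facility is missing or the
 * page-table variant was forced on the command line. */
static void uaccess_init_sketch(bool facility_present, bool force_pt)
{
	if (!facility_present || force_pt)
		have_fast_copy = false;
}

int main(void)
{
	char buf[4];

	uaccess_init_sketch(true, false);
	copy_from_user_sketch(buf, "abc", sizeof(buf));
	printf("%s via %s path\n", buf, have_fast_copy ? "fast" : "fallback");
	return 0;
}

Booting with "uaccesspt" has the same effect as force_pt in this sketch, via parse_uaccess_pt() above.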
index 61ebcc9ccb3472abe320fd8ed4939b4cc5074ebe..8d39760bae68f9f4e65bbbf4d42d5db25ef4958c 100644 (file)
@@ -22,7 +22,7 @@
 #define SLR    "slgr"
 #endif
 
-static size_t strnlen_kernel(size_t count, const char __user *src)
+static unsigned long strnlen_kernel(const char __user *src, unsigned long count)
 {
        register unsigned long reg0 asm("0") = 0UL;
        unsigned long tmp1, tmp2;
@@ -42,8 +42,8 @@ static size_t strnlen_kernel(size_t count, const char __user *src)
        return count;
 }
 
-static size_t copy_in_kernel(size_t count, void __user *to,
-                            const void __user *from)
+static unsigned long copy_in_kernel(void __user *to, const void __user *from,
+                                   unsigned long count)
 {
        unsigned long tmp1;
 
@@ -146,8 +146,8 @@ static unsigned long follow_table(struct mm_struct *mm,
 
 #endif /* CONFIG_64BIT */
 
-static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
-                                            size_t n, int write_user)
+static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr,
+                                          unsigned long n, int write_user)
 {
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, size, kaddr;
@@ -189,8 +189,7 @@ fault:
  * Do DAT for user address by page table walk, return kernel address.
  * This function needs to be called with current->mm->page_table_lock held.
  */
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
-                                                    int write)
+static inline unsigned long __dat_user_addr(unsigned long uaddr, int write)
 {
        struct mm_struct *mm = current->mm;
        unsigned long kaddr;
@@ -211,29 +210,29 @@ fault:
        return 0;
 }
 
-static size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
+unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n)
 {
-       size_t rc;
+       unsigned long rc;
 
        if (segment_eq(get_fs(), KERNEL_DS))
-               return copy_in_kernel(n, (void __user *) to, from);
+               return copy_in_kernel((void __user *) to, from, n);
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
        return rc;
 }
 
-static size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
+unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n)
 {
        if (segment_eq(get_fs(), KERNEL_DS))
-               return copy_in_kernel(n, to, (void __user *) from);
+               return copy_in_kernel(to, (void __user *) from, n);
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
 }
 
-static size_t clear_user_pt(size_t n, void __user *to)
+unsigned long clear_user_pt(void __user *to, unsigned long n)
 {
        void *zpage = (void *) empty_zero_page;
-       long done, size, ret;
+       unsigned long done, size, ret;
 
        done = 0;
        do {
@@ -242,7 +241,7 @@ static size_t clear_user_pt(size_t n, void __user *to)
                else
                        size = n - done;
                if (segment_eq(get_fs(), KERNEL_DS))
-                       ret = copy_in_kernel(n, to, (void __user *) zpage);
+                       ret = copy_in_kernel(to, (void __user *) zpage, n);
                else
                        ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
                done += size;
@@ -253,17 +252,17 @@ static size_t clear_user_pt(size_t n, void __user *to)
        return 0;
 }
 
-static size_t strnlen_user_pt(size_t count, const char __user *src)
+unsigned long strnlen_user_pt(const char __user *src, unsigned long count)
 {
        unsigned long uaddr = (unsigned long) src;
        struct mm_struct *mm = current->mm;
        unsigned long offset, done, len, kaddr;
-       size_t len_str;
+       unsigned long len_str;
 
        if (unlikely(!count))
                return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
-               return strnlen_kernel(count, src);
+               return strnlen_kernel(src, count);
        if (!mm)
                return 0;
        done = 0;
@@ -289,19 +288,18 @@ fault:
        goto retry;
 }
 
-static size_t strncpy_from_user_pt(size_t count, const char __user *src,
-                                  char *dst)
+long strncpy_from_user_pt(char *dst, const char __user *src, long count)
 {
-       size_t done, len, offset, len_str;
+       unsigned long done, len, offset, len_str;
 
-       if (unlikely(!count))
+       if (unlikely(count <= 0))
                return 0;
        done = 0;
        do {
-               offset = (size_t)src & ~PAGE_MASK;
+               offset = (unsigned long)src & ~PAGE_MASK;
                len = min(count - done, PAGE_SIZE - offset);
                if (segment_eq(get_fs(), KERNEL_DS)) {
-                       if (copy_in_kernel(len, (void __user *) dst, src))
+                       if (copy_in_kernel((void __user *) dst, src, len))
                                return -EFAULT;
                } else {
                        if (__user_copy_pt((unsigned long) src, dst, len, 0))
@@ -315,8 +313,8 @@ static size_t strncpy_from_user_pt(size_t count, const char __user *src,
        return done;
 }
 
-static size_t copy_in_user_pt(size_t n, void __user *to,
-                             const void __user *from)
+unsigned long copy_in_user_pt(void __user *to, const void __user *from,
+                             unsigned long n)
 {
        struct mm_struct *mm = current->mm;
        unsigned long offset_max, uaddr, done, size, error_code;
@@ -326,7 +324,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
        int write_user;
 
        if (segment_eq(get_fs(), KERNEL_DS))
-               return copy_in_kernel(n, to, from);
+               return copy_in_kernel(to, from, n);
        if (!mm)
                return n;
        done = 0;
@@ -411,7 +409,7 @@ static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
        return ret;
 }
 
-int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
+int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old)
 {
        int ret;
 
@@ -449,8 +447,8 @@ static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
        return ret;
 }
 
-int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
-                           u32 oldval, u32 newval)
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                 u32 oldval, u32 newval)
 {
        int ret;
 
@@ -471,14 +469,3 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
        put_page(virt_to_page(uaddr));
        return ret;
 }
-
-struct uaccess_ops uaccess_pt = {
-       .copy_from_user         = copy_from_user_pt,
-       .copy_to_user           = copy_to_user_pt,
-       .copy_in_user           = copy_in_user_pt,
-       .clear_user             = clear_user_pt,
-       .strnlen_user           = strnlen_user_pt,
-       .strncpy_from_user      = strncpy_from_user_pt,
-       .futex_atomic_op        = futex_atomic_op_pt,
-       .futex_atomic_cmpxchg   = futex_atomic_cmpxchg_pt,
-};
index d1e0e0c7a7e22e44f7f136a0bcef51bf412f1099..2a2e35416d2fe7fc4a835795f5c64bca444f1de4 100644 (file)
@@ -128,7 +128,7 @@ void memcpy_absolute(void *dest, void *src, size_t count)
 /*
  * Copy memory from kernel (real) to user (virtual)
  */
-int copy_to_user_real(void __user *dest, void *src, size_t count)
+int copy_to_user_real(void __user *dest, void *src, unsigned long count)
 {
        int offs = 0, size, rc;
        char *buf;
@@ -151,32 +151,6 @@ out:
        return rc;
 }
 
-/*
- * Copy memory from user (virtual) to kernel (real)
- */
-int copy_from_user_real(void *dest, void __user *src, size_t count)
-{
-       int offs = 0, size, rc;
-       char *buf;
-
-       buf = (char *) __get_free_page(GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       rc = -EFAULT;
-       while (offs < count) {
-               size = min(PAGE_SIZE, count - offs);
-               if (copy_from_user(buf, src + offs, size))
-                       goto out;
-               if (memcpy_real(dest + offs, buf, size))
-                       goto out;
-               offs += size;
-       }
-       rc = 0;
-out:
-       free_page((unsigned long) buf);
-       return rc;
-}
-
 /*
  * Check if physical address is within prefix or zero page
  */
index 3584ed9b20a183de8c58cb4521c99517c2532c9a..796c9320c709f5850bb0679778a28bc488773be2 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/quicklist.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
+#include <linux/swapops.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -594,6 +595,82 @@ unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
 }
 EXPORT_SYMBOL_GPL(gmap_fault);
 
+static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
+{
+       if (!non_swap_entry(entry))
+               dec_mm_counter(mm, MM_SWAPENTS);
+       else if (is_migration_entry(entry)) {
+               struct page *page = migration_entry_to_page(entry);
+
+               if (PageAnon(page))
+                       dec_mm_counter(mm, MM_ANONPAGES);
+               else
+                       dec_mm_counter(mm, MM_FILEPAGES);
+       }
+       free_swap_and_cache(entry);
+}
+
+/**
+ * The mm->mmap_sem lock must be held
+ */
+static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
+{
+       unsigned long ptev, pgstev;
+       spinlock_t *ptl;
+       pgste_t pgste;
+       pte_t *ptep, pte;
+
+       ptep = get_locked_pte(mm, address, &ptl);
+       if (unlikely(!ptep))
+               return;
+       pte = *ptep;
+       if (!pte_swap(pte))
+               goto out_pte;
+       /* Zap unused and logically-zero pages */
+       pgste = pgste_get_lock(ptep);
+       pgstev = pgste_val(pgste);
+       ptev = pte_val(pte);
+       if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
+           ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
+               gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
+               pte_clear(mm, address, ptep);
+       }
+       pgste_set_unlock(ptep, pgste);
+out_pte:
+       pte_unmap_unlock(*ptep, ptl);
+}
+
+/*
+ * this function is assumed to be called with mmap_sem held
+ */
+void __gmap_zap(unsigned long address, struct gmap *gmap)
+{
+       unsigned long *table, *segment_ptr;
+       unsigned long segment, pgstev, ptev;
+       struct gmap_pgtable *mp;
+       struct page *page;
+
+       segment_ptr = gmap_table_walk(address, gmap);
+       if (IS_ERR(segment_ptr))
+               return;
+       segment = *segment_ptr;
+       if (segment & _SEGMENT_ENTRY_INVALID)
+               return;
+       page = pfn_to_page(segment >> PAGE_SHIFT);
+       mp = (struct gmap_pgtable *) page->index;
+       address = mp->vmaddr | (address & ~PMD_MASK);
+       /* Page table is present */
+       table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
+       table = table + ((address >> 12) & 0xff);
+       pgstev = table[PTRS_PER_PTE];
+       ptev = table[0];
+       /* quick check, checked again with locks held */
+       if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
+           ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
+               gmap_zap_unused(gmap->mm, address);
+}
+EXPORT_SYMBOL_GPL(__gmap_zap);
+
 void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 {
 
@@ -671,7 +748,7 @@ EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
 /**
  * gmap_ipte_notify - mark a range of ptes for invalidation notification
  * @gmap: pointer to guest mapping meta data structure
- * @address: virtual address in the guest address space
+ * @start: virtual address in the guest address space
  * @len: size of area
  *
  * Returns 0 if for each page in the given range a gmap mapping exists and
@@ -725,13 +802,12 @@ EXPORT_SYMBOL_GPL(gmap_ipte_notify);
 /**
  * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
  * @mm: pointer to the process mm_struct
- * @addr: virtual address in the process address space
  * @pte: pointer to the page table entry
  *
  * This function is assumed to be called with the page table lock held
  * for the pte to notify.
  */
-void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
+void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte)
 {
        unsigned long segment_offset;
        struct gmap_notifier *nb;
@@ -802,6 +878,78 @@ static inline void page_table_free_pgste(unsigned long *table)
        __free_page(page);
 }
 
+static inline unsigned long page_table_reset_pte(struct mm_struct *mm,
+                       pmd_t *pmd, unsigned long addr, unsigned long end)
+{
+       pte_t *start_pte, *pte;
+       spinlock_t *ptl;
+       pgste_t pgste;
+
+       start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       pte = start_pte;
+       do {
+               pgste = pgste_get_lock(pte);
+               pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+               pgste_set_unlock(pte, pgste);
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+       pte_unmap_unlock(start_pte, ptl);
+
+       return addr;
+}
+
+static inline unsigned long page_table_reset_pmd(struct mm_struct *mm,
+                       pud_t *pud, unsigned long addr, unsigned long end)
+{
+       unsigned long next;
+       pmd_t *pmd;
+
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               if (pmd_none_or_clear_bad(pmd))
+                       continue;
+               next = page_table_reset_pte(mm, pmd, addr, next);
+       } while (pmd++, addr = next, addr != end);
+
+       return addr;
+}
+
+static inline unsigned long page_table_reset_pud(struct mm_struct *mm,
+                       pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+       unsigned long next;
+       pud_t *pud;
+
+       pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+               if (pud_none_or_clear_bad(pud))
+                       continue;
+               next = page_table_reset_pmd(mm, pud, addr, next);
+       } while (pud++, addr = next, addr != end);
+
+       return addr;
+}
+
+void page_table_reset_pgste(struct mm_struct *mm,
+                       unsigned long start, unsigned long end)
+{
+       unsigned long addr, next;
+       pgd_t *pgd;
+
+       addr = start;
+       down_read(&mm->mmap_sem);
+       pgd = pgd_offset(mm, addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               next = page_table_reset_pud(mm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
+       up_read(&mm->mmap_sem);
+}
+EXPORT_SYMBOL(page_table_reset_pgste);
+
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned long key, bool nq)
 {
@@ -1248,7 +1396,7 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 {
        struct list_head *lh = (struct list_head *) pgtable;
 
-       assert_spin_locked(&mm->page_table_lock);
+       assert_spin_locked(pmd_lockptr(mm, pmdp));
 
        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
@@ -1264,7 +1412,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
        pgtable_t pgtable;
        pte_t *ptep;
 
-       assert_spin_locked(&mm->page_table_lock);
+       assert_spin_locked(pmd_lockptr(mm, pmdp));
 
        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
index 75c69b402e05aa3ca12f3b7ee8ca78864886c1b4..c5c66840ac00a8fd8382584a6dabc5b699596b17 100644 (file)
@@ -139,7 +139,7 @@ void zpci_debug_exit_device(struct zpci_dev *zdev)
 int __init zpci_debug_init(void)
 {
        /* event trace buffer */
-       pci_debug_msg_id = debug_register("pci_msg", 16, 1, 16 * sizeof(long));
+       pci_debug_msg_id = debug_register("pci_msg", 8, 1, 8 * sizeof(long));
        if (!pci_debug_msg_id)
                return -EINVAL;
        debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
index 146b9d5e89f83ba0b41506aa369254d5869dbe88..2f947aba4bd4d248984c1c865248e71fd3a0445c 100644 (file)
@@ -1,10 +1,12 @@
 
 header-y +=
 
+
 generic-y += barrier.h
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += xor.h
-generic-y += preempt.h
-
diff --git a/arch/score/include/asm/cputime.h b/arch/score/include/asm/cputime.h
deleted file mode 100644 (file)
index 1fced99..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SCORE_CPUTIME_H
-#define _ASM_SCORE_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* _ASM_SCORE_CPUTIME_H */
index 6357710753d548f3a2ccf8c964d4053636796bc6..364d204298fae1af77191ac5f496b6dc5cdb30d1 100644 (file)
@@ -123,15 +123,6 @@ config SYS_SUPPORTS_NUMA
 config SYS_SUPPORTS_PCI
        bool
 
-config SYS_SUPPORTS_CMT
-       bool
-
-config SYS_SUPPORTS_MTU2
-       bool
-
-config SYS_SUPPORTS_TMU
-       bool
-
 config STACKTRACE_SUPPORT
        def_bool y
 
@@ -191,14 +182,14 @@ config CPU_SH3
        bool
        select CPU_HAS_INTEVT
        select CPU_HAS_SR_RB
-       select SYS_SUPPORTS_TMU
+       select SYS_SUPPORTS_SH_TMU
 
 config CPU_SH4
        bool
        select CPU_HAS_INTEVT
        select CPU_HAS_SR_RB
        select CPU_HAS_FPU if !CPU_SH4AL_DSP
-       select SYS_SUPPORTS_TMU
+       select SYS_SUPPORTS_SH_TMU
        select SYS_SUPPORTS_HUGETLBFS if MMU
 
 config CPU_SH4A
@@ -213,7 +204,7 @@ config CPU_SH4AL_DSP
 config CPU_SH5
        bool
        select CPU_HAS_FPU
-       select SYS_SUPPORTS_TMU
+       select SYS_SUPPORTS_SH_TMU
        select SYS_SUPPORTS_HUGETLBFS if MMU
 
 config CPU_SHX2
@@ -250,7 +241,7 @@ choice
 config CPU_SUBTYPE_SH7619
        bool "Support SH7619 processor"
        select CPU_SH2
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
 
 # SH-2A Processor Support
 
@@ -258,50 +249,50 @@ config CPU_SUBTYPE_SH7201
        bool "Support SH7201 processor"
        select CPU_SH2A
        select CPU_HAS_FPU
-       select SYS_SUPPORTS_MTU2
+       select SYS_SUPPORTS_SH_MTU2
  
 config CPU_SUBTYPE_SH7203
        bool "Support SH7203 processor"
        select CPU_SH2A
        select CPU_HAS_FPU
-       select SYS_SUPPORTS_CMT
-       select SYS_SUPPORTS_MTU2
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_MTU2
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select PINCTRL
 
 config CPU_SUBTYPE_SH7206
        bool "Support SH7206 processor"
        select CPU_SH2A
-       select SYS_SUPPORTS_CMT
-       select SYS_SUPPORTS_MTU2
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_MTU2
 
 config CPU_SUBTYPE_SH7263
        bool "Support SH7263 processor"
        select CPU_SH2A
        select CPU_HAS_FPU
-       select SYS_SUPPORTS_CMT
-       select SYS_SUPPORTS_MTU2
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_MTU2
 
 config CPU_SUBTYPE_SH7264
        bool "Support SH7264 processor"
        select CPU_SH2A
        select CPU_HAS_FPU
-       select SYS_SUPPORTS_CMT
-       select SYS_SUPPORTS_MTU2
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_MTU2
        select PINCTRL
 
 config CPU_SUBTYPE_SH7269
        bool "Support SH7269 processor"
        select CPU_SH2A
        select CPU_HAS_FPU
-       select SYS_SUPPORTS_CMT
-       select SYS_SUPPORTS_MTU2
+       select SYS_SUPPORTS_SH_CMT
+       select SYS_SUPPORTS_SH_MTU2
        select PINCTRL
 
 config CPU_SUBTYPE_MXG
        bool "Support MX-G processor"
        select CPU_SH2A
-       select SYS_SUPPORTS_MTU2
+       select SYS_SUPPORTS_SH_MTU2
        help
          Select MX-G if running on an R8A03022BG part.
 
@@ -354,7 +345,7 @@ config CPU_SUBTYPE_SH7720
        bool "Support SH7720 processor"
        select CPU_SH3
        select CPU_HAS_DSP
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select USB_ARCH_HAS_OHCI
        select USB_OHCI_SH if USB_OHCI_HCD
@@ -366,7 +357,7 @@ config CPU_SUBTYPE_SH7721
        bool "Support SH7721 processor"
        select CPU_SH3
        select CPU_HAS_DSP
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
        select USB_ARCH_HAS_OHCI
        select USB_OHCI_SH if USB_OHCI_HCD
        help
@@ -422,7 +413,7 @@ config CPU_SUBTYPE_SH7723
        select CPU_SHX2
        select ARCH_SHMOBILE
        select ARCH_SPARSEMEM_ENABLE
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select PINCTRL
        help
@@ -434,7 +425,7 @@ config CPU_SUBTYPE_SH7724
        select CPU_SHX2
        select ARCH_SHMOBILE
        select ARCH_SPARSEMEM_ENABLE
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select PINCTRL
        help
@@ -514,7 +505,7 @@ config CPU_SUBTYPE_SH7343
        bool "Support SH7343 processor"
        select CPU_SH4AL_DSP
        select ARCH_SHMOBILE
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
 
 config CPU_SUBTYPE_SH7722
        bool "Support SH7722 processor"
@@ -523,7 +514,7 @@ config CPU_SUBTYPE_SH7722
        select ARCH_SHMOBILE
        select ARCH_SPARSEMEM_ENABLE
        select SYS_SUPPORTS_NUMA
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select PINCTRL
 
@@ -534,7 +525,7 @@ config CPU_SUBTYPE_SH7366
        select ARCH_SHMOBILE
        select ARCH_SPARSEMEM_ENABLE
        select SYS_SUPPORTS_NUMA
-       select SYS_SUPPORTS_CMT
+       select SYS_SUPPORTS_SH_CMT
 
 endchoice
 
@@ -567,27 +558,6 @@ source "arch/sh/boards/Kconfig"
 
 menu "Timer and clock configuration"
 
-config SH_TIMER_TMU
-       bool "TMU timer driver"
-       depends on SYS_SUPPORTS_TMU
-       default y
-       help
-         This enables the build of the TMU timer driver.
-
-config SH_TIMER_CMT
-       bool "CMT timer driver"
-       depends on SYS_SUPPORTS_CMT
-       default y
-       help
-         This enables build of the CMT timer driver.
-
-config SH_TIMER_MTU2
-       bool "MTU2 timer driver"
-       depends on SYS_SUPPORTS_MTU2
-       default y
-       help
-         This enables build of the MTU2 timer driver.
-
 config SH_PCLK_FREQ
        int "Peripheral clock frequency (in Hz)"
        depends on SH_CLK_CPG_LEGACY
index 0cd7198a452425dbc96d7dbc72c7c61223527bb2..c19e47dacb31bceec56abeac4c96ca32ad3a2977 100644 (file)
@@ -8,18 +8,21 @@ generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
 generic-y += fcntl.h
+generic-y += hash.h
 generic-y += ioctl.h
 generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
+generic-y += mman.h
+generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += poll.h
-generic-y += mman.h
-generic-y += msgbuf.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
@@ -34,5 +37,3 @@ generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += ucontext.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 2ea4483fd7227c669796644bc47bd3e80b252222..be616ee0cf8799a6c7deeb058a7a92da6a8f213c 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/thread_info.h>
 #include <linux/irqflags.h>
 #include <linux/smp.h>
-#include <linux/cpuidle.h>
 #include <linux/atomic.h>
 #include <asm/pgalloc.h>
 #include <asm/smp.h>
@@ -40,8 +39,7 @@ void arch_cpu_idle_dead(void)
 
 void arch_cpu_idle(void)
 {
-       if (cpuidle_idle_call())
-               sh_idle();
+       sh_idle();
 }
 
 void __init select_idle_routine(void)
index 0833736afa3238185196bb10e408ebf9328e35ea..65a1ecd77f96005b8977df46076475266d0a37e6 100644 (file)
@@ -217,19 +217,6 @@ void __init init_IRQ(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_chip *chip = irq_data_get_irq_chip(data);
-
-       printk(KERN_INFO "IRQ%u: moving from cpu%u to cpu%u\n",
-              irq, data->node, cpu);
-
-       raw_spin_lock_irq(&desc->lock);
-       chip->irq_set_affinity(data, cpumask_of(cpu), false);
-       raw_spin_unlock_irq(&desc->lock);
-}
-
 /*
  * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
  * the affinity settings do not allow other CPUs, force them onto any
@@ -250,11 +237,8 @@ void migrate_irqs(void)
                                                    irq, cpu);
 
                                cpumask_setall(data->affinity);
-                               newcpu = cpumask_any_and(data->affinity,
-                                                        cpu_online_mask);
                        }
-
-                       route_irq(data, irq, newcpu);
+                       irq_set_affinity(irq, data->affinity);
                }
        }
 }
index 4b60a0c325ecaa1820b7b6619cfc4ea89ec7e8bf..a45821818003fce80d501b59653ac35eedcc6875 100644 (file)
@@ -6,15 +6,16 @@ generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
-generic-y += linkage.h
-generic-y += local64.h
-generic-y += mutex.h
+generic-y += hash.h
 generic-y += irq_regs.h
+generic-y += linkage.h
 generic-y += local.h
+generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += module.h
+generic-y += mutex.h
+generic-y += preempt.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += word-at-a-time.h
-generic-y += preempt.h
-generic-y += hash.h
index dd3bef4b9896c8921f2257cf5fab24399722495d..05710393959f174bc0bf9a8596ef5fbf9bafa391 100644 (file)
@@ -32,7 +32,6 @@
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
-extern int sparc64_multi_core;
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
index 1754390a426fb1e3c8eef4f1b947b04271bc794c..a2d10fc64fafd34ace3614383ba0500612da706f 100644 (file)
@@ -42,8 +42,6 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
 #define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
 #define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
-#define mc_capable()                           (sparc64_multi_core)
-#define smt_capable()                          (sparc64_multi_core)
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
index b90bf23e3aabf36950c03383afe215a8af6006ee..a1a4400d40258acbfa00b08b9672604e8cc22935 100644 (file)
@@ -896,10 +896,6 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
 
        mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
 
-#ifdef CONFIG_SMP
-       sparc64_multi_core = 1;
-#endif
-
        hp = mdesc_grab();
 
        set_core_ids(hp);
index 6b39125eb9271d986b32896fcd35015cbd752ef8..9a690d39c01b50f2465263063246565479a0c126 100644 (file)
@@ -555,9 +555,6 @@ static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
 
                cpu_data(cpuid).core_id = portid + 1;
                cpu_data(cpuid).proc_id = portid;
-#ifdef CONFIG_SMP
-               sparc64_multi_core = 1;
-#endif
        } else {
                cpu_data(cpuid).dcache_size =
                        of_getintprop_default(dp, "dcache-size", 16 * 1024);
index b085311dcd0ea9f81840e1fc2550c4f870e984eb..9781048161ab8865a3ce203074d4067312989b95 100644 (file)
@@ -53,8 +53,6 @@
 
 #include "cpumap.h"
 
-int sparc64_multi_core __read_mostly;
-
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
index c3d82b5f54ca8501960d9145990304d5b15e4a3c..3fddf64c7fc63ab81bb1ec0599b9cefa790ec6c8 100644 (file)
@@ -659,8 +659,7 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val
                ft->clock_tick_ref = cpu_data(cpu).clock_tick;
        }
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-           (val == CPUFREQ_RESUMECHANGE)) {
+           (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                cpu_data(cpu).clock_tick =
                        cpufreq_scale(ft->clock_tick_ref,
                                      ft->ref_freq,
@@ -733,7 +732,7 @@ void __irq_entry timer_interrupt(int irq, struct pt_regs *regs)
        irq_enter();
 
        local_cpu_data().irq0_irqs++;
-       kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
+       kstat_incr_irq_this_cpu(0);
 
        if (unlikely(!evt->event_handler)) {
                printk(KERN_WARNING
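
The sparc64_cpufreq_notifier() hunk above keeps rescaling the per-cpu clock_tick on frequency transitions: the reference tick captured at ref_freq is scaled by the ratio of the new frequency to ref_freq, which is the proportion cpufreq_scale() computes (multiply, then divide, with a 64-bit intermediate). A small sketch of that arithmetic with illustrative numbers:

#include <stdio.h>

static unsigned long demo_cpufreq_scale(unsigned long old,
                                        unsigned int div, unsigned int mult)
{
        unsigned long long v = (unsigned long long)old * mult;

        return (unsigned long)(v / div);        /* old * mult / div */
}

int main(void)
{
        unsigned long clock_tick_ref = 1500000000UL;    /* ticks at ref freq */
        unsigned int ref_freq = 1500000;                /* kHz */
        unsigned int new_freq = 1200000;                /* kHz */

        printf("clock_tick = %lu\n",
               demo_cpufreq_scale(clock_tick_ref, ref_freq, new_freq));
        return 0;
}
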
index 3793c75e45d982fb4ae72fb7a17c0f2478e98fea..0aa5675e7025e1f791da57ed9bc52133dd4121da 100644 (file)
@@ -11,6 +11,7 @@ generic-y += errno.h
 generic-y += exec.h
 generic-y += fb.h
 generic-y += fcntl.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
@@ -18,12 +19,14 @@ generic-y += ipcbuf.h
 generic-y += irq_regs.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
 generic-y += parport.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sembuf.h
@@ -38,5 +41,3 @@ generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += types.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 88a330dcdede1b06b85b1d7eba2da5580a97e99b..a5e4b6068213f4f147639a9ba428b120d8e9c886 100644 (file)
@@ -1,8 +1,28 @@
-generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
-generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
-generic-y += switch_to.h clkdev.h
-generic-y += trace_clock.h
-generic-y += preempt.h
-generic-y += hash.h
 generic-y += barrier.h
+generic-y += bug.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += device.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += ftrace.h
+generic-y += futex.h
+generic-y += hardirq.h
+generic-y += hash.h
+generic-y += hw_irq.h
+generic-y += io.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += mcs_spinlock.h
+generic-y += mutex.h
+generic-y += param.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += sections.h
+generic-y += switch_to.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += xor.h
index 3ef4f9d9bf5deaf74f06d254f8d6b6c3fae75ba6..1e5fb872a4aa60d3292705b8bc34a69a9038d85a 100644 (file)
@@ -16,6 +16,7 @@ generic-y += fcntl.h
 generic-y += ftrace.h
 generic-y += futex.h
 generic-y += hardirq.h
+generic-y += hash.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
@@ -24,6 +25,7 @@ generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += local.h
+generic-y += mcs_spinlock.h
 generic-y += mman.h
 generic-y += module.h
 generic-y += msgbuf.h
@@ -32,6 +34,7 @@ generic-y += parport.h
 generic-y += percpu.h
 generic-y += poll.h
 generic-y += posix_types.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
@@ -60,5 +63,3 @@ generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 0af5250d914fd7b52d01873571dd6facb7bba8d4..8453fe1342eaf137bcf2c707bc93ac4717ee27db 100644 (file)
@@ -1585,6 +1585,20 @@ config EFI_STUB
 
          See Documentation/efi-stub.txt for more information.
 
+config EFI_MIXED
+       bool "EFI mixed-mode support"
+       depends on EFI_STUB && X86_64
+       ---help---
+          Enabling this feature allows a 64-bit kernel to be booted
+          on a 32-bit firmware, provided that your CPU supports 64-bit
+          mode.
+
+          Note that it is not possible to boot a mixed-mode enabled
+          kernel via the EFI boot stub - a bootloader that supports
+          the EFI handover protocol must be used.
+
+          If unsure, say N.
+
 config SECCOMP
        def_bool y
        prompt "Enable seccomp to safely compute untrusted bytecode"
index 321a52ccf63ad58c982f5e8f6588f70267aae947..61bd2ad94281884f13b70f3bb9e397fdcef9338a 100644 (file)
@@ -81,6 +81,15 @@ config X86_PTDUMP
          kernel.
          If in doubt, say "N"
 
+config EFI_PGT_DUMP
+       bool "Dump the EFI pagetable"
+       depends on EFI && X86_PTDUMP
+       ---help---
+         Enable this if you want to dump the EFI page table before
+         enabling virtual mode. This can be used to debug miscellaneous
+         issues with the mapping of the EFI runtime regions into that
+         table.
+
 config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        default y
index eeda43abed6ec8d48837abaa5f6c18a40c38785a..3b9348a0c1a496244575dced7f89303ae78b168d 100644 (file)
@@ -82,8 +82,8 @@ else
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
-        # Don't autogenerate MMX or SSE instructions
-        KBUILD_CFLAGS += -mno-mmx -mno-sse
+        # Don't autogenerate traditional x87, MMX or SSE instructions
+        KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
 
        # Use -mpreferred-stack-boundary=3 if supported.
        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
@@ -152,6 +152,7 @@ cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTI
 
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
+asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 
index 878df7e88cd4d9c4293d9540432fdd3d7c78fc6e..abb9eba61b500192cd816dd9283fe8c8fb70b858 100644 (file)
@@ -80,7 +80,7 @@ targets += voffset.h
 $(obj)/voffset.h: vmlinux FORCE
        $(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
index a7677babf946dc406735d94512eca4765b31569d..1e6146137f8e56753aab7d68b0e0b45af3c5fed9 100644 (file)
 
 static efi_system_table_t *sys_table;
 
+static struct efi_config *efi_early;
+
+#define efi_call_early(f, ...)                                         \
+       efi_early->call(efi_early->f, __VA_ARGS__);
+
+#define BOOT_SERVICES(bits)                                            \
+static void setup_boot_services##bits(struct efi_config *c)            \
+{                                                                      \
+       efi_system_table_##bits##_t *table;                             \
+       efi_boot_services_##bits##_t *bt;                               \
+                                                                       \
+       table = (typeof(table))sys_table;                               \
+                                                                       \
+       c->text_output = table->con_out;                                \
+                                                                       \
+       bt = (typeof(bt))(unsigned long)(table->boottime);              \
+                                                                       \
+       c->allocate_pool = bt->allocate_pool;                           \
+       c->allocate_pages = bt->allocate_pages;                         \
+       c->get_memory_map = bt->get_memory_map;                         \
+       c->free_pool = bt->free_pool;                                   \
+       c->free_pages = bt->free_pages;                                 \
+       c->locate_handle = bt->locate_handle;                           \
+       c->handle_protocol = bt->handle_protocol;                       \
+       c->exit_boot_services = bt->exit_boot_services;                 \
+}
+BOOT_SERVICES(32);
+BOOT_SERVICES(64);
 
-#include "../../../../drivers/firmware/efi/efi-stub-helper.c"
+static void efi_printk(efi_system_table_t *, char *);
+static void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+
+static efi_status_t
+__file_size32(void *__fh, efi_char16_t *filename_16,
+             void **handle, u64 *file_sz)
+{
+       efi_file_handle_32_t *h, *fh = __fh;
+       efi_file_info_t *info;
+       efi_status_t status;
+       efi_guid_t info_guid = EFI_FILE_INFO_ID;
+       u32 info_sz;
+
+       status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
+                                EFI_FILE_MODE_READ, (u64)0);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table, "Failed to open file: ");
+               efi_char16_printk(sys_table, filename_16);
+               efi_printk(sys_table, "\n");
+               return status;
+       }
+
+       *handle = h;
+
+       info_sz = 0;
+       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+                                &info_sz, NULL);
+       if (status != EFI_BUFFER_TOO_SMALL) {
+               efi_printk(sys_table, "Failed to get file info size\n");
+               return status;
+       }
+
+grow:
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               info_sz, (void **)&info);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table, "Failed to alloc mem for file info\n");
+               return status;
+       }
+
+       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+                                &info_sz, info);
+       if (status == EFI_BUFFER_TOO_SMALL) {
+               efi_call_early(free_pool, info);
+               goto grow;
+       }
+
+       *file_sz = info->file_size;
+       efi_call_early(free_pool, info);
+
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table, "Failed to get initrd info\n");
+
+       return status;
+}
+
+static efi_status_t
+__file_size64(void *__fh, efi_char16_t *filename_16,
+             void **handle, u64 *file_sz)
+{
+       efi_file_handle_64_t *h, *fh = __fh;
+       efi_file_info_t *info;
+       efi_status_t status;
+       efi_guid_t info_guid = EFI_FILE_INFO_ID;
+       u32 info_sz;
 
+       status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
+                                EFI_FILE_MODE_READ, (u64)0);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table, "Failed to open file: ");
+               efi_char16_printk(sys_table, filename_16);
+               efi_printk(sys_table, "\n");
+               return status;
+       }
 
+       *handle = h;
+
+       info_sz = 0;
+       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+                                &info_sz, NULL);
+       if (status != EFI_BUFFER_TOO_SMALL) {
+               efi_printk(sys_table, "Failed to get file info size\n");
+               return status;
+       }
+
+grow:
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               info_sz, (void **)&info);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table, "Failed to alloc mem for file info\n");
+               return status;
+       }
+
+       status = efi_early->call((unsigned long)h->get_info, h, &info_guid,
+                                &info_sz, info);
+       if (status == EFI_BUFFER_TOO_SMALL) {
+               efi_call_early(free_pool, info);
+               goto grow;
+       }
+
+       *file_sz = info->file_size;
+       efi_call_early(free_pool, info);
+
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table, "Failed to get initrd info\n");
+
+       return status;
+}
+static efi_status_t
+efi_file_size(efi_system_table_t *sys_table, void *__fh,
+             efi_char16_t *filename_16, void **handle, u64 *file_sz)
+{
+       if (efi_early->is64)
+               return __file_size64(__fh, filename_16, handle, file_sz);
+
+       return __file_size32(__fh, filename_16, handle, file_sz);
+}
+
+static inline efi_status_t
+efi_file_read(void *__fh, void *handle, unsigned long *size, void *addr)
+{
+       unsigned long func;
+
+       if (efi_early->is64) {
+               efi_file_handle_64_t *fh = __fh;
+
+               func = (unsigned long)fh->read;
+               return efi_early->call(func, handle, size, addr);
+       } else {
+               efi_file_handle_32_t *fh = __fh;
+
+               func = (unsigned long)fh->read;
+               return efi_early->call(func, handle, size, addr);
+       }
+}
+
+static inline efi_status_t efi_file_close(void *__fh, void *handle)
+{
+       if (efi_early->is64) {
+               efi_file_handle_64_t *fh = __fh;
+
+               return efi_early->call((unsigned long)fh->close, handle);
+       } else {
+               efi_file_handle_32_t *fh = __fh;
+
+               return efi_early->call((unsigned long)fh->close, handle);
+       }
+}
+
+static inline efi_status_t __open_volume32(void *__image, void **__fh)
+{
+       efi_file_io_interface_t *io;
+       efi_loaded_image_32_t *image = __image;
+       efi_file_handle_32_t *fh;
+       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+       efi_status_t status;
+       void *handle = (void *)(unsigned long)image->device_handle;
+       unsigned long func;
+
+       status = efi_call_early(handle_protocol, handle,
+                               &fs_proto, (void **)&io);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table, "Failed to handle fs_proto\n");
+               return status;
+       }
+
+       func = (unsigned long)io->open_volume;
+       status = efi_early->call(func, io, &fh);
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table, "Failed to open volume\n");
+
+       *__fh = fh;
+       return status;
+}
+
+static inline efi_status_t __open_volume64(void *__image, void **__fh)
+{
+       efi_file_io_interface_t *io;
+       efi_loaded_image_64_t *image = __image;
+       efi_file_handle_64_t *fh;
+       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+       efi_status_t status;
+       void *handle = (void *)(unsigned long)image->device_handle;
+       unsigned long func;
+
+       status = efi_call_early(handle_protocol, handle,
+                               &fs_proto, (void **)&io);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table, "Failed to handle fs_proto\n");
+               return status;
+       }
+
+       func = (unsigned long)io->open_volume;
+       status = efi_early->call(func, io, &fh);
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table, "Failed to open volume\n");
+
+       *__fh = fh;
+       return status;
+}
+
+static inline efi_status_t
+efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
+{
+       if (efi_early->is64)
+               return __open_volume64(__image, __fh);
+
+       return __open_volume32(__image, __fh);
+}
+
+static void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
+{
+       unsigned long output_string;
+       size_t offset;
+
+       if (efi_early->is64) {
+               struct efi_simple_text_output_protocol_64 *out;
+               u64 *func;
+
+               offset = offsetof(typeof(*out), output_string);
+               output_string = efi_early->text_output + offset;
+               func = (u64 *)output_string;
+
+               efi_early->call(*func, efi_early->text_output, str);
+       } else {
+               struct efi_simple_text_output_protocol_32 *out;
+               u32 *func;
+
+               offset = offsetof(typeof(*out), output_string);
+               output_string = efi_early->text_output + offset;
+               func = (u32 *)output_string;
+
+               efi_early->call(*func, efi_early->text_output, str);
+       }
+}
+
+#include "../../../../drivers/firmware/efi/efi-stub-helper.c"
 
 static void find_bits(unsigned long mask, u8 *pos, u8 *size)
 {
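
The block above introduces struct efi_config based dispatch: setup_boot_services32/64() capture the firmware's boot-service entry points as plain integers, and every call then goes through the efi_call_early() macro or the efi_early->call trampoline, so the same C code can drive either a 32-bit or a 64-bit services table. A self-contained user-space sketch of that shape, with illustrative demo_* names and ordinary function pointers standing in for firmware services (the real struct stores u64 addresses and thunks into firmware):

#include <stdio.h>
#include <stdarg.h>

typedef unsigned long demo_status_t;

struct demo_config {
        unsigned long allocate_pool;    /* service "address" stored as integer */
        unsigned long free_pool;
        demo_status_t (*call)(unsigned long f, ...);
};

/* Pretend firmware services, all with the same one-argument prototype. */
static demo_status_t svc_allocate_pool(unsigned long size)
{
        printf("allocate_pool(%lu)\n", size);
        return 0;
}

static demo_status_t svc_free_pool(unsigned long addr)
{
        printf("free_pool(%#lx)\n", addr);
        return 0;
}

/* The trampoline: casts the stored address back and forwards one argument. */
static demo_status_t demo_call(unsigned long f, ...)
{
        va_list ap;
        unsigned long arg;
        demo_status_t ret;

        va_start(ap, f);
        arg = va_arg(ap, unsigned long);
        va_end(ap);

        ret = ((demo_status_t (*)(unsigned long))f)(arg);
        return ret;
}

static struct demo_config demo_early = {
        .allocate_pool  = (unsigned long)svc_allocate_pool,
        .free_pool      = (unsigned long)svc_free_pool,
        .call           = demo_call,
};

/* Mirrors the shape of efi_call_early(f, ...). */
#define demo_call_early(f, ...) demo_early.call(demo_early.f, __VA_ARGS__)

int main(void)
{
        demo_call_early(allocate_pool, 4096UL);
        demo_call_early(free_pool, 0x1000UL);
        return 0;
}
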
@@ -47,105 +309,97 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
        *size = len;
 }
 
-static efi_status_t setup_efi_pci(struct boot_params *params)
+static efi_status_t
+__setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
 {
-       efi_pci_io_protocol *pci;
+       struct pci_setup_rom *rom = NULL;
        efi_status_t status;
-       void **pci_handle;
-       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
-       unsigned long nr_pci, size = 0;
-       int i;
-       struct setup_data *data;
+       unsigned long size;
+       uint64_t attributes;
 
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+       status = efi_early->call(pci->attributes, pci,
+                                EfiPciIoAttributeOperationGet, 0, 0,
+                                &attributes);
+       if (status != EFI_SUCCESS)
+               return status;
 
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
+       if (!pci->romimage || !pci->romsize)
+               return EFI_INVALID_PARAMETER;
 
-       status = efi_call_phys5(sys_table->boottime->locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL, &pci_proto,
-                               NULL, &size, pci_handle);
+       size = pci->romsize + sizeof(*rom);
 
-       if (status == EFI_BUFFER_TOO_SMALL) {
-               status = efi_call_phys3(sys_table->boottime->allocate_pool,
-                                       EFI_LOADER_DATA, size, &pci_handle);
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
+       if (status != EFI_SUCCESS)
+               return status;
 
-               if (status != EFI_SUCCESS)
-                       return status;
+       memset(rom, 0, sizeof(*rom));
 
-               status = efi_call_phys5(sys_table->boottime->locate_handle,
-                                       EFI_LOCATE_BY_PROTOCOL, &pci_proto,
-                                       NULL, &size, pci_handle);
-       }
+       rom->data.type = SETUP_PCI;
+       rom->data.len = size - sizeof(struct setup_data);
+       rom->data.next = 0;
+       rom->pcilen = pci->romsize;
+       *__rom = rom;
 
-       if (status != EFI_SUCCESS)
-               goto free_handle;
-
-       nr_pci = size / sizeof(void *);
-       for (i = 0; i < nr_pci; i++) {
-               void *h = pci_handle[i];
-               uint64_t attributes;
-               struct pci_setup_rom *rom;
+       status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+                                PCI_VENDOR_ID, 1, &(rom->vendor));
 
-               status = efi_call_phys3(sys_table->boottime->handle_protocol,
-                                       h, &pci_proto, &pci);
+       if (status != EFI_SUCCESS)
+               goto free_struct;
 
-               if (status != EFI_SUCCESS)
-                       continue;
+       status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+                                PCI_DEVICE_ID, 1, &(rom->devid));
 
-               if (!pci)
-                       continue;
+       if (status != EFI_SUCCESS)
+               goto free_struct;
 
-#ifdef CONFIG_X86_64
-               status = efi_call_phys4(pci->attributes, pci,
-                                       EfiPciIoAttributeOperationGet, 0,
-                                       &attributes);
-#else
-               status = efi_call_phys5(pci->attributes, pci,
-                                       EfiPciIoAttributeOperationGet, 0, 0,
-                                       &attributes);
-#endif
-               if (status != EFI_SUCCESS)
-                       continue;
+       status = efi_early->call(pci->get_location, pci, &(rom->segment),
+                                &(rom->bus), &(rom->device), &(rom->function));
 
-               if (!pci->romimage || !pci->romsize)
-                       continue;
+       if (status != EFI_SUCCESS)
+               goto free_struct;
 
-               size = pci->romsize + sizeof(*rom);
+       memcpy(rom->romdata, pci->romimage, pci->romsize);
+       return status;
 
-               status = efi_call_phys3(sys_table->boottime->allocate_pool,
-                               EFI_LOADER_DATA, size, &rom);
+free_struct:
+       efi_call_early(free_pool, rom);
+       return status;
+}
 
-               if (status != EFI_SUCCESS)
-                       continue;
+static efi_status_t
+setup_efi_pci32(struct boot_params *params, void **pci_handle,
+               unsigned long size)
+{
+       efi_pci_io_protocol_32 *pci = NULL;
+       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+       u32 *handles = (u32 *)(unsigned long)pci_handle;
+       efi_status_t status;
+       unsigned long nr_pci;
+       struct setup_data *data;
+       int i;
 
-               rom->data.type = SETUP_PCI;
-               rom->data.len = size - sizeof(struct setup_data);
-               rom->data.next = 0;
-               rom->pcilen = pci->romsize;
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
 
-               status = efi_call_phys5(pci->pci.read, pci,
-                                       EfiPciIoWidthUint16, PCI_VENDOR_ID,
-                                       1, &(rom->vendor));
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
 
-               if (status != EFI_SUCCESS)
-                       goto free_struct;
+       nr_pci = size / sizeof(u32);
+       for (i = 0; i < nr_pci; i++) {
+               struct pci_setup_rom *rom = NULL;
+               u32 h = handles[i];
 
-               status = efi_call_phys5(pci->pci.read, pci,
-                                       EfiPciIoWidthUint16, PCI_DEVICE_ID,
-                                       1, &(rom->devid));
+               status = efi_call_early(handle_protocol, h,
+                                       &pci_proto, (void **)&pci);
 
                if (status != EFI_SUCCESS)
-                       goto free_struct;
+                       continue;
 
-               status = efi_call_phys5(pci->get_location, pci,
-                                       &(rom->segment), &(rom->bus),
-                                       &(rom->device), &(rom->function));
+               if (!pci)
+                       continue;
 
+               status = __setup_efi_pci32(pci, &rom);
                if (status != EFI_SUCCESS)
-                       goto free_struct;
-
-               memcpy(rom->romdata, pci->romimage, pci->romsize);
+                       continue;
 
                if (data)
                        data->next = (unsigned long)rom;
@@ -154,105 +408,155 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
 
                data = (struct setup_data *)rom;
 
-               continue;
-       free_struct:
-               efi_call_phys1(sys_table->boottime->free_pool, rom);
        }
 
-free_handle:
-       efi_call_phys1(sys_table->boottime->free_pool, pci_handle);
        return status;
 }
 
-/*
- * See if we have Graphics Output Protocol
- */
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
-                             unsigned long size)
+static efi_status_t
+__setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
 {
-       struct efi_graphics_output_protocol *gop, *first_gop;
-       struct efi_pixel_bitmask pixel_info;
-       unsigned long nr_gops;
+       struct pci_setup_rom *rom;
        efi_status_t status;
-       void **gop_handle;
-       u16 width, height;
-       u32 fb_base, fb_size;
-       u32 pixels_per_scan_line;
-       int pixel_format;
-       int i;
+       unsigned long size;
+       uint64_t attributes;
 
-       status = efi_call_phys3(sys_table->boottime->allocate_pool,
-                               EFI_LOADER_DATA, size, &gop_handle);
+       status = efi_early->call(pci->attributes, pci,
+                                EfiPciIoAttributeOperationGet, 0,
+                                &attributes);
        if (status != EFI_SUCCESS)
                return status;
 
-       status = efi_call_phys5(sys_table->boottime->locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL, proto,
-                               NULL, &size, gop_handle);
+       if (!pci->romimage || !pci->romsize)
+               return EFI_INVALID_PARAMETER;
+
+       size = pci->romsize + sizeof(*rom);
+
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
        if (status != EFI_SUCCESS)
-               goto free_handle;
+               return status;
 
-       first_gop = NULL;
+       rom->data.type = SETUP_PCI;
+       rom->data.len = size - sizeof(struct setup_data);
+       rom->data.next = 0;
+       rom->pcilen = pci->romsize;
+       *__rom = rom;
 
-       nr_gops = size / sizeof(void *);
-       for (i = 0; i < nr_gops; i++) {
-               struct efi_graphics_output_mode_info *info;
-               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
-               bool conout_found = false;
-               void *dummy;
-               void *h = gop_handle[i];
+       status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+                                PCI_VENDOR_ID, 1, &(rom->vendor));
+
+       if (status != EFI_SUCCESS)
+               goto free_struct;
+
+       status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
+                                PCI_DEVICE_ID, 1, &(rom->devid));
+
+       if (status != EFI_SUCCESS)
+               goto free_struct;
+
+       status = efi_early->call(pci->get_location, pci, &(rom->segment),
+                                &(rom->bus), &(rom->device), &(rom->function));
+
+       if (status != EFI_SUCCESS)
+               goto free_struct;
+
+       memcpy(rom->romdata, pci->romimage, pci->romsize);
+       return status;
+
+free_struct:
+       efi_call_early(free_pool, rom);
+       return status;
+
+}
+
+static efi_status_t
+setup_efi_pci64(struct boot_params *params, void **pci_handle,
+               unsigned long size)
+{
+       efi_pci_io_protocol_64 *pci = NULL;
+       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+       u64 *handles = (u64 *)(unsigned long)pci_handle;
+       efi_status_t status;
+       unsigned long nr_pci;
+       struct setup_data *data;
+       int i;
+
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       nr_pci = size / sizeof(u64);
+       for (i = 0; i < nr_pci; i++) {
+               struct pci_setup_rom *rom = NULL;
+               u64 h = handles[i];
+
+               status = efi_call_early(handle_protocol, h,
+                                       &pci_proto, (void **)&pci);
 
-               status = efi_call_phys3(sys_table->boottime->handle_protocol,
-                                       h, proto, &gop);
                if (status != EFI_SUCCESS)
                        continue;
 
-               status = efi_call_phys3(sys_table->boottime->handle_protocol,
-                                       h, &conout_proto, &dummy);
+               if (!pci)
+                       continue;
 
-               if (status == EFI_SUCCESS)
-                       conout_found = true;
+               status = __setup_efi_pci64(pci, &rom);
+               if (status != EFI_SUCCESS)
+                       continue;
 
-               status = efi_call_phys4(gop->query_mode, gop,
-                                       gop->mode->mode, &size, &info);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
-                       /*
-                        * Systems that use the UEFI Console Splitter may
-                        * provide multiple GOP devices, not all of which are
-                        * backed by real hardware. The workaround is to search
-                        * for a GOP implementing the ConOut protocol, and if
-                        * one isn't found, to just fall back to the first GOP.
-                        */
-                       width = info->horizontal_resolution;
-                       height = info->vertical_resolution;
-                       fb_base = gop->mode->frame_buffer_base;
-                       fb_size = gop->mode->frame_buffer_size;
-                       pixel_format = info->pixel_format;
-                       pixel_info = info->pixel_information;
-                       pixels_per_scan_line = info->pixels_per_scan_line;
+               if (data)
+                       data->next = (unsigned long)rom;
+               else
+                       params->hdr.setup_data = (unsigned long)rom;
+
+               data = (struct setup_data *)rom;
 
-                       /*
-                        * Once we've found a GOP supporting ConOut,
-                        * don't bother looking any further.
-                        */
-                       first_gop = gop;
-                       if (conout_found)
-                               break;
-               }
        }
 
-       /* Did we find any GOPs? */
-       if (!first_gop)
+       return status;
+}
+
+static efi_status_t setup_efi_pci(struct boot_params *params)
+{
+       efi_status_t status;
+       void **pci_handle = NULL;
+       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
+       unsigned long size = 0;
+
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               &pci_proto, NULL, &size, pci_handle);
+
+       if (status == EFI_BUFFER_TOO_SMALL) {
+               status = efi_call_early(allocate_pool,
+                                       EFI_LOADER_DATA,
+                                       size, (void **)&pci_handle);
+
+               if (status != EFI_SUCCESS)
+                       return status;
+
+               status = efi_call_early(locate_handle,
+                                       EFI_LOCATE_BY_PROTOCOL, &pci_proto,
+                                       NULL, &size, pci_handle);
+       }
+
+       if (status != EFI_SUCCESS)
                goto free_handle;
 
-       /* EFI framebuffer */
-       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+       if (efi_early->is64)
+               status = setup_efi_pci64(params, pci_handle, size);
+       else
+               status = setup_efi_pci32(params, pci_handle, size);
 
-       si->lfb_width = width;
-       si->lfb_height = height;
-       si->lfb_base = fb_base;
-       si->pages = 1;
+free_handle:
+       efi_call_early(free_pool, pci_handle);
+       return status;
+}
 
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+                struct efi_pixel_bitmask pixel_info, int pixel_format)
+{
        if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
                si->lfb_depth = 32;
                si->lfb_linelength = pixels_per_scan_line * 4;
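
setup_efi_pci32/64() above hang each discovered option ROM off the boot_params setup_data chain: walk to the current tail, then link the new pci_setup_rom entry (or start the list at params->hdr.setup_data if it is empty). A stand-alone sketch of that append pattern, using illustrative demo_* types where next holds an address stored as a 64-bit integer and 0 terminates the chain:

#include <stdint.h>
#include <stdio.h>

struct demo_setup_data {
        uint64_t next;          /* address of the next node, 0 = end of list */
        uint32_t type;
        uint32_t len;
};

static uint64_t head;           /* stands in for params->hdr.setup_data */

static void demo_append(struct demo_setup_data *node)
{
        struct demo_setup_data *data =
                (struct demo_setup_data *)(unsigned long)head;

        node->next = 0;

        if (!data) {
                head = (unsigned long)node;     /* empty list: become the head */
                return;
        }

        while (data->next)                      /* walk to the tail */
                data = (struct demo_setup_data *)(unsigned long)data->next;

        data->next = (unsigned long)node;       /* link the new entry */
}

int main(void)
{
        struct demo_setup_data a = { .type = 1 }, b = { .type = 2 };

        demo_append(&a);
        demo_append(&b);

        for (struct demo_setup_data *d = (void *)(unsigned long)head; d;
             d = (struct demo_setup_data *)(unsigned long)d->next)
                printf("node type %u\n", (unsigned)d->type);
        return 0;
}
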
@@ -297,62 +601,319 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
                si->rsvd_size = 0;
                si->rsvd_pos = 0;
        }
+}
+
+static efi_status_t
+__gop_query32(struct efi_graphics_output_protocol_32 *gop32,
+             struct efi_graphics_output_mode_info **info,
+             unsigned long *size, u32 *fb_base)
+{
+       struct efi_graphics_output_protocol_mode_32 *mode;
+       efi_status_t status;
+       unsigned long m;
+
+       m = gop32->mode;
+       mode = (struct efi_graphics_output_protocol_mode_32 *)m;
+
+       status = efi_early->call(gop32->query_mode, gop32,
+                                mode->mode, size, info);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       *fb_base = mode->frame_buffer_base;
+       return status;
+}
+
+static efi_status_t
+setup_gop32(struct screen_info *si, efi_guid_t *proto,
+           unsigned long size, void **gop_handle)
+{
+       struct efi_graphics_output_protocol_32 *gop32, *first_gop;
+       unsigned long nr_gops;
+       u16 width, height;
+       u32 pixels_per_scan_line;
+       u32 fb_base;
+       struct efi_pixel_bitmask pixel_info;
+       int pixel_format;
+       efi_status_t status;
+       u32 *handles = (u32 *)(unsigned long)gop_handle;
+       int i;
+
+       first_gop = NULL;
+       gop32 = NULL;
+
+       nr_gops = size / sizeof(u32);
+       for (i = 0; i < nr_gops; i++) {
+               struct efi_graphics_output_mode_info *info = NULL;
+               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+               bool conout_found = false;
+               void *dummy = NULL;
+               u32 h = handles[i];
+
+               status = efi_call_early(handle_protocol, h,
+                                       proto, (void **)&gop32);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               status = efi_call_early(handle_protocol, h,
+                                       &conout_proto, &dummy);
+               if (status == EFI_SUCCESS)
+                       conout_found = true;
+
+               status = __gop_query32(gop32, &info, &size, &fb_base);
+               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+                       /*
+                        * Systems that use the UEFI Console Splitter may
+                        * provide multiple GOP devices, not all of which are
+                        * backed by real hardware. The workaround is to search
+                        * for a GOP implementing the ConOut protocol, and if
+                        * one isn't found, to just fall back to the first GOP.
+                        */
+                       width = info->horizontal_resolution;
+                       height = info->vertical_resolution;
+                       pixel_format = info->pixel_format;
+                       pixel_info = info->pixel_information;
+                       pixels_per_scan_line = info->pixels_per_scan_line;
+
+                       /*
+                        * Once we've found a GOP supporting ConOut,
+                        * don't bother looking any further.
+                        */
+                       first_gop = gop32;
+                       if (conout_found)
+                               break;
+               }
+       }
+
+       /* Did we find any GOPs? */
+       if (!first_gop)
+               goto out;
+
+       /* EFI framebuffer */
+       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+       si->lfb_width = width;
+       si->lfb_height = height;
+       si->lfb_base = fb_base;
+       si->pages = 1;
+
+       setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
 
        si->lfb_size = si->lfb_linelength * si->lfb_height;
 
        si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+       return status;
+}
 
-free_handle:
-       efi_call_phys1(sys_table->boottime->free_pool, gop_handle);
+static efi_status_t
+__gop_query64(struct efi_graphics_output_protocol_64 *gop64,
+             struct efi_graphics_output_mode_info **info,
+             unsigned long *size, u32 *fb_base)
+{
+       struct efi_graphics_output_protocol_mode_64 *mode;
+       efi_status_t status;
+       unsigned long m;
+
+       m = gop64->mode;
+       mode = (struct efi_graphics_output_protocol_mode_64 *)m;
+
+       status = efi_early->call(gop64->query_mode, gop64,
+                                mode->mode, size, info);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       *fb_base = mode->frame_buffer_base;
+       return status;
+}
+
+static efi_status_t
+setup_gop64(struct screen_info *si, efi_guid_t *proto,
+           unsigned long size, void **gop_handle)
+{
+       struct efi_graphics_output_protocol_64 *gop64, *first_gop;
+       unsigned long nr_gops;
+       u16 width, height;
+       u32 pixels_per_scan_line;
+       u32 fb_base;
+       struct efi_pixel_bitmask pixel_info;
+       int pixel_format;
+       efi_status_t status;
+       u64 *handles = (u64 *)(unsigned long)gop_handle;
+       int i;
+
+       first_gop = NULL;
+       gop64 = NULL;
+
+       nr_gops = size / sizeof(u64);
+       for (i = 0; i < nr_gops; i++) {
+               struct efi_graphics_output_mode_info *info = NULL;
+               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+               bool conout_found = false;
+               void *dummy = NULL;
+               u64 h = handles[i];
+
+               status = efi_call_early(handle_protocol, h,
+                                       proto, (void **)&gop64);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               status = efi_call_early(handle_protocol, h,
+                                       &conout_proto, &dummy);
+               if (status == EFI_SUCCESS)
+                       conout_found = true;
+
+               status = __gop_query64(gop64, &info, &size, &fb_base);
+               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+                       /*
+                        * Systems that use the UEFI Console Splitter may
+                        * provide multiple GOP devices, not all of which are
+                        * backed by real hardware. The workaround is to search
+                        * for a GOP implementing the ConOut protocol, and if
+                        * one isn't found, to just fall back to the first GOP.
+                        */
+                       width = info->horizontal_resolution;
+                       height = info->vertical_resolution;
+                       pixel_format = info->pixel_format;
+                       pixel_info = info->pixel_information;
+                       pixels_per_scan_line = info->pixels_per_scan_line;
+
+                       /*
+                        * Once we've found a GOP supporting ConOut,
+                        * don't bother looking any further.
+                        */
+                       first_gop = gop64;
+                       if (conout_found)
+                               break;
+               }
+       }
+
+       /* Did we find any GOPs? */
+       if (!first_gop)
+               goto out;
+
+       /* EFI framebuffer */
+       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+       si->lfb_width = width;
+       si->lfb_height = height;
+       si->lfb_base = fb_base;
+       si->pages = 1;
+
+       setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+       si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+       si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
        return status;
 }
 
 /*
- * See if we have Universal Graphics Adapter (UGA) protocol
+ * See if we have Graphics Output Protocol
  */
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
+static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
                              unsigned long size)
 {
-       struct efi_uga_draw_protocol *uga, *first_uga;
-       unsigned long nr_ugas;
        efi_status_t status;
-       u32 width, height;
-       void **uga_handle = NULL;
-       int i;
+       void **gop_handle = NULL;
 
-       status = efi_call_phys3(sys_table->boottime->allocate_pool,
-                               EFI_LOADER_DATA, size, &uga_handle);
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               size, (void **)&gop_handle);
        if (status != EFI_SUCCESS)
                return status;
 
-       status = efi_call_phys5(sys_table->boottime->locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL, uga_proto,
-                               NULL, &size, uga_handle);
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               proto, NULL, &size, gop_handle);
        if (status != EFI_SUCCESS)
                goto free_handle;
 
+       if (efi_early->is64)
+               status = setup_gop64(si, proto, size, gop_handle);
+       else
+               status = setup_gop32(si, proto, size, gop_handle);
+
+free_handle:
+       efi_call_early(free_pool, gop_handle);
+       return status;
+}
+
+static efi_status_t
+setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+{
+       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
+       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+       unsigned long nr_ugas;
+       u32 *handles = (u32 *)uga_handle;
+       efi_status_t status;
+       int i;
+
        first_uga = NULL;
+       nr_ugas = size / sizeof(u32);
+       for (i = 0; i < nr_ugas; i++) {
+               efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
+               u32 w, h, depth, refresh;
+               void *pciio;
+               u32 handle = handles[i];
+
+               status = efi_call_early(handle_protocol, handle,
+                                       &uga_proto, (void **)&uga);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
+
+               status = efi_early->call((unsigned long)uga->get_mode, uga,
+                                        &w, &h, &depth, &refresh);
+               if (status == EFI_SUCCESS && (!first_uga || pciio)) {
+                       *width = w;
+                       *height = h;
+
+                       /*
+                        * Once we've found a UGA supporting PCIIO,
+                        * don't bother looking any further.
+                        */
+                       if (pciio)
+                               break;
 
-       nr_ugas = size / sizeof(void *);
+                       first_uga = uga;
+               }
+       }
+
+       return status;
+}
+
+static efi_status_t
+setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+{
+       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
+       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+       unsigned long nr_ugas;
+       u64 *handles = (u64 *)uga_handle;
+       efi_status_t status;
+       int i;
+
+       first_uga = NULL;
+       nr_ugas = size / sizeof(u64);
        for (i = 0; i < nr_ugas; i++) {
                efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
-               void *handle = uga_handle[i];
                u32 w, h, depth, refresh;
                void *pciio;
+               u64 handle = handles[i];
 
-               status = efi_call_phys3(sys_table->boottime->handle_protocol,
-                                       handle, uga_proto, &uga);
+               status = efi_call_early(handle_protocol, handle,
+                                       &uga_proto, (void **)&uga);
                if (status != EFI_SUCCESS)
                        continue;
 
-               efi_call_phys3(sys_table->boottime->handle_protocol,
-                              handle, &pciio_proto, &pciio);
+               efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
 
-               status = efi_call_phys5(uga->get_mode, uga, &w, &h,
-                                       &depth, &refresh);
+               status = efi_early->call((unsigned long)uga->get_mode, uga,
+                                        &w, &h, &depth, &refresh);
                if (status == EFI_SUCCESS && (!first_uga || pciio)) {
-                       width = w;
-                       height = h;
+                       *width = w;
+                       *height = h;
 
                        /*
                         * Once we've found a UGA supporting PCIIO,
@@ -365,7 +926,39 @@ static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
                }
        }
 
-       if (!first_uga)
+       return status;
+}
+
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
+static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
+                             unsigned long size)
+{
+       efi_status_t status;
+       u32 width, height;
+       void **uga_handle = NULL;
+
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               size, (void **)&uga_handle);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               uga_proto, NULL, &size, uga_handle);
+       if (status != EFI_SUCCESS)
+               goto free_handle;
+
+       height = 0;
+       width = 0;
+
+       if (efi_early->is64)
+               status = setup_uga64(uga_handle, size, &width, &height);
+       else
+               status = setup_uga32(uga_handle, size, &width, &height);
+
+       if (!width && !height)
                goto free_handle;
 
        /* EFI framebuffer */
@@ -384,9 +977,8 @@ static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
        si->rsvd_size = 8;
        si->rsvd_pos = 24;
 
-
 free_handle:
-       efi_call_phys1(sys_table->boottime->free_pool, uga_handle);
+       efi_call_early(free_pool, uga_handle);
        return status;
 }
 
@@ -404,29 +996,28 @@ void setup_graphics(struct boot_params *boot_params)
        memset(si, 0, sizeof(*si));
 
        size = 0;
-       status = efi_call_phys5(sys_table->boottime->locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL, &graphics_proto,
-                               NULL, &size, gop_handle);
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               &graphics_proto, NULL, &size, gop_handle);
        if (status == EFI_BUFFER_TOO_SMALL)
                status = setup_gop(si, &graphics_proto, size);
 
        if (status != EFI_SUCCESS) {
                size = 0;
-               status = efi_call_phys5(sys_table->boottime->locate_handle,
-                                       EFI_LOCATE_BY_PROTOCOL, &uga_proto,
-                                       NULL, &size, uga_handle);
+               status = efi_call_early(locate_handle,
+                                       EFI_LOCATE_BY_PROTOCOL,
+                                       &uga_proto, NULL, &size, uga_handle);
                if (status == EFI_BUFFER_TOO_SMALL)
                        setup_uga(si, &uga_proto, size);
        }
 }
 
-
 /*
  * Because the x86 boot code expects to be passed a boot_params we
  * need to create one ourselves (usually the bootloader would create
  * one for us).
  */
-struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
+struct boot_params *make_boot_params(struct efi_config *c)
 {
        struct boot_params *boot_params;
        struct sys_desc_table *sdt;
@@ -434,7 +1025,7 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
        struct setup_header *hdr;
        struct efi_info *efi;
        efi_loaded_image_t *image;
-       void *options;
+       void *options, *handle;
        efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
        int options_size = 0;
        efi_status_t status;
@@ -445,14 +1036,21 @@ struct boot_params *make_boot_params(void *handle, efi_system_table_t *_table)
        unsigned long ramdisk_addr;
        unsigned long ramdisk_size;
 
-       sys_table = _table;
+       efi_early = c;
+       sys_table = (efi_system_table_t *)(unsigned long)efi_early->table;
+       handle = (void *)(unsigned long)efi_early->image_handle;
 
        /* Check if we were booted by the EFI firmware */
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                return NULL;
 
-       status = efi_call_phys3(sys_table->boottime->handle_protocol,
-                               handle, &proto, (void *)&image);
+       if (efi_early->is64)
+               setup_boot_services64(efi_early);
+       else
+               setup_boot_services32(efi_early);
+
+       status = efi_call_early(handle_protocol, handle,
+                               &proto, (void *)&image);
        if (status != EFI_SUCCESS) {
                efi_printk(sys_table, "Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
                return NULL;
@@ -641,14 +1239,13 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
                sizeof(struct e820entry) * nr_desc;
 
        if (*e820ext) {
-               efi_call_phys1(sys_table->boottime->free_pool, *e820ext);
+               efi_call_early(free_pool, *e820ext);
                *e820ext = NULL;
                *e820ext_size = 0;
        }
 
-       status = efi_call_phys3(sys_table->boottime->allocate_pool,
-                               EFI_LOADER_DATA, size, e820ext);
-
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               size, (void **)e820ext);
        if (status == EFI_SUCCESS)
                *e820ext_size = size;
 
@@ -656,12 +1253,13 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
 }
 
 static efi_status_t exit_boot(struct boot_params *boot_params,
-                             void *handle)
+                             void *handle, bool is64)
 {
        struct efi_info *efi = &boot_params->efi_info;
        unsigned long map_sz, key, desc_size;
        efi_memory_desc_t *mem_map;
        struct setup_data *e820ext;
+       const char *signature;
        __u32 e820ext_size;
        __u32 nr_desc, prev_nr_desc;
        efi_status_t status;
@@ -691,11 +1289,13 @@ get_map:
                if (status != EFI_SUCCESS)
                        goto free_mem_map;
 
-               efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+               efi_call_early(free_pool, mem_map);
                goto get_map; /* Allocated memory, get map again */
        }
 
-       memcpy(&efi->efi_loader_signature, EFI_LOADER_SIGNATURE, sizeof(__u32));
+       signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+       memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+
        efi->efi_systab = (unsigned long)sys_table;
        efi->efi_memdesc_size = desc_size;
        efi->efi_memdesc_version = desc_version;
@@ -708,8 +1308,7 @@ get_map:
 #endif
 
        /* Might as well exit boot services now */
-       status = efi_call_phys2(sys_table->boottime->exit_boot_services,
-                               handle, key);
+       status = efi_call_early(exit_boot_services, handle, key);
        if (status != EFI_SUCCESS) {
                /*
                 * ExitBootServices() will fail if any of the event
@@ -722,7 +1321,7 @@ get_map:
                        goto free_mem_map;
 
                called_exit = true;
-               efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+               efi_call_early(free_pool, mem_map);
                goto get_map;
        }
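
exit_boot() above tolerates one ExitBootServices() failure: if the call fails because the memory-map key went stale (a firmware event fired between GetMemoryMap and the exit), the map is freed, fetched again, and the exit is retried, with called_exit making sure this happens only once. A toy sketch of that retry flow, with demo_* stand-ins simulating the stale-key case:

#include <stdbool.h>
#include <stdio.h>

static int map_key = 1;
static bool event_fired;

static int demo_get_memory_map(int *key)
{
        *key = map_key;
        return 0;
}

static int demo_exit_boot_services(int key)
{
        return key == map_key ? 0 : -1;         /* stale key: refuse to exit */
}

int main(void)
{
        bool called_exit = false;
        int key;

again:
        demo_get_memory_map(&key);

        if (!event_fired) {                     /* simulate one firmware event */
                map_key++;
                event_fired = true;
        }

        if (demo_exit_boot_services(key) != 0) {
                if (called_exit) {
                        printf("giving up\n");
                        return 1;
                }
                called_exit = true;
                goto again;                     /* one retry with a fresh map */
        }

        printf("boot services exited\n");
        return 0;
}
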
 
@@ -736,23 +1335,31 @@ get_map:
        return EFI_SUCCESS;
 
 free_mem_map:
-       efi_call_phys1(sys_table->boottime->free_pool, mem_map);
+       efi_call_early(free_pool, mem_map);
        return status;
 }
 
-
 /*
  * On success we return a pointer to a boot_params structure, and NULL
  * on failure.
  */
-struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
+struct boot_params *efi_main(struct efi_config *c,
                             struct boot_params *boot_params)
 {
-       struct desc_ptr *gdt;
+       struct desc_ptr *gdt = NULL;
        efi_loaded_image_t *image;
        struct setup_header *hdr = &boot_params->hdr;
        efi_status_t status;
        struct desc_struct *desc;
+       void *handle;
+       efi_system_table_t *_table;
+       bool is64;
+
+       efi_early = c;
+
+       _table = (efi_system_table_t *)(unsigned long)efi_early->table;
+       handle = (void *)(unsigned long)efi_early->image_handle;
+       is64 = efi_early->is64;
 
        sys_table = _table;
 
@@ -760,13 +1367,17 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                goto fail;
 
+       if (is64)
+               setup_boot_services64(efi_early);
+       else
+               setup_boot_services32(efi_early);
+
        setup_graphics(boot_params);
 
        setup_efi_pci(boot_params);
 
-       status = efi_call_phys3(sys_table->boottime->allocate_pool,
-                               EFI_LOADER_DATA, sizeof(*gdt),
-                               (void **)&gdt);
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               sizeof(*gdt), (void **)&gdt);
        if (status != EFI_SUCCESS) {
                efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
                goto fail;
@@ -797,7 +1408,7 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
                hdr->code32_start = bzimage_addr;
        }
 
-       status = exit_boot(boot_params, handle);
+       status = exit_boot(boot_params, handle, is64);
        if (status != EFI_SUCCESS)
                goto fail;
 
index 81b6b652b46a948440601964e4e0f114563f4f5d..c88c31ecad1231bb2fc45257a8d98bafe7bc519b 100644 (file)
@@ -37,6 +37,24 @@ struct efi_graphics_output_mode_info {
        u32 pixels_per_scan_line;
 } __packed;
 
+struct efi_graphics_output_protocol_mode_32 {
+       u32 max_mode;
+       u32 mode;
+       u32 info;
+       u32 size_of_info;
+       u64 frame_buffer_base;
+       u32 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_64 {
+       u32 max_mode;
+       u32 mode;
+       u64 info;
+       u64 size_of_info;
+       u64 frame_buffer_base;
+       u64 frame_buffer_size;
+} __packed;
+
 struct efi_graphics_output_protocol_mode {
        u32 max_mode;
        u32 mode;
@@ -46,6 +64,20 @@ struct efi_graphics_output_protocol_mode {
        unsigned long frame_buffer_size;
 } __packed;
 
+struct efi_graphics_output_protocol_32 {
+       u32 query_mode;
+       u32 set_mode;
+       u32 blt;
+       u32 mode;
+};
+
+struct efi_graphics_output_protocol_64 {
+       u64 query_mode;
+       u64 set_mode;
+       u64 blt;
+       u64 mode;
+};
+
 struct efi_graphics_output_protocol {
        void *query_mode;
        unsigned long set_mode;
@@ -53,10 +85,38 @@ struct efi_graphics_output_protocol {
        struct efi_graphics_output_protocol_mode *mode;
 };
 
+struct efi_uga_draw_protocol_32 {
+       u32 get_mode;
+       u32 set_mode;
+       u32 blt;
+};
+
+struct efi_uga_draw_protocol_64 {
+       u64 get_mode;
+       u64 set_mode;
+       u64 blt;
+};
+
 struct efi_uga_draw_protocol {
        void *get_mode;
        void *set_mode;
        void *blt;
 };
 
+struct efi_config {
+       u64 image_handle;
+       u64 table;
+       u64 allocate_pool;
+       u64 allocate_pages;
+       u64 get_memory_map;
+       u64 free_pool;
+       u64 free_pages;
+       u64 locate_handle;
+       u64 handle_protocol;
+       u64 exit_boot_services;
+       u64 text_output;
+       efi_status_t (*call)(unsigned long, ...);
+       bool is64;
+} __packed;
+
 #endif /* BOOT_COMPRESSED_EBOOT_H */
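
The struct efi_config added here is the heart of the mixed-mode stub: the assembly entry points fill it with raw firmware pointers plus a ->call trampoline of the right width, and the C code dispatches every early boot-services call through it. A minimal sketch of how the efi_call_early() helper used earlier in efi_main()/exit_boot() can be layered on top of this struct (a sketch of the idea, not necessarily the exact upstream macro):

extern struct efi_config *efi_early;	/* saved from efi_main(c, ...) */

/*
 * Every early boot-services call goes through the width-agnostic
 * trampoline in efi_config->call, passing the raw firmware function
 * pointer selected by field name.
 */
#define efi_call_early(f, ...) \
	efi_early->call(efi_early->f, __VA_ARGS__)

/* For example, the pool allocation made in efi_main() above becomes:
 *	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 *				sizeof(*gdt), (void **)&gdt);
 */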
index cedc60de86eb71eb8d51f495f1f9d9b799efedf1..7ff3632806b18ec9a48bd0ae88bdc7a9e9dbe091 100644 (file)
@@ -1 +1,30 @@
+#include <asm/segment.h>
+#include <asm/msr.h>
+#include <asm/processor-flags.h>
+
 #include "../../platform/efi/efi_stub_64.S"
+
+#ifdef CONFIG_EFI_MIXED
+       .code64
+       .text
+ENTRY(efi64_thunk)
+       push    %rbp
+       push    %rbx
+
+       subq    $16, %rsp
+       leaq    efi_exit32(%rip), %rax
+       movl    %eax, 8(%rsp)
+       leaq    efi_gdt64(%rip), %rax
+       movl    %eax, 4(%rsp)
+       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
+       leaq    efi32_boot_gdt(%rip), %rax
+       movl    %eax, (%rsp)
+
+       call    __efi64_thunk
+
+       addq    $16, %rsp
+       pop     %rbx
+       pop     %rbp
+       ret
+ENDPROC(efi64_thunk)
+#endif /* CONFIG_EFI_MIXED */
index 9116aac232c746ae4f5262508fba437db9fbb431..de9d4200d305ba86eb48bb60f3a852b210fb2649 100644 (file)
@@ -42,26 +42,53 @@ ENTRY(startup_32)
 ENTRY(efi_pe_entry)
        add     $0x4, %esp
 
+       call    1f
+1:     popl    %esi
+       subl    $1b, %esi
+
+       popl    %ecx
+       movl    %ecx, efi32_config(%esi)        /* Handle */
+       popl    %ecx
+       movl    %ecx, efi32_config+8(%esi)      /* EFI System table pointer */
+
+       /* Relocate efi_config->call() */
+       leal    efi32_config(%esi), %eax
+       add     %esi, 88(%eax)
+       pushl   %eax
+
        call    make_boot_params
        cmpl    $0, %eax
-       je      1f
-       movl    0x4(%esp), %esi
-       movl    (%esp), %ecx
+       je      fail
+       popl    %ecx
        pushl   %eax
-       pushl   %esi
        pushl   %ecx
-       sub     $0x4, %esp
+       jmp     2f              /* Skip efi_config initialization */
 
-ENTRY(efi_stub_entry)
+ENTRY(efi32_stub_entry)
        add     $0x4, %esp
+       popl    %ecx
+       popl    %edx
+
+       call    1f
+1:     popl    %esi
+       subl    $1b, %esi
+
+       movl    %ecx, efi32_config(%esi)        /* Handle */
+       movl    %edx, efi32_config+8(%esi)      /* EFI System table pointer */
+
+       /* Relocate efi_config->call() */
+       leal    efi32_config(%esi), %eax
+       add     %esi, 88(%eax)
+       pushl   %eax
+2:
        call    efi_main
        cmpl    $0, %eax
        movl    %eax, %esi
        jne     2f
-1:
+fail:
        /* EFI init failed, so hang. */
        hlt
-       jmp     1b
+       jmp     fail
 2:
        call    3f
 3:
@@ -202,6 +229,15 @@ relocated:
        xorl    %ebx, %ebx
        jmp     *%eax
 
+#ifdef CONFIG_EFI_STUB
+       .data
+efi32_config:
+       .fill 11,8,0
+       .long efi_call_phys
+       .long 0
+       .byte 0
+#endif
+
 /*
  * Stack and heap for uncompression
  */
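
The literal 88 in the fixups above (add %esi, 88(%eax) here, and addq %rbp, efi64_config+88(%rip) in the 64-bit path that follows) is the byte offset of the ->call member of the packed struct efi_config: eleven u64 slots come first, and 11 * 8 = 88. Spelled out as a compile-time assertion one could add on the C side (an illustration, not part of the patch):

#include <linux/bug.h>
#include <linux/stddef.h>
#include "eboot.h"	/* assumed location of struct efi_config */

/* Assumption made explicit: the assembly patches the function pointer
 * that sits right after the eleven u64 boot-services fields. */
static inline void efi_config_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct efi_config, call) != 11 * 8);
}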
index c5c1ae0997e7b223128b009a220b899e22ffeb25..57e58a5fa21073de1c86f4ceee6b76e674d87399 100644 (file)
@@ -113,7 +113,8 @@ ENTRY(startup_32)
        lgdt    gdt(%ebp)
 
        /* Enable PAE mode */
-       movl    $(X86_CR4_PAE), %eax
+       movl    %cr4, %eax
+       orl     $X86_CR4_PAE, %eax
        movl    %eax, %cr4
 
  /*
@@ -178,6 +179,13 @@ ENTRY(startup_32)
         */
        pushl   $__KERNEL_CS
        leal    startup_64(%ebp), %eax
+#ifdef CONFIG_EFI_MIXED
+       movl    efi32_config(%ebp), %ebx
+       cmp     $0, %ebx
+       jz      1f
+       leal    handover_entry(%ebp), %eax
+1:
+#endif
        pushl   %eax
 
        /* Enter paged protected Mode, activating Long Mode */
@@ -188,6 +196,30 @@ ENTRY(startup_32)
        lret
 ENDPROC(startup_32)
 
+#ifdef CONFIG_EFI_MIXED
+       .org 0x190
+ENTRY(efi32_stub_entry)
+       add     $0x4, %esp              /* Discard return address */
+       popl    %ecx
+       popl    %edx
+       popl    %esi
+
+       leal    (BP_scratch+4)(%esi), %esp
+       call    1f
+1:     pop     %ebp
+       subl    $1b, %ebp
+
+       movl    %ecx, efi32_config(%ebp)
+       movl    %edx, efi32_config+8(%ebp)
+       sgdtl   efi32_boot_gdt(%ebp)
+
+       leal    efi32_config(%ebp), %eax
+       movl    %eax, efi_config(%ebp)
+
+       jmp     startup_32
+ENDPROC(efi32_stub_entry)
+#endif
+
        .code64
        .org 0x200
 ENTRY(startup_64)
@@ -209,26 +241,48 @@ ENTRY(startup_64)
        jmp     preferred_addr
 
 ENTRY(efi_pe_entry)
-       mov     %rcx, %rdi
-       mov     %rdx, %rsi
-       pushq   %rdi
-       pushq   %rsi
+       movq    %rcx, efi64_config(%rip)        /* Handle */
+       movq    %rdx, efi64_config+8(%rip) /* EFI System table pointer */
+
+       leaq    efi64_config(%rip), %rax
+       movq    %rax, efi_config(%rip)
+
+       call    1f
+1:     popq    %rbp
+       subq    $1b, %rbp
+
+       /*
+        * Relocate efi_config->call().
+        */
+       addq    %rbp, efi64_config+88(%rip)
+
+       movq    %rax, %rdi
        call    make_boot_params
        cmpq    $0,%rax
-       je      1f
-       mov     %rax, %rdx
-       popq    %rsi
-       popq    %rdi
+       je      fail
+       mov     %rax, %rsi
+       jmp     2f              /* Skip the relocation */
 
-ENTRY(efi_stub_entry)
+handover_entry:
+       call    1f
+1:     popq    %rbp
+       subq    $1b, %rbp
+
+       /*
+        * Relocate efi_config->call().
+        */
+       movq    efi_config(%rip), %rax
+       addq    %rbp, 88(%rax)
+2:
+       movq    efi_config(%rip), %rdi
        call    efi_main
        movq    %rax,%rsi
        cmpq    $0,%rax
        jne     2f
-1:
+fail:
        /* EFI init failed, so hang. */
        hlt
-       jmp     1b
+       jmp     fail
 2:
        call    3f
 3:
@@ -307,6 +361,20 @@ preferred_addr:
        leaq    relocated(%rbx), %rax
        jmp     *%rax
 
+#ifdef CONFIG_EFI_STUB
+       .org 0x390
+ENTRY(efi64_stub_entry)
+       movq    %rdi, efi64_config(%rip)        /* Handle */
+       movq    %rsi, efi64_config+8(%rip) /* EFI System table pointer */
+
+       leaq    efi64_config(%rip), %rax
+       movq    %rax, efi_config(%rip)
+
+       movq    %rdx, %rsi
+       jmp     handover_entry
+ENDPROC(efi64_stub_entry)
+#endif
+
        .text
 relocated:
 
@@ -372,6 +440,25 @@ gdt:
        .quad   0x0000000000000000      /* TS continued */
 gdt_end:
 
+#ifdef CONFIG_EFI_STUB
+efi_config:
+       .quad   0
+
+#ifdef CONFIG_EFI_MIXED
+       .global efi32_config
+efi32_config:
+       .fill   11,8,0
+       .quad   efi64_thunk
+       .byte   0
+#endif
+
+       .global efi64_config
+efi64_config:
+       .fill   11,8,0
+       .quad   efi_call6
+       .byte   1
+#endif /* CONFIG_EFI_STUB */
+
 /*
  * Stack and heap for uncompression
  */
index 100a9a10076a649e7e7008a3579867391ca16fa4..f0d0b20fe14982529a10d4cec1e8aff6ebf4d96d 100644 (file)
@@ -67,6 +67,13 @@ static int is_transmeta(void)
               cpu_vendor[2] == A32('M', 'x', '8', '6');
 }
 
+static int is_intel(void)
+{
+       return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
+              cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
+              cpu_vendor[2] == A32('n', 't', 'e', 'l');
+}
+
 /* Returns a bitmask of which words we have error bits in */
 static int check_cpuflags(void)
 {
@@ -153,6 +160,19 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
                asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 
                err = check_cpuflags();
+       } else if (err == 0x01 &&
+                  !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
+                  is_intel() && cpu.level == 6 &&
+                  (cpu.model == 9 || cpu.model == 13)) {
+               /* PAE is disabled on this Pentium M but can be forced */
+               if (cmdline_find_option_bool("forcepae")) {
+                       puts("WARNING: Forcing PAE in CPU flags\n");
+                       set_bit(X86_FEATURE_PAE, cpu.flags);
+                       err = check_cpuflags();
+               }
+               else {
+                       puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
+               }
        }
 
        if (err_flags_ptr)
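
Restated as one predicate, purely to make the new branch easier to read (may_force_pae is a hypothetical name, not code from the patch):

/*
 * The override is only offered when the sole missing required feature
 * is PAE, the part is a GenuineIntel family-6 model 9 or 13
 * (Banias/Dothan-era Pentium M), and the user explicitly booted with
 * "forcepae" on the kernel command line.
 */
bool may_force_pae = err == 0x01 &&
		     !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
		     is_intel() && cpu.level == 6 &&
		     (cpu.model == 9 || cpu.model == 13);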
index ec3b8ba68096c5434d523c1182196f6ab42f8239..0ca9a5c362bc9681de69956f9c31ee9965d7bb10 100644 (file)
@@ -283,7 +283,7 @@ _start:
        # Part 2 of the header, from the old setup.S
 
                .ascii  "HdrS"          # header signature
-               .word   0x020c          # header version number (>= 0x0105)
+               .word   0x020d          # header version number (>= 0x0105)
                                        # or else old loadlin-1.5 will fail)
                .globl realmode_swtch
 realmode_swtch:        .word   0, 0            # default_switch, SETUPSEG
@@ -350,7 +350,7 @@ cmd_line_ptr:       .long   0               # (Header version 0x0202 or later)
                                        # can be located anywhere in
                                        # low memory 0x10000 or higher.
 
-ramdisk_max:   .long 0x7fffffff
+initrd_addr_max: .long 0x7fffffff
                                        # (Header version 0x0203 or later)
                                        # The highest safe address for
                                        # the contents of an initrd
@@ -375,7 +375,8 @@ xloadflags:
 # define XLF0 0
 #endif
 
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64)
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) && \
+       !defined(CONFIG_EFI_MIXED)
    /* kernel/boot_param/ramdisk could be loaded above 4g */
 # define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G
 #else
@@ -383,10 +384,14 @@ xloadflags:
 #endif
 
 #ifdef CONFIG_EFI_STUB
-# ifdef CONFIG_X86_64
-#  define XLF23 XLF_EFI_HANDOVER_64            /* 64-bit EFI handover ok */
+# ifdef CONFIG_EFI_MIXED
+#  define XLF23 (XLF_EFI_HANDOVER_32|XLF_EFI_HANDOVER_64)
 # else
-#  define XLF23 XLF_EFI_HANDOVER_32            /* 32-bit EFI handover ok */
+#  ifdef CONFIG_X86_64
+#   define XLF23 XLF_EFI_HANDOVER_64           /* 64-bit EFI handover ok */
+#  else
+#   define XLF23 XLF_EFI_HANDOVER_32           /* 32-bit EFI handover ok */
+#  endif
 # endif
 #else
 # define XLF23 0
@@ -426,13 +431,7 @@ pref_address:              .quad LOAD_PHYSICAL_ADDR        # preferred load addr
 #define INIT_SIZE VO_INIT_SIZE
 #endif
 init_size:             .long INIT_SIZE         # kernel initialization size
-handover_offset:
-#ifdef CONFIG_EFI_STUB
-                       .long 0x30              # offset to the handover
-                                               # protocol entry point
-#else
-                       .long 0
-#endif
+handover_offset:       .long 0                 # Filled in by build.c
 
 # End of setup header #####################################################
 
index 8e15b22391fc40ad9e0a981ef3176d76643e7c45..1a2f2121cada2a11a1b273bdc518b1dcce34ea99 100644 (file)
@@ -53,7 +53,8 @@ int is_big_kernel;
 
 #define PECOFF_RELOC_RESERVE 0x20
 
-unsigned long efi_stub_entry;
+unsigned long efi32_stub_entry;
+unsigned long efi64_stub_entry;
 unsigned long efi_pe_entry;
 unsigned long startup_64;
 
@@ -219,6 +220,52 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
        update_pecoff_section_header(".text", text_start, text_sz);
 }
 
+static int reserve_pecoff_reloc_section(int c)
+{
+       /* Reserve 0x20 bytes for .reloc section */
+       memset(buf+c, 0, PECOFF_RELOC_RESERVE);
+       return PECOFF_RELOC_RESERVE;
+}
+
+static void efi_stub_defaults(void)
+{
+       /* Defaults for old kernel */
+#ifdef CONFIG_X86_32
+       efi_pe_entry = 0x10;
+#else
+       efi_pe_entry = 0x210;
+       startup_64 = 0x200;
+#endif
+}
+
+static void efi_stub_entry_update(void)
+{
+       unsigned long addr = efi32_stub_entry;
+
+#ifdef CONFIG_X86_64
+       /* Yes, this is really how we defined it :( */
+       addr = efi64_stub_entry - 0x200;
+#endif
+
+#ifdef CONFIG_EFI_MIXED
+       if (efi32_stub_entry != addr)
+               die("32-bit and 64-bit EFI entry points do not match\n");
+#endif
+       put_unaligned_le32(addr, &buf[0x264]);
+}
+
+#else
+
+static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
+static inline void update_pecoff_text(unsigned int text_start,
+                                     unsigned int file_sz) {}
+static inline void efi_stub_defaults(void) {}
+static inline void efi_stub_entry_update(void) {}
+
+static inline int reserve_pecoff_reloc_section(int c)
+{
+       return 0;
+}
 #endif /* CONFIG_EFI_STUB */
 
 
@@ -250,7 +297,8 @@ static void parse_zoffset(char *fname)
        p = (char *)buf;
 
        while (p && *p) {
-               PARSE_ZOFS(p, efi_stub_entry);
+               PARSE_ZOFS(p, efi32_stub_entry);
+               PARSE_ZOFS(p, efi64_stub_entry);
                PARSE_ZOFS(p, efi_pe_entry);
                PARSE_ZOFS(p, startup_64);
 
@@ -271,15 +319,7 @@ int main(int argc, char ** argv)
        void *kernel;
        u32 crc = 0xffffffffUL;
 
-       /* Defaults for old kernel */
-#ifdef CONFIG_X86_32
-       efi_pe_entry = 0x10;
-       efi_stub_entry = 0x30;
-#else
-       efi_pe_entry = 0x210;
-       efi_stub_entry = 0x230;
-       startup_64 = 0x200;
-#endif
+       efi_stub_defaults();
 
        if (argc != 5)
                usage();
@@ -302,11 +342,7 @@ int main(int argc, char ** argv)
                die("Boot block hasn't got boot flag (0xAA55)");
        fclose(file);
 
-#ifdef CONFIG_EFI_STUB
-       /* Reserve 0x20 bytes for .reloc section */
-       memset(buf+c, 0, PECOFF_RELOC_RESERVE);
-       c += PECOFF_RELOC_RESERVE;
-#endif
+       c += reserve_pecoff_reloc_section(c);
 
        /* Pad unused space with zeros */
        setup_sectors = (c + 511) / 512;
@@ -315,9 +351,7 @@ int main(int argc, char ** argv)
        i = setup_sectors*512;
        memset(buf+c, 0, i-c);
 
-#ifdef CONFIG_EFI_STUB
        update_pecoff_setup_and_reloc(i);
-#endif
 
        /* Set the default root device */
        put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
@@ -342,14 +376,9 @@ int main(int argc, char ** argv)
        buf[0x1f1] = setup_sectors-1;
        put_unaligned_le32(sys_size, &buf[0x1f4]);
 
-#ifdef CONFIG_EFI_STUB
        update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
 
-#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
-       efi_stub_entry -= 0x200;
-#endif
-       put_unaligned_le32(efi_stub_entry, &buf[0x264]);
-#endif
+       efi_stub_entry_update();
 
        crc = partial_crc32(buf, i, crc);
        if (fwrite(buf, 1, i, dest) != i)
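
The refactoring keeps the old quirk that the stored handover offset is relative to startup_64 on 64-bit. Checking the arithmetic against the .org placements in head_32.S/head_64.S above:

/*
 * efi32_stub_entry sits at .org 0x190 and efi64_stub_entry at
 * .org 0x390, with startup_64 at 0x200, so efi_stub_entry_update()
 * computes
 *
 *	addr = efi64_stub_entry - 0x200 = 0x390 - 0x200 = 0x190
 *
 * which matches efi32_stub_entry, the CONFIG_EFI_MIXED consistency
 * check ("entry points do not match") passes, and the single
 * handover_offset written at buf[0x264] serves both entry points.
 */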
index a7fef2621cc9a7f89a824a761413451c1d1f06b9..619e7f7426c6d2cf5416cd7179ed5c37c5d0340e 100644 (file)
@@ -60,7 +60,6 @@ CONFIG_CRASH_DUMP=y
 CONFIG_HIBERNATION=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TRACE_RTC=y
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_DOCK=y
 CONFIG_CPU_FREQ=y
 # CONFIG_CPU_FREQ_STAT is not set
index c1119d4c1281dff554e5b0ebe06a4eb7948bab36..6181c69b786bd963107e698bf8e936529f05546f 100644 (file)
@@ -58,7 +58,6 @@ CONFIG_CRASH_DUMP=y
 CONFIG_HIBERNATION=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TRACE_RTC=y
-CONFIG_ACPI_PROCFS=y
 CONFIG_ACPI_DOCK=y
 CONFIG_CPU_FREQ=y
 # CONFIG_CPU_FREQ_STAT is not set
index 7f669853317a3e940647319c21705ca951fb18ac..4acddc43ee0cc60ae92e1506caf1915409fd94cc 100644 (file)
@@ -5,3 +5,5 @@ genhdr-y += unistd_64.h
 genhdr-y += unistd_x32.h
 
 generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += mcs_spinlock.h
index 1d2091a226bcac5b55b05225816bf1f374860c3c..19b0ebafcd3e3f9d5a7aa5d5d9900de66d03a374 100644 (file)
@@ -93,9 +93,6 @@ static inline int is_vsmp_box(void)
        return 0;
 }
 #endif
-extern void xapic_wait_icr_idle(void);
-extern u32 safe_xapic_wait_icr_idle(void);
-extern void xapic_icr_write(u32, u32);
 extern int setup_profiling_timer(unsigned int);
 
 static inline void native_apic_mem_write(u32 reg, u32 v)
@@ -184,7 +181,6 @@ extern int x2apic_phys;
 extern int x2apic_preenabled;
 extern void check_x2apic(void);
 extern void enable_x2apic(void);
-extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
        u64 msr;
@@ -221,7 +217,6 @@ static inline void x2apic_force_phys(void)
 {
 }
 
-#define        nox2apic        0
 #define        x2apic_preenabled 0
 #define        x2apic_supported()      0
 #endif
@@ -351,7 +346,7 @@ struct apic {
        int trampoline_phys_low;
        int trampoline_phys_high;
 
-       void (*wait_for_init_deassert)(atomic_t *deassert);
+       bool wait_for_init_deassert;
        void (*smp_callin_clear_local_apic)(void);
        void (*inquire_remote_apic)(int apicid);
 
@@ -517,13 +512,6 @@ extern int default_cpu_present_to_apicid(int mps_cpu);
 extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
-static inline void default_wait_for_init_deassert(atomic_t *deassert)
-{
-       while (!atomic_read(deassert))
-               cpu_relax();
-       return;
-}
-
 extern void generic_bigsmp_probe(void);
 
 
index e099f9502acec86cb5ec1412745d90384820e625..63211ef5046aef83ed4efd3ff163f5578202d119 100644 (file)
@@ -37,7 +37,7 @@
 #define X86_FEATURE_PAT                (0*32+16) /* Page Attribute Table */
 #define X86_FEATURE_PSE36      (0*32+17) /* 36-bit PSEs */
 #define X86_FEATURE_PN         (0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH     (0*32+19) /* "clflush" CLFLUSH instruction */
+#define X86_FEATURE_CLFLUSH    (0*32+19) /* CLFLUSH instruction */
 #define X86_FEATURE_DS         (0*32+21) /* "dts" Debug Store */
 #define X86_FEATURE_ACPI       (0*32+22) /* ACPI via MSR */
 #define X86_FEATURE_MMX                (0*32+23) /* Multimedia Extensions */
 #define X86_FEATURE_INVPCID    (9*32+10) /* Invalidate Processor Context ID */
 #define X86_FEATURE_RTM                (9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_MPX                (9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F    (9*32+16) /* AVX-512 Foundation */
 #define X86_FEATURE_RDSEED     (9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX                (9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP       (9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_CLFLUSHOPT (9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_AVX512PF   (9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER   (9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD   (9*32+28) /* AVX-512 Conflict Detection */
 
 /*
  * BUG word(s)
@@ -313,7 +318,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_pmm_enabled    boot_cpu_has(X86_FEATURE_PMM_EN)
 #define cpu_has_ds             boot_cpu_has(X86_FEATURE_DS)
 #define cpu_has_pebs           boot_cpu_has(X86_FEATURE_PEBS)
-#define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLUSH)
 #define cpu_has_bts            boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages                boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon   boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
diff --git a/arch/x86/include/asm/cputime.h b/arch/x86/include/asm/cputime.h
deleted file mode 100644 (file)
index 6d68ad7..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/cputime.h>
index acd86c850414e7f3adfffe860ffd22bd17e0d459..0869434eaf725e7a5aa5efb6f5ce8a62ed8176d3 100644 (file)
  */
 #define EFI_OLD_MEMMAP         EFI_ARCH_1
 
+#define EFI32_LOADER_SIGNATURE "EL32"
+#define EFI64_LOADER_SIGNATURE "EL64"
+
 #ifdef CONFIG_X86_32
 
-#define EFI_LOADER_SIGNATURE   "EL32"
 
 extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
@@ -57,8 +59,6 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
 #else /* !CONFIG_X86_32 */
 
-#define EFI_LOADER_SIGNATURE   "EL64"
-
 extern u64 efi_call0(void *fp);
 extern u64 efi_call1(void *fp, u64 arg1);
 extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
@@ -119,7 +119,6 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
-extern unsigned long x86_efi_facility;
 extern struct efi_scratch efi_scratch;
 extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
@@ -130,10 +129,12 @@ extern void efi_memory_uc(u64 addr, unsigned long size);
 extern void __init efi_map_region(efi_memory_desc_t *md);
 extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
 extern void efi_sync_low_kernel_mappings(void);
-extern void efi_setup_page_tables(void);
+extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
 extern void __init old_map_region(efi_memory_desc_t *md);
 extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_mkexec(void);
+extern void __init efi_dump_pagetable(void);
 extern void __init efi_apply_memmap_quirks(void);
 
 struct efi_setup_data {
@@ -153,8 +154,40 @@ static inline bool efi_is_native(void)
        return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
 }
 
+static inline bool efi_runtime_supported(void)
+{
+       if (efi_is_native())
+               return true;
+
+       if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
+               return true;
+
+       return false;
+}
+
 extern struct console early_efi_console;
 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
+
+#ifdef CONFIG_EFI_MIXED
+extern void efi_thunk_runtime_setup(void);
+extern efi_status_t efi_thunk_set_virtual_address_map(
+       void *phys_set_virtual_address_map,
+       unsigned long memory_map_size,
+       unsigned long descriptor_size,
+       u32 descriptor_version,
+       efi_memory_desc_t *virtual_map);
+#else
+static inline void efi_thunk_runtime_setup(void) {}
+static inline efi_status_t efi_thunk_set_virtual_address_map(
+       void *phys_set_virtual_address_map,
+       unsigned long memory_map_size,
+       unsigned long descriptor_size,
+       u32 descriptor_version,
+       efi_memory_desc_t *virtual_map)
+{
+       return EFI_SUCCESS;
+}
+#endif /* CONFIG_EFI_MIXED */
 #else
 /*
  * IF EFI is not configured, have the EFI calls return -ENOSYS.
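
efi_runtime_supported() gives later init code a single predicate for "may we call EFI runtime services", covering both the native case and the new mixed-mode thunking. A hypothetical caller, only to show the intent (the function name, message, and placement are assumptions, not from this patch):

#include <linux/efi.h>
#include <linux/printk.h>

static bool efi_runtime_usable_or_warn(void)
{
	if (efi_runtime_supported())
		return true;

	/* e.g. a 64-bit kernel on 32-bit firmware without CONFIG_EFI_MIXED,
	 * or a mixed-mode boot forced into efi=old_map */
	pr_info("efi: runtime services not supported in this configuration\n");
	return false;
}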
index d3d74698dce9c09151a6421176224885454901df..1c7eefe3250295762c258a66af22a05d53882334 100644 (file)
@@ -145,10 +145,10 @@ static int fd_request_irq(void)
 {
        if (can_use_virtual_dma)
                return request_irq(FLOPPY_IRQ, floppy_hardint,
-                                  IRQF_DISABLED, "floppy", NULL);
+                                  0, "floppy", NULL);
        else
                return request_irq(FLOPPY_IRQ, floppy_interrupt,
-                                  IRQF_DISABLED, "floppy", NULL);
+                                  0, "floppy", NULL);
 }
 
 static unsigned long dma_mem_alloc(unsigned long size)
index ab0ae1aa6d0af1c5d64edcd313e5283aa099c04f..230853da4ec09ea6590aed6d724e1fd13b9e356c 100644 (file)
@@ -33,6 +33,9 @@ typedef struct {
 #ifdef CONFIG_X86_MCE_THRESHOLD
        unsigned int irq_threshold_count;
 #endif
+#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
+       unsigned int irq_hv_callback_count;
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
index cd9c41938b8a1ba8f830c888a078e8395cf76af1..c163215abb9ad99ee1c9eac3210acfb0380614b6 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_X86_MSHYPER_H
 
 #include <linux/types.h>
+#include <linux/interrupt.h>
 #include <asm/hyperv.h>
 
 struct ms_hyperv_info {
@@ -16,6 +17,7 @@ void hyperv_callback_vector(void);
 #define trace_hyperv_callback_vector hyperv_callback_vector
 #endif
 void hyperv_vector_handler(struct pt_regs *regs);
-void hv_register_vmbus_handler(int irq, irq_handler_t handler);
+void hv_setup_vmbus_irq(void (*handler)(void));
+void hv_remove_vmbus_irq(void);
 
 #endif
index e139b13f2a33a3572d91e4f7297952e1bf8a1ffc..de36f22eb0b9e79db05711cd46196c0c072bb023 100644 (file)
@@ -214,6 +214,8 @@ do {                                                            \
 
 struct msr *msrs_alloc(void);
 void msrs_free(struct msr *msrs);
+int msr_set_bit(u32 msr, u8 bit);
+int msr_clear_bit(u32 msr, u8 bit);
 
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
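
Later patches in this pull (amd.c and intel.c below) depend on the return convention of these helpers: negative on a failed MSR access, 0 when the bit already had the requested value, and a positive value when the MSR was actually rewritten, hence the "msr_set_bit(...) > 0" tests. A sketch with those semantics, inferred from the callers rather than copied from arch/x86/lib/msr.c:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <asm/msr.h>

/* Flip one bit in an MSR, touching the register only when needed. */
static int __msr_flip_bit(u32 msr, u8 bit, bool set)
{
	u64 val, new;
	int err;

	if (bit > 63)
		return -EINVAL;

	err = rdmsrl_safe(msr, &val);
	if (err)
		return err;

	new = set ? (val | BIT_64(bit)) : (val & ~BIT_64(bit));
	if (new == val)
		return 0;		/* nothing to do */

	err = wrmsrl_safe(msr, new);
	return err ? err : 1;		/* > 0: the bit really changed */
}

int msr_set_bit(u32 msr, u8 bit)   { return __msr_flip_bit(msr, bit, true); }
int msr_clear_bit(u32 msr, u8 bit) { return __msr_flip_bit(msr, bit, false); }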
index 86f9301903c818c632b227537b44142b71883c8d..5f2fc4441b11016608f476848e3a60bd1e08d7a5 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_NMI_H
 #define _ASM_X86_NMI_H
 
+#include <linux/irq_work.h>
 #include <linux/pm.h>
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -38,6 +39,8 @@ typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
 struct nmiaction {
        struct list_head        list;
        nmi_handler_t           handler;
+       u64                     max_duration;
+       struct irq_work         irq_work;
        unsigned long           flags;
        const char              *name;
 };
index 5ad38ad07890fc4ca8698aae41c6e60e48a451f4..b459ddf27d64149915fb2e2938f2392a8322d957 100644 (file)
         : (prot))
 
 #ifndef __ASSEMBLY__
-
 #include <asm/x86_init.h>
 
+void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -445,20 +446,10 @@ static inline int pte_same(pte_t a, pte_t b)
        return a.pte == b.pte;
 }
 
-static inline int pteval_present(pteval_t pteval)
-{
-       /*
-        * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
-        * way clearly states that the intent is that protnone and numa
-        * hinting ptes are considered present for the purposes of
-        * pagetable operations like zapping, protection changes, gup etc.
-        */
-       return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
-}
-
 static inline int pte_present(pte_t a)
 {
-       return pteval_present(pte_flags(a));
+       return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
+                              _PAGE_NUMA);
 }
 
 #define pte_accessible pte_accessible
index 1aa9ccd432236af7d5667657e4267c3a8ba218f6..708f19fb4fc788ea0a51838e43ea8ea50c9699d0 100644 (file)
@@ -382,9 +382,13 @@ static inline void update_page_count(int level, unsigned long pages) { }
  * as a pte too.
  */
 extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+                                   unsigned int *level);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                   unsigned numpages, unsigned long page_flags);
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+                              unsigned numpages);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_DEFS_H */
index fdedd38fd0fc7feeabc26f8b366b525b65685904..a4ea02351f4d02bee5a43e78a9013b77ec94ea48 100644 (file)
@@ -449,6 +449,15 @@ struct stack_canary {
 };
 DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
+/*
+ * per-CPU IRQ handling stacks
+ */
+struct irq_stack {
+       u32                     stack[THREAD_SIZE/sizeof(u32)];
+} __aligned(THREAD_SIZE);
+
+DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
+DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
 #endif /* X86_64 */
 
 extern unsigned int xstate_size;
index 645cad2c95ff9c87ebd0592c95e891b7a53290fe..e820c080a4e99e45354fc7b6e5720d9f1933b241 100644 (file)
@@ -191,6 +191,14 @@ static inline void clflush(volatile void *__p)
        asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
 }
 
+static inline void clflushopt(volatile void *__p)
+{
+       alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
+                      ".byte 0x66; clflush %P0",
+                      X86_FEATURE_CLFLUSHOPT,
+                      "+m" (*(volatile char __force *)__p));
+}
+
 #define nop() asm volatile ("nop")
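
clflushopt is the weakly ordered, higher-throughput cousin of clflush, selected at runtime by the 0x66-prefixed alternative above whenever X86_FEATURE_CLFLUSHOPT is set. A hypothetical caller illustrating the usual pattern, walking a range one cache line at a time and fencing around the flushes (flush_buffer() and its shape are illustrative, not part of this patch):

#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/special_insns.h>
#include <asm/barrier.h>

/* Illustrative only: write back every cache line covering [addr, addr + size). */
static void flush_buffer(void *addr, size_t size)
{
	unsigned long clsize = boot_cpu_data.x86_clflush_size;
	char *p = (char *)((unsigned long)addr & ~(clsize - 1));
	char *end = (char *)addr + size;

	mb();		/* order earlier stores before the flushes */
	for (; p < end; p += clsize) {
		if (static_cpu_has(X86_FEATURE_CLFLUSHOPT))
			clflushopt(p);
		else
			clflush(p);
	}
	mb();		/* clflushopt is weakly ordered; fence before relying on it */
}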
 
 
index e1940c06ed022d8b9ad7988aaf76970afc92c4b5..47e5de25ba799f787d7f9344c28daa797fde47b7 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/compiler.h>
 #include <asm/page.h>
+#include <asm/percpu.h>
 #include <asm/types.h>
 
 /*
@@ -32,12 +33,6 @@ struct thread_info {
        mm_segment_t            addr_limit;
        struct restart_block    restart_block;
        void __user             *sysenter_return;
-#ifdef CONFIG_X86_32
-       unsigned long           previous_esp;   /* ESP of the previous stack in
-                                                  case of nested (IRQ) stacks
-                                               */
-       __u8                    supervisor_stack[0];
-#endif
        unsigned int            sig_on_uaccess_error:1;
        unsigned int            uaccess_err:1;  /* uaccess failed */
 };
@@ -153,9 +148,9 @@ struct thread_info {
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
 
-#ifdef CONFIG_X86_32
+#define STACK_WARN             (THREAD_SIZE/8)
+#define KERNEL_STACK_OFFSET    (5*(BITS_PER_LONG/8))
 
-#define STACK_WARN     (THREAD_SIZE/8)
 /*
  * macros/functions for gaining access to the thread information structure
  *
@@ -163,42 +158,6 @@ struct thread_info {
  */
 #ifndef __ASSEMBLY__
 
-#define current_stack_pointer ({               \
-       unsigned long sp;                       \
-       asm("mov %%esp,%0" : "=g" (sp));        \
-       sp;                                     \
-})
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-       return (struct thread_info *)
-               (current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-
-#else /* !__ASSEMBLY__ */
-
-/* how to get the thread information struct from ASM */
-#define GET_THREAD_INFO(reg)    \
-       movl $-THREAD_SIZE, reg; \
-       andl %esp, reg
-
-/* use this one if reg already contains %esp */
-#define GET_THREAD_INFO_WITH_ESP(reg) \
-       andl $-THREAD_SIZE, reg
-
-#endif
-
-#else /* X86_32 */
-
-#include <asm/percpu.h>
-#define KERNEL_STACK_OFFSET (5*8)
-
-/*
- * macros/functions for gaining access to the thread information structure
- * preempt_count needs to be 1 initially, until the scheduler is functional.
- */
-#ifndef __ASSEMBLY__
 DECLARE_PER_CPU(unsigned long, kernel_stack);
 
 static inline struct thread_info *current_thread_info(void)
@@ -213,8 +172,8 @@ static inline struct thread_info *current_thread_info(void)
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-       movq PER_CPU_VAR(kernel_stack),reg ; \
-       subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+       _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+       _ASM_SUB $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg ;
 
 /*
  * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
@@ -224,8 +183,6 @@ static inline struct thread_info *current_thread_info(void)
 
 #endif
 
-#endif /* !X86_32 */
-
 /*
  * Thread-synchronous status.
  *
index d35f24e231cd2429c8d17c0d5b3d722951d3e04b..b28097e4c8c3e1e941df963f49cd2db32d89589a 100644 (file)
@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
 
 extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
-#ifdef ENABLE_TOPO_DEFINES
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
+
+#ifdef ENABLE_TOPO_DEFINES
 #define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
 #define topology_thread_cpumask(cpu)           (per_cpu(cpu_sibling_map, cpu))
 #endif
@@ -133,12 +134,6 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 struct pci_bus;
 void x86_pci_root_bus_resources(int bus, struct list_head *resources);
 
-#ifdef CONFIG_SMP
-#define mc_capable()   ((boot_cpu_data.x86_max_cores > 1) && \
-                       (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids))
-#define smt_capable()                  (smp_num_siblings > 1)
-#endif
-
 #ifdef CONFIG_NUMA
 extern int get_mp_bus_to_node(int busnum);
 extern void set_mp_bus_to_node(int busnum, int node);
index c2a48139c3401b5d0202a83c676242119106b1f2..3f556c6a015769a1a53a2615e3a0cf26da1725a3 100644 (file)
@@ -23,6 +23,9 @@
 #  include <asm/unistd_64.h>
 #  include <asm/unistd_64_x32.h>
 #  define __ARCH_WANT_COMPAT_SYS_TIME
+#  define __ARCH_WANT_COMPAT_SYS_GETDENTS64
+#  define __ARCH_WANT_COMPAT_SYS_PREADV64
+#  define __ARCH_WANT_COMPAT_SYS_PWRITEV64
 
 # endif
 
index 554738963b28cf47dd76fa9947338bb1eae5a0d0..6c1d7411eb009a5a96ef55004fdedf60aa1ef822 100644 (file)
@@ -6,11 +6,14 @@
 
 #define XSTATE_CPUID           0x0000000d
 
-#define XSTATE_FP      0x1
-#define XSTATE_SSE     0x2
-#define XSTATE_YMM     0x4
-#define XSTATE_BNDREGS 0x8
-#define XSTATE_BNDCSR  0x10
+#define XSTATE_FP              0x1
+#define XSTATE_SSE             0x2
+#define XSTATE_YMM             0x4
+#define XSTATE_BNDREGS         0x8
+#define XSTATE_BNDCSR          0x10
+#define XSTATE_OPMASK          0x20
+#define XSTATE_ZMM_Hi256       0x40
+#define XSTATE_Hi16_ZMM                0x80
 
 #define XSTATE_FPSSE   (XSTATE_FP | XSTATE_SSE)
 
@@ -23,7 +26,8 @@
 #define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
 
 /* Supported features which support lazy state saving */
-#define XSTATE_LAZY    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+#define XSTATE_LAZY    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM                  \
+                       | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
 
 /* Supported features which require eager state saving */
 #define XSTATE_EAGER   (XSTATE_BNDREGS | XSTATE_BNDCSR)
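
Worked out numerically, the new AVX-512 components land in the lazily saved set while MPX state remains eager-only:

/*
 * XSTATE_LAZY  = FP | SSE | YMM | OPMASK | ZMM_Hi256 | Hi16_ZMM
 *              = 0x01 | 0x02 | 0x04 | 0x20 | 0x40 | 0x80 = 0xe7
 * XSTATE_EAGER = BNDREGS | BNDCSR
 *              = 0x08 | 0x10                             = 0x18
 */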
index c19fc60ff06272f733df17a03f8cbf577d69073f..4924f4be2b992198995a3bf328f011de65dc2230 100644 (file)
 #define THERM_LOG_THRESHOLD1           (1 << 9)
 
 /* MISC_ENABLE bits: architectural */
-#define MSR_IA32_MISC_ENABLE_FAST_STRING       (1ULL << 0)
-#define MSR_IA32_MISC_ENABLE_TCC               (1ULL << 1)
-#define MSR_IA32_MISC_ENABLE_EMON              (1ULL << 7)
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL       (1ULL << 11)
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL      (1ULL << 12)
-#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP        (1ULL << 16)
-#define MSR_IA32_MISC_ENABLE_MWAIT             (1ULL << 18)
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID       (1ULL << 22)
-#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE      (1ULL << 23)
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE                (1ULL << 34)
+#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT           0
+#define MSR_IA32_MISC_ENABLE_FAST_STRING               (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
+#define MSR_IA32_MISC_ENABLE_TCC_BIT                   1
+#define MSR_IA32_MISC_ENABLE_TCC                       (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
+#define MSR_IA32_MISC_ENABLE_EMON_BIT                  7
+#define MSR_IA32_MISC_ENABLE_EMON                      (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT           11
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL               (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT          12
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL              (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT    16
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP                (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
+#define MSR_IA32_MISC_ENABLE_MWAIT_BIT                 18
+#define MSR_IA32_MISC_ENABLE_MWAIT                     (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT           22
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID               (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT          23
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE              (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT            34
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE                        (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
 
 /* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
-#define MSR_IA32_MISC_ENABLE_X87_COMPAT                (1ULL << 2)
-#define MSR_IA32_MISC_ENABLE_TM1               (1ULL << 3)
-#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE        (1ULL << 4)
-#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE   (1ULL << 6)
-#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK     (1ULL << 8)
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE  (1ULL << 9)
-#define MSR_IA32_MISC_ENABLE_FERR              (1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX    (1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_TM2               (1ULL << 13)
-#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE  (1ULL << 19)
-#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK    (1ULL << 20)
-#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT       (1ULL << 24)
-#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE  (1ULL << 37)
-#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE     (1ULL << 38)
-#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE   (1ULL << 39)
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT            2
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT                        (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
+#define MSR_IA32_MISC_ENABLE_TM1_BIT                   3
+#define MSR_IA32_MISC_ENABLE_TM1                       (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT    4
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE                (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT       6
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE           (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT         8
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK             (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT      9
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_BIT                  10
+#define MSR_IA32_MISC_ENABLE_FERR                      (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT                10
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX            (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
+#define MSR_IA32_MISC_ENABLE_TM2_BIT                   13
+#define MSR_IA32_MISC_ENABLE_TM2                       (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT      19
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT                20
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK            (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT           24
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT               (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT      37
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT         38
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE             (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT       39
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE           (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
 
 #define MSR_IA32_TSC_DEADLINE          0x000006E0
 
index 1dac94265b5978282262b77d4f496ae512701eb8..8e61d23b8f64679577682756272d1e2ff242de07 100644 (file)
@@ -53,10 +53,6 @@ EXPORT_SYMBOL(acpi_disabled);
 # include <asm/proto.h>
 #endif                         /* X86 */
 
-#define BAD_MADT_ENTRY(entry, end) (                                       \
-               (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
-               ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-
 #define PREFIX                 "ACPI: "
 
 int acpi_noirq;                                /* skip ACPI IRQ initialization */
@@ -613,10 +609,10 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
        int nid;
 
        nid = acpi_get_node(handle);
-       if (nid == -1 || !node_online(nid))
-               return;
-       set_apicid_to_node(physid, nid);
-       numa_set_node(cpu, nid);
+       if (nid != -1) {
+               set_apicid_to_node(physid, nid);
+               numa_set_node(cpu, nid);
+       }
 #endif
 }
 
index 7f26c9a70a9e0ada12f46382d7ee2cd73b2d5f29..53e20531470ee1ee125c510bf0c26308f74820d3 100644 (file)
@@ -133,6 +133,10 @@ static inline void imcr_apic_to_pic(void)
  * +1=force-enable
  */
 static int force_enable_local_apic __initdata;
+
+/* Control whether x2APIC mode is enabled or not */
+static bool nox2apic __initdata;
+
 /*
  * APIC command line parameters
  */
@@ -162,8 +166,7 @@ int x2apic_mode;
 /* x2apic enabled before OS handover */
 int x2apic_preenabled;
 static int x2apic_disabled;
-static int nox2apic;
-static __init int setup_nox2apic(char *str)
+static int __init setup_nox2apic(char *str)
 {
        if (x2apic_enabled()) {
                int apicid = native_apic_msr_read(APIC_ID);
@@ -178,7 +181,7 @@ static __init int setup_nox2apic(char *str)
        } else
                setup_clear_cpu_cap(X86_FEATURE_X2APIC);
 
-       nox2apic = 1;
+       nox2apic = true;
 
        return 0;
 }
@@ -283,8 +286,12 @@ u32 native_safe_apic_wait_icr_idle(void)
 
 void native_apic_icr_write(u32 low, u32 id)
 {
+       unsigned long flags;
+
+       local_irq_save(flags);
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
        apic_write(APIC_ICR, low);
+       local_irq_restore(flags);
 }
 
 u64 native_apic_icr_read(void)
index 2c621a6b901a7642838c5a74a54be4bf3fcbe58a..7c1b29479513a1d78f79410915a4ee30e5d1c53e 100644 (file)
@@ -198,7 +198,7 @@ static struct apic apic_flat =  {
 
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
-       .wait_for_init_deassert         = NULL,
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,
 
@@ -314,7 +314,7 @@ static struct apic apic_physflat =  {
 
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
-       .wait_for_init_deassert         = NULL,
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,
 
index 191ce75c0e54b8f2808cce5f730f5d62c18dfa09..8c7c98249c205f0f596e7c0866e89444dc3d4bc8 100644 (file)
@@ -172,8 +172,7 @@ struct apic apic_noop = {
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
 
-       .wait_for_init_deassert         = NULL,
-
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,
 
index 3e67f9e3d7ef1e8eb093d9e8398b21aaf1a7b93e..a5b45df8bc881cafbc2560f3b7ea72ef02aa7c73 100644 (file)
@@ -248,7 +248,7 @@ static const struct apic apic_numachip __refconst = {
        .wakeup_secondary_cpu           = numachip_wakeup_secondary,
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
-       .wait_for_init_deassert         = NULL,
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL, /* REMRD not supported */
 
index d50e3640d5aed53daf4110ef68193cf0cbdac826..e4840aa7a255b63db235ab5cecd07ecabf5c5013 100644 (file)
@@ -199,8 +199,7 @@ static struct apic apic_bigsmp = {
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
 
-       .wait_for_init_deassert         = default_wait_for_init_deassert,
-
+       .wait_for_init_deassert         = true,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,
 
index c55224731b2d7f0de89d6a37a00e71bd4d8792ec..6f8f8b348a39fbf378953936cc796d5e1842849f 100644 (file)
@@ -394,12 +394,6 @@ static void es7000_enable_apic_mode(void)
                WARN(1, "Command failed, status = %x\n", mip_status);
 }
 
-static void es7000_wait_for_init_deassert(atomic_t *deassert)
-{
-       while (!atomic_read(deassert))
-               cpu_relax();
-}
-
 static unsigned int es7000_get_apic_id(unsigned long x)
 {
        return (x >> 24) & 0xFF;
@@ -658,8 +652,7 @@ static struct apic __refdata apic_es7000_cluster = {
        .trampoline_phys_low            = 0x467,
        .trampoline_phys_high           = 0x469,
 
-       .wait_for_init_deassert         = NULL,
-
+       .wait_for_init_deassert         = false,
        /* Nothing to do for most platforms, since cleared by the INIT cycle: */
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,
@@ -722,8 +715,7 @@ static struct apic __refdata apic_es7000 = {
        .trampoline_phys_low            = 0x467,
        .trampoline_phys_high           = 0x469,
 
-       .wait_for_init_deassert         = es7000_wait_for_init_deassert,
-
+       .wait_for_init_deassert         = true,
        /* Nothing to do for most platforms, since cleared by the INIT cycle: */
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,
index 1e42e8f305ee2096ed52c20f6c569fe8a8339b7b..030ea1c04f72b3184e339ed840f7d13382641ba6 100644 (file)
@@ -505,8 +505,7 @@ static struct apic __refdata apic_numaq = {
        .trampoline_phys_high           = NUMAQ_TRAMPOLINE_PHYS_HIGH,
 
        /* We don't do anything here because we use NMI's to boot instead */
-       .wait_for_init_deassert         = NULL,
-
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = numaq_smp_callin_clear_local_apic,
        .inquire_remote_apic            = NULL,
 
index eb35ef9ee63f779bde870822a01dcc98bd503b01..cceb352c968c62b13404d3ce78478f64e7a5d3c5 100644 (file)
@@ -119,8 +119,7 @@ static struct apic apic_default = {
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
 
-       .wait_for_init_deassert         = default_wait_for_init_deassert,
-
+       .wait_for_init_deassert         = true,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,
 
index 00146f9b0254315acfb191015314fedf35fa9ab0..b656128611cd3ebd71af4ba0f053e72471845e21 100644 (file)
@@ -532,8 +532,7 @@ static struct apic apic_summit = {
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
 
-       .wait_for_init_deassert         = default_wait_for_init_deassert,
-
+       .wait_for_init_deassert         = true,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,
 
index cac85ee6913f70ca64f3a8656dbff5d5cc603348..e66766bf164191de15d7ea9bf7b62c6506b2d6f2 100644 (file)
@@ -279,7 +279,7 @@ static struct apic apic_x2apic_cluster = {
 
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
-       .wait_for_init_deassert         = NULL,
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,
 
index de231e328cae310bf4a39a1fb70ebc83993bb67c..6d600ebf6c127f94c4bb194a9e8bb25f0c607ffe 100644 (file)
@@ -133,7 +133,7 @@ static struct apic apic_x2apic_phys = {
 
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
-       .wait_for_init_deassert         = NULL,
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,
 
index d263b1307de1e5b30a114cc0f37c017197d5728f..7834389ba5be0952b5e673ff11d6a6c64cee106d 100644 (file)
@@ -396,7 +396,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
        .wakeup_secondary_cpu           = uv_wakeup_secondary,
        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
-       .wait_for_init_deassert         = NULL,
+       .wait_for_init_deassert         = false,
        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = NULL,
 
index c67ffa6860642af5e487345f7fcf6badb8554f48..ce8b8ff0e0ef4e1915272431c9b543e303101264 100644 (file)
@@ -218,7 +218,7 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
         */
        WARN_ONCE(1, "WARNING: This combination of AMD"
                " processors is not suitable for SMP.\n");
-       add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
+       add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 }
 
 static void init_amd_k7(struct cpuinfo_x86 *c)
@@ -233,9 +233,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
                        printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-                       rdmsr(MSR_K7_HWCR, l, h);
-                       l &= ~0x00008000;
-                       wrmsr(MSR_K7_HWCR, l, h);
+                       msr_clear_bit(MSR_K7_HWCR, 15);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
        }
@@ -509,14 +507,8 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 #endif
 
        /* F16h erratum 793, CVE-2013-6885 */
-       if (c->x86 == 0x16 && c->x86_model <= 0xf) {
-               u64 val;
-
-               rdmsrl(MSR_AMD64_LS_CFG, val);
-               if (!(val & BIT(15)))
-                       wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
-       }
-
+       if (c->x86 == 0x16 && c->x86_model <= 0xf)
+               msr_set_bit(MSR_AMD64_LS_CFG, 15);
 }
 
 static const int amd_erratum_383[];
@@ -536,11 +528,8 @@ static void init_amd(struct cpuinfo_x86 *c)
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
-       if (c->x86 == 0xf) {
-               rdmsrl(MSR_K7_HWCR, value);
-               value |= 1 << 6;
-               wrmsrl(MSR_K7_HWCR, value);
-       }
+       if (c->x86 == 0xf)
+               msr_set_bit(MSR_K7_HWCR, 6);
 #endif
 
        early_init_amd(c);
@@ -623,14 +612,11 @@ static void init_amd(struct cpuinfo_x86 *c)
            (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
-               if (!rdmsrl_safe(0xc0011005, &value)) {
-                       value |= 1ULL << 54;
-                       wrmsrl_safe(0xc0011005, value);
+               if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
-                       if (value & (1ULL << 54)) {
+                       if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-                               printk(KERN_INFO FW_INFO "CPU: Re-enabling "
-                                 "disabled Topology Extensions Support\n");
+                               pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }
@@ -709,19 +695,12 @@ static void init_amd(struct cpuinfo_x86 *c)
                 * Disable GART TLB Walk Errors on Fam10h. We do this here
                 * because this is always needed when GART is enabled, even in a
                 * kernel which has no MCE support built in.
-                * BIOS should disable GartTlbWlk Errors themself. If
-                * it doesn't do it here as suggested by the BKDG.
+                * BIOS should disable GartTlbWlk Errors already. If
+                * it doesn't, do it here as suggested by the BKDG.
                 *
                 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
                 */
-               u64 mask;
-               int err;
-
-               err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
-               if (err == 0) {
-                       mask |= (1 << 10);
-                       wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
-               }
+               msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 
                /*
                 * On family 10h BIOS may not have properly enabled WC+ support,
@@ -733,10 +712,7 @@ static void init_amd(struct cpuinfo_x86 *c)
                 * NOTE: we want to use the _safe accessors so as not to #GP kvm
                 * guests on older kvm hosts.
                 */
-
-               rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
-               value &= ~(1ULL << 24);
-               wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
+               msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 
                if (cpu_has_amd_erratum(c, amd_erratum_383))
                        set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
index 8e28bf2fc3ef4a15bf672dee29bc6729552556f2..a135239badb7fd4762ebf939ae755183660641b2 100644 (file)
@@ -1025,7 +1025,8 @@ __setup("show_msr=", setup_show_msr);
 
 static __init int setup_noclflush(char *arg)
 {
-       setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
+       setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
+       setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
        return 1;
 }
 __setup("noclflush", setup_noclflush);
@@ -1078,6 +1079,10 @@ static __init int setup_disablecpuid(char *arg)
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
 #ifdef CONFIG_X86_64
 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
 struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
@@ -1094,10 +1099,6 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
        &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(unsigned long, kernel_stack) =
-       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
-EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
        init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
index 5cd9bfabd6450e6743dc03479dad8cba38f9eec9..897d6201ef10b25ca98b3ea66ab4ff1706d5c81d 100644 (file)
@@ -31,11 +31,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 
        /* Unmask CPUID levels if masked: */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
-               rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-               if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
-                       misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
-                       wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+               if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+                                 MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
                        c->cpuid_level = cpuid_eax(0);
                        get_cpu_cap(c);
                }
@@ -129,16 +126,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
         * Ingo Molnar reported a Pentium D (model 6) and a Xeon
         * (model 2) with the same problem.
         */
-       if (c->x86 == 15) {
-               rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-               if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
-                       printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
-
-                       misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
-                       wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-               }
-       }
+       if (c->x86 == 15)
+               if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+                                 MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
+                       pr_info("kmemcheck: Disabling fast string operations\n");
 #endif
 
        /*
@@ -195,10 +186,16 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
        }
 }
 
-static void intel_workarounds(struct cpuinfo_x86 *c)
+static int forcepae;
+static int __init forcepae_setup(char *__unused)
 {
-       unsigned long lo, hi;
+       forcepae = 1;
+       return 1;
+}
+__setup("forcepae", forcepae_setup);
 
+static void intel_workarounds(struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_X86_F00F_BUG
        /*
         * All current models of Pentium and Pentium with MMX technology CPUs
@@ -224,17 +221,27 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);
 
+       /*
+        * PAE CPUID issue: many Pentium M CPUs report no PAE but may have a
+        * functionally usable PAE implementation.
+        * Forcefully enable PAE if kernel parameter "forcepae" is present.
+        */
+       if (forcepae) {
+               printk(KERN_WARNING "PAE forced!\n");
+               set_cpu_cap(c, X86_FEATURE_PAE);
+               add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
+       }
+
        /*
         * P4 Xeon errata 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
-               rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
-               if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
-                       printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
-                       printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-                       lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
-                       wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
+               if (msr_set_bit(MSR_IA32_MISC_ENABLE,
+                               MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+                   > 0) {
+                       pr_info("CPU: C0 stepping P4 Xeon detected.\n");
+                       pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
                }
        }
 
index 9f7ca266864a47cbb5484fead33a063d48f01f27..76f98fe5b35c4a2f3860b762b54f039cb7ad642c 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/hardirq.h>
 #include <linux/efi.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv.h>
 #include <asm/irq_regs.h>
 #include <asm/i8259.h>
 #include <asm/apic.h>
+#include <asm/timer.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
+#if IS_ENABLED(CONFIG_HYPERV)
+static void (*vmbus_handler)(void);
+
+void hyperv_vector_handler(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       irq_enter();
+       exit_idle();
+
+       inc_irq_stat(irq_hv_callback_count);
+       if (vmbus_handler)
+               vmbus_handler();
+
+       irq_exit();
+       set_irq_regs(old_regs);
+}
+
+void hv_setup_vmbus_irq(void (*handler)(void))
+{
+       vmbus_handler = handler;
+       /*
+        * Setup the IDT for hypervisor callback. Prevent reallocation
+        * at module reload.
+        */
+       if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
+               alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
+                               hyperv_callback_vector);
+}
+
+void hv_remove_vmbus_irq(void)
+{
+       /* We have no way to deallocate the interrupt gate */
+       vmbus_handler = NULL;
+}
+EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
+EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
+#endif
+
 static uint32_t  __init ms_hyperv_platform(void)
 {
        u32 eax;
@@ -105,6 +146,11 @@ static void __init ms_hyperv_init_platform(void)
 
        if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
                clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+
+#ifdef CONFIG_X86_IO_APIC
+       no_timer_check = 1;
+#endif
+
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -113,41 +159,3 @@ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
        .init_platform          = ms_hyperv_init_platform,
 };
 EXPORT_SYMBOL(x86_hyper_ms_hyperv);
-
-#if IS_ENABLED(CONFIG_HYPERV)
-static int vmbus_irq = -1;
-static irq_handler_t vmbus_isr;
-
-void hv_register_vmbus_handler(int irq, irq_handler_t handler)
-{
-       /*
-        * Setup the IDT for hypervisor callback.
-        */
-       alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-
-       vmbus_irq = irq;
-       vmbus_isr = handler;
-}
-
-void hyperv_vector_handler(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-       struct irq_desc *desc;
-
-       irq_enter();
-       exit_idle();
-
-       desc = irq_to_desc(vmbus_irq);
-
-       if (desc)
-               generic_handle_irq_desc(vmbus_irq, desc);
-
-       irq_exit();
-       set_irq_regs(old_regs);
-}
-#else
-void hv_register_vmbus_handler(int irq, irq_handler_t handler)
-{
-}
-#endif
-EXPORT_SYMBOL_GPL(hv_register_vmbus_handler);
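
The rewrite above drops the irq-number plumbing: the vmbus driver now hands the core a bare function pointer and hyperv_vector_handler() calls it directly, with the IDT gate allocated once and never freed. A small stand-alone sketch of that register/dispatch/unregister pattern (hypothetical names, no real interrupt vector involved):

#include <stdio.h>

/* Callback installed by the "vmbus driver"; NULL means nothing loaded. */
static void (*vmbus_handler)(void);

static void fake_vector_handler(void)
{
	/* Mirrors hyperv_vector_handler(): dispatch only if registered. */
	if (vmbus_handler)
		vmbus_handler();
}

static void setup_vmbus_irq(void (*handler)(void))
{
	vmbus_handler = handler;	/* one-time gate allocation omitted */
}

static void remove_vmbus_irq(void)
{
	vmbus_handler = NULL;		/* the gate itself cannot be freed */
}

static void demo_driver_callback(void)
{
	puts("vmbus callback ran");
}

int main(void)
{
	fake_vector_handler();			/* nothing registered: no-op */
	setup_vmbus_irq(demo_driver_callback);
	fake_vector_handler();			/* dispatches to the driver */
	remove_vmbus_irq();
	return 0;
}
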
index 79f9f848bee4b1c021b80bed07e0dcbcec88a20b..ae407f7226c89961fde14ddd6d932f174c64d8fc 100644 (file)
@@ -892,7 +892,6 @@ static void x86_pmu_enable(struct pmu *pmu)
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
-                * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
@@ -918,6 +917,9 @@ static void x86_pmu_enable(struct pmu *pmu)
                        x86_pmu_stop(event, PERF_EF_UPDATE);
                }
 
+               /*
+                * step2: reprogram moved events into new counters
+                */
                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
@@ -1043,7 +1045,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
-        * at commit time (->commit_txn) as a whole
+        * at commit time (->commit_txn) as a whole.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                goto done_collect;
@@ -1058,6 +1060,10 @@ static int x86_pmu_add(struct perf_event *event, int flags)
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
 done_collect:
+       /*
+        * Commit the collect_events() state. See x86_pmu_del() and
+        * x86_pmu_*_txn().
+        */
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;
@@ -1183,28 +1189,38 @@ static void x86_pmu_del(struct perf_event *event, int flags)
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
         * the event_list.
+        *
+        * XXX assumes any ->del() called during a TXN will only be on
+        * an event added during that same TXN.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;
 
+       /*
+        * Not a TXN, therefore cleanup properly.
+        */
        x86_pmu_stop(event, PERF_EF_UPDATE);
 
        for (i = 0; i < cpuc->n_events; i++) {
-               if (event == cpuc->event_list[i]) {
+               if (event == cpuc->event_list[i])
+                       break;
+       }
 
-                       if (i >= cpuc->n_events - cpuc->n_added)
-                               --cpuc->n_added;
+       if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
+               return;
 
-                       if (x86_pmu.put_event_constraints)
-                               x86_pmu.put_event_constraints(cpuc, event);
+       /* If we have a newly added event; make sure to decrease n_added. */
+       if (i >= cpuc->n_events - cpuc->n_added)
+               --cpuc->n_added;
 
-                       while (++i < cpuc->n_events)
-                               cpuc->event_list[i-1] = cpuc->event_list[i];
+       if (x86_pmu.put_event_constraints)
+               x86_pmu.put_event_constraints(cpuc, event);
+
+       /* Delete the array entry. */
+       while (++i < cpuc->n_events)
+               cpuc->event_list[i-1] = cpuc->event_list[i];
+       --cpuc->n_events;
 
-                       --cpuc->n_events;
-                       break;
-               }
-       }
        perf_event_update_userpage(event);
 }
 
@@ -1598,7 +1614,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
        __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
        /*
-        * Truncate the collected events.
+        * Truncate collected array by the number of events added in this
+        * transaction. See x86_pmu_add() and x86_pmu_*_txn().
         */
        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
@@ -1609,6 +1626,8 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
  * Commit group events scheduling transaction
  * Perform the group schedulability test as a whole
  * Return 0 if success
+ *
+ * Does not cancel the transaction on failure; expects the caller to do this.
  */
 static int x86_pmu_commit_txn(struct pmu *pmu)
 {
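
The restructured x86_pmu_del() above first searches event_list[], warns if ->del() arrives for an event that was never ->add()ed, and only then slides the tail of the array down over the deleted slot. A self-contained sketch of that find-warn-compact pattern on a plain int array (illustrative only, not the perf API):

#include <stdio.h>

static int event_list[8] = { 11, 22, 33, 44 };
static int n_events = 4;

/* Remove @event from the list; returns 0 on success, -1 if never added. */
static int del_event(int event)
{
	int i;

	for (i = 0; i < n_events; i++) {
		if (event_list[i] == event)
			break;
	}
	if (i == n_events)
		return -1;	/* ->del() without ->add(): warn and bail */

	/* Delete the array entry by sliding the tail down one slot. */
	while (++i < n_events)
		event_list[i - 1] = event_list[i];
	--n_events;

	return 0;
}

int main(void)
{
	int ret = del_event(33);
	printf("del 33 -> %d, n_events=%d\n", ret, n_events);	/* 0, 3 */
	ret = del_event(99);
	printf("del 99 -> %d, n_events=%d\n", ret, n_events);	/* -1, 3 */
	return 0;
}
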
index 4972c244d0bc2fbe445706943e0248073fd17a26..3b2f9bdd974be198d0622e306ddbba427add2d25 100644 (file)
@@ -130,9 +130,11 @@ struct cpu_hw_events {
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;
 
-       int                     n_events;
-       int                     n_added;
-       int                     n_txn;
+       int                     n_events; /* the # of events in the below arrays */
+       int                     n_added;  /* the # last events in the below arrays;
+                                            they've never been enabled yet */
+       int                     n_txn;    /* the # last events in the below arrays;
+                                            added in the current transaction */
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
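
The new field comments describe how n_added and n_txn shadow the tail of event_list[]: x86_pmu_add() bumps all three counters and x86_pmu_cancel_txn() subtracts n_txn back out. A toy model of just that bookkeeping arithmetic (no events, only the counters; not the perf API):

#include <stdio.h>

static int n_events, n_added, n_txn;

static void txn_start(void)
{
	n_txn = 0;			/* nothing collected in this txn yet */
}

static void txn_add(int n)
{
	/* collect_events() succeeded for n more events */
	n_events += n;
	n_added  += n;
	n_txn    += n;
}

static void txn_cancel(void)
{
	/* truncate exactly what this transaction collected */
	n_added  -= n_txn;
	n_events -= n_txn;
	n_txn = 0;
}

int main(void)
{
	txn_start();
	txn_add(2);
	txn_add(1);
	txn_cancel();
	printf("n_events=%d n_added=%d n_txn=%d\n",
	       n_events, n_added, n_txn);		/* all back to 0 */
	return 0;
}
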
index 047f540cf3f71cfe03f69d796a1957b8a58caebc..bd2253d40cffe16363569384084bdf6d6d73e7f5 100644 (file)
@@ -66,6 +66,47 @@ DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
 DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
 DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
 
+static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
+static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
+static void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
+static void uncore_pmu_event_read(struct perf_event *event);
+
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+       return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+       struct intel_uncore_box *box;
+
+       box = *per_cpu_ptr(pmu->box, cpu);
+       if (box)
+               return box;
+
+       raw_spin_lock(&uncore_box_lock);
+       list_for_each_entry(box, &pmu->box_list, list) {
+               if (box->phys_id == topology_physical_package_id(cpu)) {
+                       atomic_inc(&box->refcnt);
+                       *per_cpu_ptr(pmu->box, cpu) = box;
+                       break;
+               }
+       }
+       raw_spin_unlock(&uncore_box_lock);
+
+       return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+       /*
+        * perf core schedules events on the basis of cpu; uncore events are
+        * collected by one of the cpus inside a physical package.
+        */
+       return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+}
+
 static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
 {
        u64 count;
@@ -1639,6 +1680,349 @@ static struct intel_uncore_type *snb_msr_uncores[] = {
        &snb_uncore_cbox,
        NULL,
 };
+
+enum {
+       SNB_PCI_UNCORE_IMC,
+};
+
+static struct uncore_event_desc snb_uncore_imc_events[] = {
+       INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
+       INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
+       INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),
+
+       INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
+       INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
+       INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),
+
+       { /* end: all zeroes */ },
+};
+
+#define SNB_UNCORE_PCI_IMC_EVENT_MASK          0xff
+#define SNB_UNCORE_PCI_IMC_BAR_OFFSET          0x48
+
+/* page size multiple covering all config regs */
+#define SNB_UNCORE_PCI_IMC_MAP_SIZE            0x6000
+
+#define SNB_UNCORE_PCI_IMC_DATA_READS          0x1
+#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE     0x5050
+#define SNB_UNCORE_PCI_IMC_DATA_WRITES         0x2
+#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE    0x5054
+#define SNB_UNCORE_PCI_IMC_CTR_BASE            SNB_UNCORE_PCI_IMC_DATA_READS_BASE
+
+static struct attribute *snb_uncore_imc_formats_attr[] = {
+       &format_attr_event.attr,
+       NULL,
+};
+
+static struct attribute_group snb_uncore_imc_format_group = {
+       .name = "format",
+       .attrs = snb_uncore_imc_formats_attr,
+};
+
+static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
+{
+       struct pci_dev *pdev = box->pci_dev;
+       int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
+       resource_size_t addr;
+       u32 pci_dword;
+
+       pci_read_config_dword(pdev, where, &pci_dword);
+       addr = pci_dword;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       pci_read_config_dword(pdev, where + 4, &pci_dword);
+       addr |= ((resource_size_t)pci_dword << 32);
+#endif
+
+       addr &= ~(PAGE_SIZE - 1);
+
+       box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
+       box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
+}
+
+static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
+{}
+
+static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
+{}
+
+static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{}
+
+static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
+{}
+
+static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
+}
+
+/*
+ * custom event_init() function because we define our own fixed, free
+ * running counters, so we do not want to conflict with generic uncore
+ * logic. Also simplifies processing
+ */
+static int snb_uncore_imc_event_init(struct perf_event *event)
+{
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       struct hw_perf_event *hwc = &event->hw;
+       u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
+       int idx, base;
+
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
+       pmu = uncore_event_to_pmu(event);
+       /* no device found for this pmu */
+       if (pmu->func_id < 0)
+               return -ENOENT;
+
+       /* Sampling not supported yet */
+       if (hwc->sample_period)
+               return -EINVAL;
+
+       /* unsupported modes and filters */
+       if (event->attr.exclude_user   ||
+           event->attr.exclude_kernel ||
+           event->attr.exclude_hv     ||
+           event->attr.exclude_idle   ||
+           event->attr.exclude_host   ||
+           event->attr.exclude_guest  ||
+           event->attr.sample_period) /* no sampling */
+               return -EINVAL;
+
+       /*
+        * Place all uncore events for a particular physical package
+        * onto a single cpu
+        */
+       if (event->cpu < 0)
+               return -EINVAL;
+
+       /* check only supported bits are set */
+       if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
+               return -EINVAL;
+
+       box = uncore_pmu_to_box(pmu, event->cpu);
+       if (!box || box->cpu < 0)
+               return -EINVAL;
+
+       event->cpu = box->cpu;
+
+       event->hw.idx = -1;
+       event->hw.last_tag = ~0ULL;
+       event->hw.extra_reg.idx = EXTRA_REG_NONE;
+       event->hw.branch_reg.idx = EXTRA_REG_NONE;
+       /*
+        * check event is known (whitelist, determines counter)
+        */
+       switch (cfg) {
+       case SNB_UNCORE_PCI_IMC_DATA_READS:
+               base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
+               idx = UNCORE_PMC_IDX_FIXED;
+               break;
+       case SNB_UNCORE_PCI_IMC_DATA_WRITES:
+               base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
+               idx = UNCORE_PMC_IDX_FIXED + 1;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* must be done before validate_group */
+       event->hw.event_base = base;
+       event->hw.config = cfg;
+       event->hw.idx = idx;
+
+       /* no group validation needed, we have free running counters */
+
+       return 0;
+}
+
+static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+       return 0;
+}
+
+static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       u64 count;
+
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+               return;
+
+       event->hw.state = 0;
+       box->n_active++;
+
+       list_add_tail(&event->active_entry, &box->active_list);
+
+       count = snb_uncore_imc_read_counter(box, event);
+       local64_set(&event->hw.prev_count, count);
+
+       if (box->n_active == 1)
+               uncore_pmu_start_hrtimer(box);
+}
+
+static void snb_uncore_imc_event_stop(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!(hwc->state & PERF_HES_STOPPED)) {
+               box->n_active--;
+
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+
+               list_del(&event->active_entry);
+
+               if (box->n_active == 0)
+                       uncore_pmu_cancel_hrtimer(box);
+       }
+
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               /*
+                * Drain the remaining delta count out of an event
+                * that we are disabling:
+                */
+               uncore_perf_event_update(box, event);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
+}
+
+static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (!box)
+               return -ENODEV;
+
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_ARCH;
+
+       snb_uncore_imc_event_start(event, 0);
+
+       box->n_events++;
+
+       return 0;
+}
+
+static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
+{
+       struct intel_uncore_box *box = uncore_event_to_box(event);
+       int i;
+
+       snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
+
+       for (i = 0; i < box->n_events; i++) {
+               if (event == box->event_list[i]) {
+                       --box->n_events;
+                       break;
+               }
+       }
+}
+
+static int snb_pci2phy_map_init(int devid)
+{
+       struct pci_dev *dev = NULL;
+       int bus;
+
+       dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
+       if (!dev)
+               return -ENOTTY;
+
+       bus = dev->bus->number;
+
+       pcibus_to_physid[bus] = 0;
+
+       pci_dev_put(dev);
+
+       return 0;
+}
+
+static struct pmu snb_uncore_imc_pmu = {
+       .task_ctx_nr    = perf_invalid_context,
+       .event_init     = snb_uncore_imc_event_init,
+       .add            = snb_uncore_imc_event_add,
+       .del            = snb_uncore_imc_event_del,
+       .start          = snb_uncore_imc_event_start,
+       .stop           = snb_uncore_imc_event_stop,
+       .read           = uncore_pmu_event_read,
+};
+
+static struct intel_uncore_ops snb_uncore_imc_ops = {
+       .init_box       = snb_uncore_imc_init_box,
+       .enable_box     = snb_uncore_imc_enable_box,
+       .disable_box    = snb_uncore_imc_disable_box,
+       .disable_event  = snb_uncore_imc_disable_event,
+       .enable_event   = snb_uncore_imc_enable_event,
+       .hw_config      = snb_uncore_imc_hw_config,
+       .read_counter   = snb_uncore_imc_read_counter,
+};
+
+static struct intel_uncore_type snb_uncore_imc = {
+       .name           = "imc",
+       .num_counters   = 2,
+       .num_boxes      = 1,
+       .fixed_ctr_bits = 32,
+       .fixed_ctr      = SNB_UNCORE_PCI_IMC_CTR_BASE,
+       .event_descs    = snb_uncore_imc_events,
+       .format_group   = &snb_uncore_imc_format_group,
+       .perf_ctr       = SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
+       .event_mask     = SNB_UNCORE_PCI_IMC_EVENT_MASK,
+       .ops            = &snb_uncore_imc_ops,
+       .pmu            = &snb_uncore_imc_pmu,
+};
+
+static struct intel_uncore_type *snb_pci_uncores[] = {
+       [SNB_PCI_UNCORE_IMC]    = &snb_uncore_imc,
+       NULL,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(snb_uncore_pci_ids) = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(ivb_uncore_pci_ids) = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(hsw_uncore_pci_ids) = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ },
+};
+
+static struct pci_driver snb_uncore_pci_driver = {
+       .name           = "snb_uncore",
+       .id_table       = snb_uncore_pci_ids,
+};
+
+static struct pci_driver ivb_uncore_pci_driver = {
+       .name           = "ivb_uncore",
+       .id_table       = ivb_uncore_pci_ids,
+};
+
+static struct pci_driver hsw_uncore_pci_driver = {
+       .name           = "hsw_uncore",
+       .id_table       = hsw_uncore_pci_ids,
+};
+
 /* end of Sandy Bridge uncore support */
 
 /* Nehalem uncore support */
@@ -2789,6 +3173,7 @@ again:
 static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
 {
        struct intel_uncore_box *box;
+       struct perf_event *event;
        unsigned long flags;
        int bit;
 
@@ -2801,19 +3186,27 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
         */
        local_irq_save(flags);
 
+       /*
+        * handle boxes with an active event list as opposed to active
+        * counters
+        */
+       list_for_each_entry(event, &box->active_list, active_entry) {
+               uncore_perf_event_update(box, event);
+       }
+
        for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
                uncore_perf_event_update(box, box->events[bit]);
 
        local_irq_restore(flags);
 
-       hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
+       hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
        return HRTIMER_RESTART;
 }
 
 static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
 {
        __hrtimer_start_range_ns(&box->hrtimer,
-                       ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
+                       ns_to_ktime(box->hrtimer_duration), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
 }
 
@@ -2847,43 +3240,12 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
        box->cpu = -1;
        box->phys_id = -1;
 
-       return box;
-}
+       /* set default hrtimer timeout */
+       box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
 
-static struct intel_uncore_box *
-uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
-{
-       struct intel_uncore_box *box;
+       INIT_LIST_HEAD(&box->active_list);
 
-       box = *per_cpu_ptr(pmu->box, cpu);
-       if (box)
-               return box;
-
-       raw_spin_lock(&uncore_box_lock);
-       list_for_each_entry(box, &pmu->box_list, list) {
-               if (box->phys_id == topology_physical_package_id(cpu)) {
-                       atomic_inc(&box->refcnt);
-                       *per_cpu_ptr(pmu->box, cpu) = box;
-                       break;
-               }
-       }
-       raw_spin_unlock(&uncore_box_lock);
-
-       return *per_cpu_ptr(pmu->box, cpu);
-}
-
-static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
-{
-       return container_of(event->pmu, struct intel_uncore_pmu, pmu);
-}
-
-static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
-{
-       /*
-        * perf core schedules event on the basis of cpu, uncore events are
-        * collected by one of the cpus inside a physical package.
-        */
-       return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
+       return box;
 }
 
 static int
@@ -3279,16 +3641,21 @@ static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
 {
        int ret;
 
-       pmu->pmu = (struct pmu) {
-               .attr_groups    = pmu->type->attr_groups,
-               .task_ctx_nr    = perf_invalid_context,
-               .event_init     = uncore_pmu_event_init,
-               .add            = uncore_pmu_event_add,
-               .del            = uncore_pmu_event_del,
-               .start          = uncore_pmu_event_start,
-               .stop           = uncore_pmu_event_stop,
-               .read           = uncore_pmu_event_read,
-       };
+       if (!pmu->type->pmu) {
+               pmu->pmu = (struct pmu) {
+                       .attr_groups    = pmu->type->attr_groups,
+                       .task_ctx_nr    = perf_invalid_context,
+                       .event_init     = uncore_pmu_event_init,
+                       .add            = uncore_pmu_event_add,
+                       .del            = uncore_pmu_event_del,
+                       .start          = uncore_pmu_event_start,
+                       .stop           = uncore_pmu_event_stop,
+                       .read           = uncore_pmu_event_read,
+               };
+       } else {
+               pmu->pmu = *pmu->type->pmu;
+               pmu->pmu.attr_groups = pmu->type->attr_groups;
+       }
 
        if (pmu->type->num_boxes == 1) {
                if (strlen(pmu->type->name) > 0)
@@ -3502,6 +3869,28 @@ static int __init uncore_pci_init(void)
                pci_uncores = ivt_pci_uncores;
                uncore_pci_driver = &ivt_uncore_pci_driver;
                break;
+       case 42: /* Sandy Bridge */
+               ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_SNB_IMC);
+               if (ret)
+                       return ret;
+               pci_uncores = snb_pci_uncores;
+               uncore_pci_driver = &snb_uncore_pci_driver;
+               break;
+       case 58: /* Ivy Bridge */
+               ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_IVB_IMC);
+               if (ret)
+                       return ret;
+               pci_uncores = snb_pci_uncores;
+               uncore_pci_driver = &ivb_uncore_pci_driver;
+               break;
+       case 60: /* Haswell */
+       case 69: /* Haswell Celeron */
+               ret = snb_pci2phy_map_init(PCI_DEVICE_ID_INTEL_HSW_IMC);
+               if (ret)
+                       return ret;
+               pci_uncores = snb_pci_uncores;
+               uncore_pci_driver = &hsw_uncore_pci_driver;
+               break;
        default:
                return 0;
        }
@@ -3773,7 +4162,7 @@ static void __init uncore_cpu_setup(void *dummy)
 
 static int __init uncore_cpu_init(void)
 {
-       int ret, cpu, max_cores;
+       int ret, max_cores;
 
        max_cores = boot_cpu_data.x86_max_cores;
        switch (boot_cpu_data.x86_model) {
@@ -3817,29 +4206,6 @@ static int __init uncore_cpu_init(void)
        if (ret)
                return ret;
 
-       get_online_cpus();
-
-       for_each_online_cpu(cpu) {
-               int i, phys_id = topology_physical_package_id(cpu);
-
-               for_each_cpu(i, &uncore_cpu_mask) {
-                       if (phys_id == topology_physical_package_id(i)) {
-                               phys_id = -1;
-                               break;
-                       }
-               }
-               if (phys_id < 0)
-                       continue;
-
-               uncore_cpu_prepare(cpu, phys_id);
-               uncore_event_init_cpu(cpu);
-       }
-       on_each_cpu(uncore_cpu_setup, NULL, 1);
-
-       register_cpu_notifier(&uncore_cpu_nb);
-
-       put_online_cpus();
-
        return 0;
 }
 
@@ -3868,6 +4234,41 @@ static int __init uncore_pmus_register(void)
        return 0;
 }
 
+static void __init uncore_cpumask_init(void)
+{
+       int cpu;
+
+       /*
+        * only invoke once from msr or pci init code
+        */
+       if (!cpumask_empty(&uncore_cpu_mask))
+               return;
+
+       get_online_cpus();
+
+       for_each_online_cpu(cpu) {
+               int i, phys_id = topology_physical_package_id(cpu);
+
+               for_each_cpu(i, &uncore_cpu_mask) {
+                       if (phys_id == topology_physical_package_id(i)) {
+                               phys_id = -1;
+                               break;
+                       }
+               }
+               if (phys_id < 0)
+                       continue;
+
+               uncore_cpu_prepare(cpu, phys_id);
+               uncore_event_init_cpu(cpu);
+       }
+       on_each_cpu(uncore_cpu_setup, NULL, 1);
+
+       register_cpu_notifier(&uncore_cpu_nb);
+
+       put_online_cpus();
+}
+
+
 static int __init intel_uncore_init(void)
 {
        int ret;
@@ -3886,6 +4287,7 @@ static int __init intel_uncore_init(void)
                uncore_pci_exit();
                goto fail;
        }
+       uncore_cpumask_init();
 
        uncore_pmus_register();
        return 0;
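
The SNB IMC counters added in this file are 32-bit free-running MMIO counters: they are never started or stopped, only sampled, and the 5-second hrtimer re-reads them often enough that at most one wrap can occur between samples. A minimal sketch of wrap-safe delta accumulation for such a counter, with an ordinary variable standing in for the MMIO read (illustrative, matching the 32-bit fixed_ctr_bits above):

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_counter;		/* stands in for the MMIO read */
static uint64_t prev_count, total;

static void counter_start(uint32_t raw)
{
	prev_count = raw;		/* baseline only, nothing accumulated */
}

/* Fold a new raw sample into the running 64-bit total. */
static void counter_update(uint32_t raw)
{
	uint64_t delta = (uint64_t)raw - prev_count;

	delta &= 0xffffffffULL;		/* at most one 32-bit wrap per sample */
	total += delta;
	prev_count = raw;
}

int main(void)
{
	hw_counter = 0xfffffff0u;
	counter_start(hw_counter);
	hw_counter += 0x20;		/* wraps around zero */
	counter_update(hw_counter);
	printf("total=%llu\n", (unsigned long long)total);	/* prints 32 */
	return 0;
}
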
index a80ab71a883de06be3cbed3a8c0848aa1dfff69a..90236f0c94a90679506973efe6e56c51bfb9bb2f 100644 (file)
@@ -6,6 +6,7 @@
 
 #define UNCORE_PMU_NAME_LEN            32
 #define UNCORE_PMU_HRTIMER_INTERVAL    (60LL * NSEC_PER_SEC)
+#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)
 
 #define UNCORE_FIXED_EVENT             0xff
 #define UNCORE_PMC_IDX_MAX_GENERIC     8
@@ -440,6 +441,7 @@ struct intel_uncore_type {
        struct intel_uncore_ops *ops;
        struct uncore_event_desc *event_descs;
        const struct attribute_group *attr_groups[4];
+       struct pmu *pmu; /* for custom pmu ops */
 };
 
 #define pmu_group attr_groups[0]
@@ -488,8 +490,11 @@ struct intel_uncore_box {
        u64 tags[UNCORE_PMC_IDX_MAX];
        struct pci_dev *pci_dev;
        struct intel_uncore_pmu *pmu;
+       u64 hrtimer_duration; /* hrtimer timeout for this box */
        struct hrtimer hrtimer;
        struct list_head list;
+       struct list_head active_list;
+       void *io_addr;
        struct intel_uncore_extra_reg shared_regs[0];
 };
 
index 3486e6660357e7b365f59c279d748b188a6e72e4..5d466b7d8609814355c90e2cf09ced760debd458 100644 (file)
@@ -1257,7 +1257,24 @@ again:
                        pass++;
                        goto again;
                }
-
+               /*
+                * Perf does test runs to see if a whole group can be assigned
+                * together successfully.  There can be multiple rounds of this.
+                * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
+                * bits, such that the next round of group assignments will
+                * cause the above p4_should_swap_ts to pass instead of fail.
+                * This leads to counters exclusive to thread0 being used by
+                * thread1.
+                *
+                * Solve this with a cheap hack, reset the idx back to -1 to
+                * force a new lookup (p4_next_cntr) to get the right counter
+                * for the right thread.
+                *
+                * This probably doesn't comply with the general spirit of how
+                * perf wants to work, but P4 is special. :-(
+                */
+               if (p4_should_swap_ts(hwc->config, cpu))
+                       hwc->idx = -1;
                p4_pmu_swap_config_ts(hwc, cpu);
                if (assign)
                        assign[i] = cntr_idx;
@@ -1322,6 +1339,7 @@ static __initconst const struct x86_pmu p4_pmu = {
 __init int p4_pmu_init(void)
 {
        unsigned int low, high;
+       int i, reg;
 
        /* If we get stripped -- indexing fails */
        BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
@@ -1340,5 +1358,19 @@ __init int p4_pmu_init(void)
 
        x86_pmu = p4_pmu;
 
+       /*
+        * Even though the counters are configured to interrupt a particular
+        * logical processor when an overflow happens, testing has shown that
+        * on kdump kernels (which use a single cpu), thread1's counter
+        * continues to run and will report an NMI on thread0.  Due to the
+        * overflow bug, this leads to a stream of unknown NMIs.
+        *
+        * Solve this by zeroing out the registers to mimic a reset.
+        */
+       for (i = 0; i < x86_pmu.num_counters; i++) {
+               reg = x86_pmu_config_addr(i);
+               wrmsrl_safe(reg, 0ULL);
+       }
+
        return 0;
 }
index a57902efe2d597be4af85c9d52d995360d54dadb..507de80665942b87a148e032fd8aeedfeb76c70b 100644 (file)
@@ -57,9 +57,7 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
        struct pt_regs fixed_regs;
-#endif
 
-#ifdef CONFIG_X86_32
        if (!user_mode_vm(regs)) {
                crash_fixup_ss_esp(&fixed_regs, regs);
                regs = &fixed_regs;
index f2a1770ca176381b843f9ab6f08327e55a9a3d6c..5abd4cd4230c69f3ff4730e97a1297be40013c44 100644 (file)
 
 #include <asm/stacktrace.h>
 
+static void *is_irq_stack(void *p, void *irq)
+{
+       if (p < irq || p >= (irq + THREAD_SIZE))
+               return NULL;
+       return irq + THREAD_SIZE;
+}
+
+
+static void *is_hardirq_stack(unsigned long *stack, int cpu)
+{
+       void *irq = per_cpu(hardirq_stack, cpu);
+
+       return is_irq_stack(stack, irq);
+}
+
+static void *is_softirq_stack(unsigned long *stack, int cpu)
+{
+       void *irq = per_cpu(softirq_stack, cpu);
+
+       return is_irq_stack(stack, irq);
+}
 
 void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data)
 {
+       const unsigned cpu = get_cpu();
        int graph = 0;
+       u32 *prev_esp;
 
        if (!task)
                task = current;
@@ -30,7 +53,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long dummy;
 
                stack = &dummy;
-               if (task && task != current)
+               if (task != current)
                        stack = (unsigned long *)task->thread.sp;
        }
 
@@ -39,18 +62,31 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
        for (;;) {
                struct thread_info *context;
+               void *end_stack;
+
+               end_stack = is_hardirq_stack(stack, cpu);
+               if (!end_stack)
+                       end_stack = is_softirq_stack(stack, cpu);
 
-               context = (struct thread_info *)
-                       ((unsigned long)stack & (~(THREAD_SIZE - 1)));
-               bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
+               context = task_thread_info(task);
+               bp = ops->walk_stack(context, stack, bp, ops, data,
+                                    end_stack, &graph);
 
-               stack = (unsigned long *)context->previous_esp;
+               /* Stop if not on irq stack */
+               if (!end_stack)
+                       break;
+
+               /* The previous esp is saved on the bottom of the stack */
+               prev_esp = (u32 *)(end_stack - THREAD_SIZE);
+               stack = (unsigned long *)*prev_esp;
                if (!stack)
                        break;
+
                if (ops->stack(data, "IRQ") < 0)
                        break;
                touch_nmi_watchdog();
        }
+       put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
 
index addb207dab92d11ebe4d48650babe07db36a1539..346b1df2412e238a35a558f66ec2b210b4710bf7 100644 (file)
@@ -104,6 +104,45 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
        return (stack >= irq_stack && stack < irq_stack_end);
 }
 
+static const unsigned long irq_stack_size =
+       (IRQ_STACK_SIZE - 64) / sizeof(unsigned long);
+
+enum stack_type {
+       STACK_IS_UNKNOWN,
+       STACK_IS_NORMAL,
+       STACK_IS_EXCEPTION,
+       STACK_IS_IRQ,
+};
+
+static enum stack_type
+analyze_stack(int cpu, struct task_struct *task,
+             unsigned long *stack, unsigned long **stack_end, char **id)
+{
+       unsigned long *irq_stack;
+       unsigned long addr;
+       unsigned used = 0;
+
+       addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+       if ((unsigned long)task_stack_page(task) == addr)
+               return STACK_IS_NORMAL;
+
+       *stack_end = in_exception_stack(cpu, (unsigned long)stack,
+                                        &used, id);
+       if (*stack_end)
+               return STACK_IS_EXCEPTION;
+
+       *stack_end = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+       if (!*stack_end)
+               return STACK_IS_UNKNOWN;
+
+       irq_stack = *stack_end - irq_stack_size;
+
+       if (in_irq_stack(stack, irq_stack, *stack_end))
+               return STACK_IS_IRQ;
+
+       return STACK_IS_UNKNOWN;
+}
+
 /*
  * x86-64 can have up to three kernel stacks:
  * process stack
@@ -116,12 +155,11 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                const struct stacktrace_ops *ops, void *data)
 {
        const unsigned cpu = get_cpu();
-       unsigned long *irq_stack_end =
-               (unsigned long *)per_cpu(irq_stack_ptr, cpu);
-       unsigned used = 0;
        struct thread_info *tinfo;
-       int graph = 0;
+       unsigned long *irq_stack;
        unsigned long dummy;
+       int graph = 0;
+       int done = 0;
 
        if (!task)
                task = current;
@@ -143,49 +181,60 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         * exceptions
         */
        tinfo = task_thread_info(task);
-       for (;;) {
+       while (!done) {
+               unsigned long *stack_end;
+               enum stack_type stype;
                char *id;
-               unsigned long *estack_end;
-               estack_end = in_exception_stack(cpu, (unsigned long)stack,
-                                               &used, &id);
 
-               if (estack_end) {
+               stype = analyze_stack(cpu, task, stack, &stack_end, &id);
+
+               /* Default finish unless specified to continue */
+               done = 1;
+
+               switch (stype) {
+
+               /* Break out early if we are on the thread stack */
+               case STACK_IS_NORMAL:
+                       break;
+
+               case STACK_IS_EXCEPTION:
+
                        if (ops->stack(data, id) < 0)
                                break;
 
                        bp = ops->walk_stack(tinfo, stack, bp, ops,
-                                            data, estack_end, &graph);
+                                            data, stack_end, &graph);
                        ops->stack(data, "<EOE>");
                        /*
                         * We link to the next stack via the
                         * second-to-last pointer (index -2 to end) in the
                         * exception stack:
                         */
-                       stack = (unsigned long *) estack_end[-2];
-                       continue;
-               }
-               if (irq_stack_end) {
-                       unsigned long *irq_stack;
-                       irq_stack = irq_stack_end -
-                               (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
-
-                       if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
-                               if (ops->stack(data, "IRQ") < 0)
-                                       break;
-                               bp = ops->walk_stack(tinfo, stack, bp,
-                                       ops, data, irq_stack_end, &graph);
-                               /*
-                                * We link to the next stack (which would be
-                                * the process stack normally) the last
-                                * pointer (index -1 to end) in the IRQ stack:
-                                */
-                               stack = (unsigned long *) (irq_stack_end[-1]);
-                               irq_stack_end = NULL;
-                               ops->stack(data, "EOI");
-                               continue;
-                       }
+                       stack = (unsigned long *) stack_end[-2];
+                       done = 0;
+                       break;
+
+               case STACK_IS_IRQ:
+
+                       if (ops->stack(data, "IRQ") < 0)
+                               break;
+                       bp = ops->walk_stack(tinfo, stack, bp,
+                                    ops, data, stack_end, &graph);
+                       /*
+                        * We link to the next stack (which would be
+                        * the process stack normally) the last
+                        * pointer (index -1 to end) in the IRQ stack:
+                        */
+                       stack = (unsigned long *) (stack_end[-1]);
+                       irq_stack = stack_end - irq_stack_size;
+                       ops->stack(data, "EOI");
+                       done = 0;
+                       break;
+
+               case STACK_IS_UNKNOWN:
+                       ops->stack(data, "UNK");
+                       break;
                }
-               break;
        }
 
        /*
index bc4a088f902396721e08a2d7314a451b7597eb1c..6d7d5a1260a68aca347b2b3d056a6d24dbf94f42 100644 (file)
@@ -203,18 +203,15 @@ static void __init intel_remapping_check(int num, int slot, int func)
        revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
 
        /*
-        * Revision 13 of all triggering devices id in this quirk have
-        * a problem draining interrupts when irq remapping is enabled,
-        * and should be flagged as broken.  Additionally revisions 0x12
-        * and 0x22 of device id 0x3405 has this problem.
+        * Revision <= 13 of all triggering devices id in this quirk
+        * have a problem draining interrupts when irq remapping is
+        * enabled, and should be flagged as broken. Additionally
+        * revision 0x22 of device id 0x3405 has this problem.
         */
-       if (revision == 0x13)
+       if (revision <= 0x13)
                set_irq_remapping_broken();
-       else if ((device == 0x3405) &&
-           ((revision == 0x12) ||
-            (revision == 0x22)))
+       else if (device == 0x3405 && revision == 0x22)
                set_irq_remapping_broken();
-
 }
 
 /*
index da85a8e830a12d65e9d2f8a7f1db39b5477e9e85..014618dbaa7b4df9a925ffdae1d2975b07eca672 100644 (file)
@@ -521,7 +521,7 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {
 
        if (request_irq(dev->irq, hpet_interrupt_handler,
-                       IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
+                       IRQF_TIMER | IRQF_NOBALANCING,
                        dev->name, dev))
                return -1;
 
@@ -699,7 +699,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
                /* FIXME: add schedule_work_on() */
                schedule_delayed_work_on(cpu, &work.work, 0);
                wait_for_completion(&work.complete);
-               destroy_timer_on_stack(&work.work.timer);
+               destroy_delayed_work_on_stack(&work.work);
                break;
        case CPU_DEAD:
                if (hdev) {
index d99f31d9a750216204a0c61faef3e489818ef925..42805fac009215ac5c70bdd71fdc2ad5d72dfa89 100644 (file)
@@ -124,6 +124,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_printf(p, "  Machine check polls\n");
+#endif
+#if defined(CONFIG_HYPERV) || defined(CONFIG_XEN)
+       seq_printf(p, "%*s: ", prec, "THR");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
+       seq_printf(p, "  Hypervisor callback interrupts\n");
 #endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
index d7fcbedc9c43fa9659b7f2f2c687b8402a826dd2..63ce838e5a5423ad3f425368f1c5adffb8c8356e 100644 (file)
@@ -55,16 +55,8 @@ static inline int check_stack_overflow(void) { return 0; }
 static inline void print_stack_overflow(void) { }
 #endif
 
-/*
- * per-CPU IRQ handling contexts (thread information and stack)
- */
-union irq_ctx {
-       struct thread_info      tinfo;
-       u32                     stack[THREAD_SIZE/sizeof(u32)];
-} __attribute__((aligned(THREAD_SIZE)));
-
-static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
-static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
+DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
+DEFINE_PER_CPU(struct irq_stack *, softirq_stack);
 
 static void call_on_stack(void *func, void *stack)
 {
@@ -77,14 +69,26 @@ static void call_on_stack(void *func, void *stack)
                     : "memory", "cc", "edx", "ecx", "eax");
 }
 
+/* how to get the current stack pointer from C */
+#define current_stack_pointer ({               \
+       unsigned long sp;                       \
+       asm("mov %%esp,%0" : "=g" (sp));        \
+       sp;                                     \
+})
+
+static inline void *current_stack(void)
+{
+       return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+}
+
 static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 {
-       union irq_ctx *curctx, *irqctx;
-       u32 *isp, arg1, arg2;
+       struct irq_stack *curstk, *irqstk;
+       u32 *isp, *prev_esp, arg1, arg2;
 
-       curctx = (union irq_ctx *) current_thread_info();
-       irqctx = __this_cpu_read(hardirq_ctx);
+       curstk = (struct irq_stack *) current_stack();
+       irqstk = __this_cpu_read(hardirq_stack);
 
        /*
         * this is where we switch to the IRQ stack. However, if we are
@@ -92,13 +96,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
-       if (unlikely(curctx == irqctx))
+       if (unlikely(curstk == irqstk))
                return 0;
 
-       /* build the stack frame on the IRQ stack */
-       isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
-       irqctx->tinfo.task = curctx->tinfo.task;
-       irqctx->tinfo.previous_esp = current_stack_pointer;
+       isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+
+       /* Save the next esp at the bottom of the stack */
+       prev_esp = (u32 *)irqstk;
+       *prev_esp = current_stack_pointer;
 
        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);
@@ -118,46 +123,40 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
  */
 void irq_ctx_init(int cpu)
 {
-       union irq_ctx *irqctx;
+       struct irq_stack *irqstk;
 
-       if (per_cpu(hardirq_ctx, cpu))
+       if (per_cpu(hardirq_stack, cpu))
                return;
 
-       irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+       irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
-       memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
-
-       per_cpu(hardirq_ctx, cpu) = irqctx;
+       per_cpu(hardirq_stack, cpu) = irqstk;
 
-       irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
+       irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
-       memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
-       irqctx->tinfo.cpu               = cpu;
-       irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
-
-       per_cpu(softirq_ctx, cpu) = irqctx;
+       per_cpu(softirq_stack, cpu) = irqstk;
 
        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-              cpu, per_cpu(hardirq_ctx, cpu),  per_cpu(softirq_ctx, cpu));
+              cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
 }
 
 void do_softirq_own_stack(void)
 {
-       struct thread_info *curctx;
-       union irq_ctx *irqctx;
-       u32 *isp;
+       struct thread_info *curstk;
+       struct irq_stack *irqstk;
+       u32 *isp, *prev_esp;
 
-       curctx = current_thread_info();
-       irqctx = __this_cpu_read(softirq_ctx);
-       irqctx->tinfo.task = curctx->task;
-       irqctx->tinfo.previous_esp = current_stack_pointer;
+       curstk = current_stack();
+       irqstk = __this_cpu_read(softirq_stack);
 
        /* build the stack frame on the softirq stack */
-       isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
+       isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));
+
+       /* Push the previous esp onto the stack */
+       prev_esp = (u32 *)irqstk;
+       *prev_esp = current_stack_pointer;
 
        call_on_stack(__do_softirq, isp);
 }
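
Both this stack-switch code and the dumpstack_32.c change earlier rely on the same convention: instead of copying thread_info onto the irq stack, the first word at the bottom of the hard/soft irq stack now holds the %esp that was live before the switch, and the unwinder reads it back from there. A userspace sketch of that convention, with a malloc'd buffer playing the irq stack (sizes and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_THREAD_SIZE 8192

int main(void)
{
	unsigned long task_sp = 0xc12345f0UL;	/* %esp before the switch */
	char *irqstk = malloc(FAKE_THREAD_SIZE);
	unsigned long *prev_esp, *isp;

	if (!irqstk)
		return 1;

	/* Switch path: the new frame starts at the top of the irq stack ... */
	isp = (unsigned long *)(irqstk + FAKE_THREAD_SIZE);
	/* ... and the old esp is stashed in the first word at the bottom. */
	prev_esp = (unsigned long *)irqstk;
	*prev_esp = task_sp;

	/* Unwinder path: round down to the stack base and read it back. */
	printf("frame top %p, resume unwinding at %#lx\n",
	       (void *)isp, *(unsigned long *)irqstk);

	free(irqstk);
	return 0;
}
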
index 18be189368bbfdbf55cc2e7492c0e7b3b2514403..e69f9882bf95a942ae1ce0f75efbf07301c9adc3 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
+#include <linux/random.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -43,13 +44,52 @@ do {                                                        \
 } while (0)
 #endif
 
+#ifdef CONFIG_RANDOMIZE_BASE
+static unsigned long module_load_offset;
+static int randomize_modules = 1;
+
+/* Mutex protects the module_load_offset. */
+static DEFINE_MUTEX(module_kaslr_mutex);
+
+static int __init parse_nokaslr(char *p)
+{
+       randomize_modules = 0;
+       return 0;
+}
+early_param("nokaslr", parse_nokaslr);
+
+static unsigned long int get_module_load_offset(void)
+{
+       if (randomize_modules) {
+               mutex_lock(&module_kaslr_mutex);
+               /*
+                * Calculate the module_load_offset the first time this
+                * code is called. Once calculated it stays the same until
+                * reboot.
+                */
+               if (module_load_offset == 0)
+                       module_load_offset =
+                               (get_random_int() % 1024 + 1) * PAGE_SIZE;
+               mutex_unlock(&module_kaslr_mutex);
+       }
+       return module_load_offset;
+}
+#else
+static unsigned long int get_module_load_offset(void)
+{
+       return 0;
+}
+#endif
+
 void *module_alloc(unsigned long size)
 {
        if (PAGE_ALIGN(size) > MODULES_LEN)
                return NULL;
-       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                               GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-                               NUMA_NO_NODE, __builtin_return_address(0));
+       return __vmalloc_node_range(size, 1,
+                                   MODULES_VADDR + get_module_load_offset(),
+                                   MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
+                                   PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                                   __builtin_return_address(0));
 }
 
 #ifdef CONFIG_X86_32
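
get_module_load_offset() above picks a page-aligned offset of 1 to 1024 pages the first time it is called and then keeps returning the same value until reboot. A stand-alone sketch of that once-only randomized offset, with rand() standing in for get_random_int(), a constant for PAGE_SIZE, and the mutex omitted (single-threaded demo):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define FAKE_PAGE_SIZE 4096UL

static unsigned long module_load_offset;	/* 0 means "not chosen yet" */

static unsigned long get_offset(void)
{
	/* Chosen on first use, then fixed for the rest of the run. */
	if (module_load_offset == 0)
		module_load_offset =
			((unsigned long)rand() % 1024 + 1) * FAKE_PAGE_SIZE;
	return module_load_offset;
}

int main(void)
{
	srand((unsigned)time(NULL));
	printf("offset = %lu\n", get_offset());
	printf("offset = %lu (same value again)\n", get_offset());
	return 0;
}
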
index 6fcb49ce50a1260d1f4bfdf0b5a065dd4f6d77c7..b4872b999a713d7fc08f7578b672d804a30dd13e 100644 (file)
@@ -87,6 +87,7 @@ __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 #define nmi_to_desc(type) (&nmi_desc[type])
 
 static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
+
 static int __init nmi_warning_debugfs(void)
 {
        debugfs_create_u64("nmi_longest_ns", 0644,
@@ -95,6 +96,20 @@ static int __init nmi_warning_debugfs(void)
 }
 fs_initcall(nmi_warning_debugfs);
 
+static void nmi_max_handler(struct irq_work *w)
+{
+       struct nmiaction *a = container_of(w, struct nmiaction, irq_work);
+       int remainder_ns, decimal_msecs;
+       u64 whole_msecs = ACCESS_ONCE(a->max_duration);
+
+       remainder_ns = do_div(whole_msecs, (1000 * 1000));
+       decimal_msecs = remainder_ns / 1000;
+
+       printk_ratelimited(KERN_INFO
+               "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
+               a->handler, whole_msecs, decimal_msecs);
+}
+
 static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
 {
        struct nmi_desc *desc = nmi_to_desc(type);
@@ -110,26 +125,20 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
         * to handle those situations.
         */
        list_for_each_entry_rcu(a, &desc->head, list) {
-               u64 before, delta, whole_msecs;
-               int remainder_ns, decimal_msecs, thishandled;
+               int thishandled;
+               u64 delta;
 
-               before = sched_clock();
+               delta = sched_clock();
                thishandled = a->handler(type, regs);
                handled += thishandled;
-               delta = sched_clock() - before;
+               delta = sched_clock() - delta;
                trace_nmi_handler(a->handler, (int)delta, thishandled);
 
-               if (delta < nmi_longest_ns)
+               if (delta < nmi_longest_ns || delta < a->max_duration)
                        continue;
 
-               nmi_longest_ns = delta;
-               whole_msecs = delta;
-               remainder_ns = do_div(whole_msecs, (1000 * 1000));
-               decimal_msecs = remainder_ns / 1000;
-               printk_ratelimited(KERN_INFO
-                       "INFO: NMI handler (%ps) took too long to run: "
-                       "%lld.%03d msecs\n", a->handler, whole_msecs,
-                       decimal_msecs);
+               a->max_duration = delta;
+               irq_work_queue(&a->irq_work);
        }
 
        rcu_read_unlock();
@@ -146,6 +155,8 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
        if (!action->handler)
                return -EINVAL;
 
+       init_irq_work(&action->irq_work, nmi_max_handler);
+
        spin_lock_irqsave(&desc->lock, flags);
 
        /*
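
nmi_max_handler() above defers the "took too long" report to irq_work and splits the nanosecond duration into whole milliseconds plus a three-digit fraction via do_div(). The same arithmetic in plain C, with ordinary 64-bit division in place of do_div() (no 32-bit divide constraint here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t duration_ns = 12345678;	/* example handler runtime */

	uint64_t whole_msecs   = duration_ns / (1000 * 1000);
	uint64_t remainder_ns  = duration_ns % (1000 * 1000);
	unsigned decimal_msecs = (unsigned)(remainder_ns / 1000);

	/* Same layout as the deferred kernel message: 12.345 msecs */
	printf("handler took %llu.%03u msecs\n",
	       (unsigned long long)whole_msecs, decimal_msecs);
	return 0;
}
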
index 3fb8d95ab8b5ea3635ddb1f0d9f9c12e3a348285..4505e2a950d81f479663df20ce787dbad5f293b0 100644 (file)
@@ -298,10 +298,7 @@ void arch_cpu_idle_dead(void)
  */
 void arch_cpu_idle(void)
 {
-       if (cpuidle_idle_call())
-               x86_idle();
-       else
-               local_irq_enable();
+       x86_idle();
 }
 
 /*
index 0de43e98ce08604afa886ceba8e17b7c66583e97..7bc86bbe748599b92c2b9b2b221f98b10224c5b2 100644 (file)
@@ -314,6 +314,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         */
        arch_end_context_switch(next_p);
 
+       this_cpu_write(kernel_stack,
+                 (unsigned long)task_stack_page(next_p) +
+                 THREAD_SIZE - KERNEL_STACK_OFFSET);
+
        /*
         * Restore %gs if needed (which is common)
         */
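The added this_cpu_write() keeps the per-cpu kernel_stack pointer current across context switches: it points KERNEL_STACK_OFFSET bytes below the top of the next task's THREAD_SIZE stack. A small sketch of that arithmetic; the constants and the stack address are illustrative stand-ins, not the real per-arch values:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative constants; the real values are per-arch configuration. */
    #define THREAD_SIZE          8192UL
    #define KERNEL_STACK_OFFSET  (5 * sizeof(long))

    int main(void)
    {
            /* Stands in for task_stack_page(next_p). */
            uintptr_t stack_page   = 0x12340000UL;
            uintptr_t kernel_stack = stack_page + THREAD_SIZE - KERNEL_STACK_OFFSET;

            printf("per-cpu kernel_stack = %#lx\n", (unsigned long)kernel_stack);
            return 0;
    }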
index 7461f50d5bb1e15dbf39d7f43a758138b0fad019..678c0ada3b3ce5f94135c7076cf59800850ca474 100644 (file)
@@ -184,14 +184,14 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
        unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
        unsigned long sp = (unsigned long)&regs->sp;
-       struct thread_info *tinfo;
+       u32 *prev_esp;
 
        if (context == (sp & ~(THREAD_SIZE - 1)))
                return sp;
 
-       tinfo = (struct thread_info *)context;
-       if (tinfo->previous_esp)
-               return tinfo->previous_esp;
+       prev_esp = (u32 *)(context);
+       if (*prev_esp)
+               return (unsigned long)*prev_esp;
 
        return (unsigned long)regs;
 }
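With previous_esp gone from thread_info, the saved stack pointer now lives in the first 32-bit word of the THREAD_SIZE-aligned stack, and kernel_stack_pointer() recovers it by masking any in-stack address down to the stack base and reading that word. A user-space sketch of that recovery, with an illustrative THREAD_SIZE and a fake stack:

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192UL      /* illustrative */

    int main(void)
    {
            /* Fake THREAD_SIZE-aligned stack; its first word holds the saved sp. */
            static uint32_t stack[8192 / 4] __attribute__((aligned(8192)));
            uintptr_t context;

            stack[0] = 0xc15fbe40;                          /* previous stack pointer */

            /* Any address inside the stack masks down to the stack base ... */
            context = (uintptr_t)&stack[100] & ~(THREAD_SIZE - 1);

            /* ... and the first 32-bit word there is the saved stack pointer. */
            printf("saved sp = %#x\n", *(uint32_t *)context);
            return 0;
    }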
index c752cb43e52f192f431a5f2ab91b9366e42ef5b0..654b46574b916c20ac4472aace3ffe4165f131fd 100644 (file)
@@ -464,9 +464,12 @@ void __attribute__((weak)) mach_reboot_fixups(void)
  * 2) If still alive, write to the keyboard controller
  * 3) If still alive, write to the ACPI reboot register again
  * 4) If still alive, write to the keyboard controller again
+ * 5) If still alive, call the EFI runtime service to reboot
+ * 6) If still alive, write to the PCI IO port 0xCF9 to reboot
+ * 7) If still alive, inform BIOS to do a proper reboot
  *
  * If the machine is still alive at this stage, it gives up. We default to
- * following the same pattern, except that if we're still alive after (4) we'll
+ * following the same pattern, except that if we're still alive after (7) we'll
  * try to force a triple fault and then cycle between hitting the keyboard
  * controller and doing that
  */
@@ -502,7 +505,7 @@ static void native_machine_emergency_restart(void)
                                attempt = 1;
                                reboot_type = BOOT_ACPI;
                        } else {
-                               reboot_type = BOOT_TRIPLE;
+                               reboot_type = BOOT_EFI;
                        }
                        break;
 
@@ -510,13 +513,15 @@ static void native_machine_emergency_restart(void)
                        load_idt(&no_idt);
                        __asm__ __volatile__("int3");
 
+                       /* We're probably dead after this, but... */
                        reboot_type = BOOT_KBD;
                        break;
 
                case BOOT_BIOS:
                        machine_real_restart(MRR_BIOS);
 
-                       reboot_type = BOOT_KBD;
+                       /* We're probably dead after this, but... */
+                       reboot_type = BOOT_TRIPLE;
                        break;
 
                case BOOT_ACPI:
@@ -530,7 +535,7 @@ static void native_machine_emergency_restart(void)
                                                 EFI_RESET_WARM :
                                                 EFI_RESET_COLD,
                                                 EFI_SUCCESS, 0, NULL);
-                       reboot_type = BOOT_KBD;
+                       reboot_type = BOOT_CF9_COND;
                        break;
 
                case BOOT_CF9:
@@ -548,7 +553,7 @@ static void native_machine_emergency_restart(void)
                                outb(cf9|reboot_code, 0xcf9);
                                udelay(50);
                        }
-                       reboot_type = BOOT_KBD;
+                       reboot_type = BOOT_BIOS;
                        break;
                }
        }
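The reordered cases above turn the reboot quirks into a longer hand-off chain: each method, if it returns at all, arms the next one rather than bouncing straight back to the keyboard controller. A trivial sketch of the hand-off idea; the strings mirror the numbered comment, not the exact state transitions inside native_machine_emergency_restart():

    #include <stdio.h>

    /* Purely illustrative: the hand-off idea behind the reboot quirk chain.
     * The strings mirror the numbered comment, not the exact switch above. */
    static const char * const methods[] = {
            "ACPI reboot register",
            "keyboard controller",
            "ACPI reboot register (again)",
            "keyboard controller (again)",
            "EFI ResetSystem() runtime service",
            "PCI I/O port 0xCF9",
            "BIOS real-mode reset",
            "triple fault (then cycle with the keyboard controller)",
    };

    int main(void)
    {
            unsigned n = sizeof(methods) / sizeof(methods[0]);

            /* Every "attempt" fails here, so the whole chain is visited. */
            for (unsigned i = 0; i < n; i++)
                    printf("attempt %u: %s\n", i + 1, methods[i]);
            return 0;
    }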
index ce72964b2f469db1b3c626ecadc18c2998ef98a4..fa511acff7e6c24bbb216de1615569e6bca31db9 100644 (file)
@@ -926,11 +926,11 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL32", 4)) {
-               set_bit(EFI_BOOT, &x86_efi_facility);
+               set_bit(EFI_BOOT, &efi.flags);
        } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     "EL64", 4)) {
-               set_bit(EFI_BOOT, &x86_efi_facility);
-               set_bit(EFI_64BIT, &x86_efi_facility);
+               set_bit(EFI_BOOT, &efi.flags);
+               set_bit(EFI_64BIT, &efi.flags);
        }
 
        if (efi_enabled(EFI_BOOT))
index a32da804252e374b5d266e6788653f24fc98705d..34826934d4a7b37da39ae7556d04be37cae55ee8 100644 (file)
@@ -122,8 +122,9 @@ static void smp_callin(void)
         * Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI.
         */
        cpuid = smp_processor_id();
-       if (apic->wait_for_init_deassert && cpuid != 0)
-               apic->wait_for_init_deassert(&init_deasserted);
+       if (apic->wait_for_init_deassert && cpuid)
+               while (!atomic_read(&init_deasserted))
+                       cpu_relax();
 
        /*
         * (This works even if the APIC is not enabled.)
@@ -701,11 +702,15 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
        int id;
        int boot_error;
 
+       preempt_disable();
+
        /*
         * Wake up AP by INIT, INIT, STARTUP sequence.
         */
-       if (cpu)
-               return wakeup_secondary_cpu_via_init(apicid, start_ip);
+       if (cpu) {
+               boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
+               goto out;
+       }
 
        /*
         * Wake up BSP by nmi.
@@ -725,6 +730,9 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
                boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
        }
 
+out:
+       preempt_enable();
+
        return boot_error;
 }
 
@@ -758,10 +766,10 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 #else
        clear_tsk_thread_flag(idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
+#endif
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
-#endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
        stack_start  = idle->thread.sp;
@@ -1379,7 +1387,7 @@ static inline void mwait_play_dead(void)
 
        if (!this_cpu_has(X86_FEATURE_MWAIT))
                return;
-       if (!this_cpu_has(X86_FEATURE_CLFLSH))
+       if (!this_cpu_has(X86_FEATURE_CLFLUSH))
                return;
        if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
                return;
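The wait_for_init_deassert() callback is replaced by an open-coded wait: each AP simply spins on init_deasserted with cpu_relax() in the loop body. A user-space analogue using C11 atomics (the flag name is reused purely for illustration; in the kernel the BSP sets it from another CPU):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int init_deasserted;      /* set by the BSP in the kernel */

    /* Same shape as the open-coded wait in smp_callin(): spin until the flag
     * is set; the kernel adds cpu_relax() as a pause hint inside the loop. */
    static void wait_for_init_deassert(void)
    {
            while (!atomic_load(&init_deasserted))
                    ;       /* cpu_relax() would go here */
    }

    int main(void)
    {
            atomic_store(&init_deasserted, 1);      /* pretend INIT was deasserted */
            wait_for_init_deassert();
            puts("INIT deasserted, AP continues");
            return 0;
    }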
index 24d3c91e9812f6f37be6b138deb8e0b6b23a30dd..bf7ef5ce29dff7f89d8e93fb8bee4119f445379b 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/time.h>
 
 #ifdef CONFIG_X86_64
-DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
+__visible DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
 #endif
 
 unsigned long profile_pc(struct pt_regs *regs)
@@ -62,7 +62,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq0  = {
        .handler = timer_interrupt,
-       .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
+       .flags = IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
        .name = "timer"
 };
 
index cfbe99f888300d819b53552a7668ab9bd12c3708..7a9296ab88340991436dfb91d6db70e7a17a529e 100644 (file)
@@ -914,8 +914,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                tsc_khz_ref = tsc_khz;
        }
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
-                       (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-                       (val == CPUFREQ_RESUMECHANGE)) {
+                       (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
 
                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
index c6976257eff51281e023c264b355f166ce56dd5c..e5503d8aec1dac41f6ff7f97eb77e43beee6f38b 100644 (file)
@@ -263,7 +263,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                F(TSC) | F(MSR) | F(PAE) | F(MCE) |
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
-               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
+               F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
                0 /* Reserved, DS, ACPI */ | F(MMX) |
                F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
                0 /* HTT, TM, Reserved, PBE */;
index 3056702e81fbc553d3d7986889bc31ef4bb4bd3f..ff4fa51a5b1f5c0ddffcfa64ce72556b012b084b 100644 (file)
@@ -32,6 +32,7 @@
  */
 
 #include <linux/hash.h>
+#include <linux/init.h>
 
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
 
 static inline u32 crc32_u32(u32 crc, u32 val)
 {
+#ifdef CONFIG_AS_CRC32
        asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
+#else
+       asm (".byte 0xf2, 0x0f, 0x38, 0xf1, 0xc1" : "+a" (crc) : "c" (val));
+#endif
        return crc;
 }
 
@@ -49,19 +54,18 @@ static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
        u32 i, tmp = 0;
 
        for (i = 0; i < len / 4; i++)
-               seed = crc32_u32(*p32++, seed);
+               seed = crc32_u32(seed, *p32++);
 
-       switch (3 - (len & 0x03)) {
-       case 0:
+       switch (len & 3) {
+       case 3:
                tmp |= *((const u8 *) p32 + 2) << 16;
                /* fallthrough */
-       case 1:
+       case 2:
                tmp |= *((const u8 *) p32 + 1) << 8;
                /* fallthrough */
-       case 2:
+       case 1:
                tmp |= *((const u8 *) p32);
-               seed = crc32_u32(tmp, seed);
-       default:
+               seed = crc32_u32(seed, tmp);
                break;
        }
 
@@ -74,12 +78,12 @@ static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
        u32 i;
 
        for (i = 0; i < len; i++)
-               seed = crc32_u32(*p32++, seed);
+               seed = crc32_u32(seed, *p32++);
 
        return seed;
 }
 
-void setup_arch_fast_hash(struct fast_hash_ops *ops)
+void __init setup_arch_fast_hash(struct fast_hash_ops *ops)
 {
        if (cpu_has_xmm4_2) {
                ops->hash  = intel_crc4_2_hash;
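Two fixes land in this hash: crc32_u32() now takes the accumulator first and the data word second, and the tail handling switches on len & 3 so the final 1-3 bytes are packed into one word before the last step. A user-space sketch of the loop and tail packing; the mixing function below is an arbitrary stand-in, not CRC32C, since the point here is the argument order and the byte packing:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Arbitrary 32-bit mixing step standing in for the hardware crc32l
     * instruction; the accumulator comes first, the data word second. */
    static uint32_t crc32_u32(uint32_t crc, uint32_t val)
    {
            return (crc ^ val) * 0x9e3779b1u;
    }

    static uint32_t hash_buf(const void *data, uint32_t len, uint32_t seed)
    {
            const uint8_t *p = data;
            const uint8_t *tail = p + (len & ~3u);
            uint32_t i, v, tmp = 0;

            for (i = 0; i < len / 4; i++) {
                    memcpy(&v, p + i * 4, 4);       /* unaligned-safe word load */
                    seed = crc32_u32(seed, v);
            }

            /* Pack the 1-3 trailing bytes exactly as the fixed switch does. */
            switch (len & 3) {
            case 3:
                    tmp |= (uint32_t)tail[2] << 16;
                    /* fallthrough */
            case 2:
                    tmp |= (uint32_t)tail[1] << 8;
                    /* fallthrough */
            case 1:
                    tmp |= tail[0];
                    seed = crc32_u32(seed, tmp);
                    break;
            }

            return seed;
    }

    int main(void)
    {
            const char buf[] = "abcdefghij";        /* 10 bytes: 2 words + 2 tail bytes */

            printf("hash = %#x\n", (unsigned)hash_buf(buf, 10, 0));
            return 0;
    }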
index e78761d6b7f87811ea7f20feab53a72656547e1b..a404b4b7553319cd7d517355bf340784de344562 100644 (file)
@@ -4,7 +4,7 @@
 #undef memcpy
 #undef memset
 
-void *memcpy(void *to, const void *from, size_t n)
+__visible void *memcpy(void *to, const void *from, size_t n)
 {
 #ifdef CONFIG_X86_USE_3DNOW
        return __memcpy3d(to, from, n);
@@ -14,13 +14,13 @@ void *memcpy(void *to, const void *from, size_t n)
 }
 EXPORT_SYMBOL(memcpy);
 
-void *memset(void *s, int c, size_t count)
+__visible void *memset(void *s, int c, size_t count)
 {
        return __memset(s, c, count);
 }
 EXPORT_SYMBOL(memset);
 
-void *memmove(void *dest, const void *src, size_t n)
+__visible void *memmove(void *dest, const void *src, size_t n)
 {
        int d0,d1,d2,d3,d4,d5;
        char *ret = dest;
index 8f8eebdca7d4cadc249405e8a0c50108e34edc3b..db9db446b71a66fe5bd59de47232e8fa69e8c96e 100644 (file)
@@ -8,7 +8,7 @@ struct msr *msrs_alloc(void)
 
        msrs = alloc_percpu(struct msr);
        if (!msrs) {
-               pr_warning("%s: error allocating msrs\n", __func__);
+               pr_warn("%s: error allocating msrs\n", __func__);
                return NULL;
        }
 
@@ -21,3 +21,90 @@ void msrs_free(struct msr *msrs)
        free_percpu(msrs);
 }
 EXPORT_SYMBOL(msrs_free);
+
+/**
+ * Read an MSR with error handling
+ *
+ * @msr: MSR to read
+ * @m: value to read into
+ *
+ * Returns the read data in @m only on success; on failure, @m is left
+ * unchanged.
+ *
+ */
+int msr_read(u32 msr, struct msr *m)
+{
+       int err;
+       u64 val;
+
+       err = rdmsrl_safe(msr, &val);
+       if (!err)
+               m->q = val;
+
+       return err;
+}
+
+/**
+ * Write an MSR with error handling
+ *
+ * @msr: MSR to write
+ * @m: value to write
+ */
+int msr_write(u32 msr, struct msr *m)
+{
+       return wrmsrl_safe(msr, m->q);
+}
+
+static inline int __flip_bit(u32 msr, u8 bit, bool set)
+{
+       struct msr m, m1;
+       int err = -EINVAL;
+
+       if (bit > 63)
+               return err;
+
+       err = msr_read(msr, &m);
+       if (err)
+               return err;
+
+       m1 = m;
+       if (set)
+               m1.q |=  BIT_64(bit);
+       else
+               m1.q &= ~BIT_64(bit);
+
+       if (m1.q == m.q)
+               return 0;
+
+       err = msr_write(msr, &m1);
+       if (err)
+               return err;
+
+       return 1;
+}
+
+/**
+ * Set @bit in an MSR @msr.
+ *
+ * Retval:
+ * < 0: An error was encountered.
+ * = 0: Bit was already set.
+ * > 0: Hardware accepted the MSR write.
+ */
+int msr_set_bit(u32 msr, u8 bit)
+{
+       return __flip_bit(msr, bit, true);
+}
+
+/**
+ * Clear @bit in an MSR @msr.
+ *
+ * Retval:
+ * < 0: An error was encountered.
+ * = 0: Bit was already cleared.
+ * > 0: Hardware accepted the MSR write.
+ */
+int msr_clear_bit(u32 msr, u8 bit)
+{
+       return __flip_bit(msr, bit, false);
+}
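msr_set_bit() and msr_clear_bit() share a three-way return convention: negative on error, 0 when the bit was already in the requested state, positive when the MSR was actually written. A user-space mock of the same logic, with a plain variable standing in for rdmsrl_safe()/wrmsrl_safe(); note that the write must use the modified value:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define BIT_64(n) (1ULL << (n))

    static uint64_t fake_msr;       /* stands in for rdmsrl_safe()/wrmsrl_safe() */

    /* Mirrors __flip_bit(): < 0 error, 0 already in the wanted state, > 0 written. */
    static int flip_bit(uint64_t *msr, uint8_t bit, bool set)
    {
            uint64_t old, new;

            if (bit > 63)
                    return -1;

            old = *msr;
            new = set ? (old | BIT_64(bit)) : (old & ~BIT_64(bit));

            if (new == old)
                    return 0;       /* nothing to do */

            *msr = new;             /* write the *modified* value */
            return 1;
    }

    int main(void)
    {
            int ret;

            ret = flip_bit(&fake_msr, 3, true);
            printf("set bit 3   -> %d, msr = %#llx\n", ret, (unsigned long long)fake_msr);

            ret = flip_bit(&fake_msr, 3, true);
            printf("set again   -> %d (already set)\n", ret);

            ret = flip_bit(&fake_msr, 3, false);
            printf("clear bit 3 -> %d, msr = %#llx\n", ret, (unsigned long long)fake_msr);
            return 0;
    }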
index 0002a3a33081c77134569c1872e5a70684dbd646..20621d753d5fb4564dddbf960999158a82bc0707 100644 (file)
@@ -30,6 +30,7 @@ struct pg_state {
        unsigned long start_address;
        unsigned long current_address;
        const struct addr_marker *marker;
+       bool to_dmesg;
 };
 
 struct addr_marker {
@@ -88,10 +89,28 @@ static struct addr_marker address_markers[] = {
 #define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
 #define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
 
+#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)          \
+({                                                             \
+       if (to_dmesg)                                   \
+               printk(KERN_INFO fmt, ##args);                  \
+       else                                                    \
+               if (m)                                          \
+                       seq_printf(m, fmt, ##args);             \
+})
+
+#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)         \
+({                                                             \
+       if (to_dmesg)                                   \
+               printk(KERN_CONT fmt, ##args);                  \
+       else                                                    \
+               if (m)                                          \
+                       seq_printf(m, fmt, ##args);             \
+})
+
 /*
  * Print a readable form of a pgprot_t to the seq_file
  */
-static void printk_prot(struct seq_file *m, pgprot_t prot, int level)
+static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
 {
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
@@ -99,47 +118,47 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level)
 
        if (!pgprot_val(prot)) {
                /* Not present */
-               seq_printf(m, "                          ");
+               pt_dump_cont_printf(m, dmsg, "                          ");
        } else {
                if (pr & _PAGE_USER)
-                       seq_printf(m, "USR ");
+                       pt_dump_cont_printf(m, dmsg, "USR ");
                else
-                       seq_printf(m, "    ");
+                       pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
-                       seq_printf(m, "RW ");
+                       pt_dump_cont_printf(m, dmsg, "RW ");
                else
-                       seq_printf(m, "ro ");
+                       pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
-                       seq_printf(m, "PWT ");
+                       pt_dump_cont_printf(m, dmsg, "PWT ");
                else
-                       seq_printf(m, "    ");
+                       pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
-                       seq_printf(m, "PCD ");
+                       pt_dump_cont_printf(m, dmsg, "PCD ");
                else
-                       seq_printf(m, "    ");
+                       pt_dump_cont_printf(m, dmsg, "    ");
 
                /* Bit 9 has a different meaning on level 3 vs 4 */
                if (level <= 3) {
                        if (pr & _PAGE_PSE)
-                               seq_printf(m, "PSE ");
+                               pt_dump_cont_printf(m, dmsg, "PSE ");
                        else
-                               seq_printf(m, "    ");
+                               pt_dump_cont_printf(m, dmsg, "    ");
                } else {
                        if (pr & _PAGE_PAT)
-                               seq_printf(m, "pat ");
+                               pt_dump_cont_printf(m, dmsg, "pat ");
                        else
-                               seq_printf(m, "    ");
+                               pt_dump_cont_printf(m, dmsg, "    ");
                }
                if (pr & _PAGE_GLOBAL)
-                       seq_printf(m, "GLB ");
+                       pt_dump_cont_printf(m, dmsg, "GLB ");
                else
-                       seq_printf(m, "    ");
+                       pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
-                       seq_printf(m, "NX ");
+                       pt_dump_cont_printf(m, dmsg, "NX ");
                else
-                       seq_printf(m, "x  ");
+                       pt_dump_cont_printf(m, dmsg, "x  ");
        }
-       seq_printf(m, "%s\n", level_name[level]);
+       pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
 }
 
 /*
@@ -178,7 +197,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                st->current_prot = new_prot;
                st->level = level;
                st->marker = address_markers;
-               seq_printf(m, "---[ %s ]---\n", st->marker->name);
+               pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
+                                  st->marker->name);
        } else if (prot != cur || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
@@ -188,17 +208,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                /*
                 * Now print the actual finished series
                 */
-               seq_printf(m, "0x%0*lx-0x%0*lx   ",
-                          width, st->start_address,
-                          width, st->current_address);
+               pt_dump_seq_printf(m, st->to_dmesg,  "0x%0*lx-0x%0*lx   ",
+                                  width, st->start_address,
+                                  width, st->current_address);
 
                delta = (st->current_address - st->start_address) >> 10;
                while (!(delta & 1023) && unit[1]) {
                        delta >>= 10;
                        unit++;
                }
-               seq_printf(m, "%9lu%c ", delta, *unit);
-               printk_prot(m, st->current_prot, st->level);
+               pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ", delta, *unit);
+               printk_prot(m, st->current_prot, st->level, st->to_dmesg);
 
                /*
                 * We print markers for special areas of address space,
@@ -207,7 +227,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        st->marker++;
-                       seq_printf(m, "---[ %s ]---\n", st->marker->name);
+                       pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
+                                          st->marker->name);
                }
 
                st->start_address = st->current_address;
@@ -296,7 +317,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
 #define pgd_none(a)  pud_none(__pud(pgd_val(a)))
 #endif
 
-static void walk_pgd_level(struct seq_file *m)
+void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
 {
 #ifdef CONFIG_X86_64
        pgd_t *start = (pgd_t *) &init_level4_pgt;
@@ -304,9 +325,12 @@ static void walk_pgd_level(struct seq_file *m)
        pgd_t *start = swapper_pg_dir;
 #endif
        int i;
-       struct pg_state st;
+       struct pg_state st = {};
 
-       memset(&st, 0, sizeof(st));
+       if (pgd) {
+               start = pgd;
+               st.to_dmesg = true;
+       }
 
        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
@@ -331,7 +355,7 @@ static void walk_pgd_level(struct seq_file *m)
 
 static int ptdump_show(struct seq_file *m, void *v)
 {
-       walk_pgd_level(m);
+       ptdump_walk_pgd_level(m, NULL);
        return 0;
 }
 
index a10c8c79216187d2faa5add449762710d51c759b..8e57229926779eb9db2afad3e5b277def75d4e0a 100644 (file)
@@ -584,8 +584,13 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 
        if (error_code & PF_INSTR) {
                unsigned int level;
+               pgd_t *pgd;
+               pte_t *pte;
 
-               pte_t *pte = lookup_address(address, &level);
+               pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
+               pgd += pgd_index(address);
+
+               pte = lookup_address_in_pgd(pgd, address, &level);
 
                if (pte && pte_present(*pte) && !pte_exec(*pte))
                        printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
index b3b19f46c0164c7169c9259a68836afbaeae1943..ae242a7c11c7473cfeb163b78d54fa62005b8e44 100644 (file)
@@ -126,8 +126,8 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * @vaddr:     virtual start address
  * @size:      number of bytes to flush
  *
- * clflush is an unordered instruction which needs fencing with mfence
- * to avoid ordering issues.
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues.
  */
 void clflush_cache_range(void *vaddr, unsigned int size)
 {
@@ -136,11 +136,11 @@ void clflush_cache_range(void *vaddr, unsigned int size)
        mb();
 
        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
-               clflush(vaddr);
+               clflushopt(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
-       clflush(vend);
+       clflushopt(vend);
 
        mb();
 }
@@ -323,8 +323,12 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
        return prot;
 }
 
-static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
-                                     unsigned int *level)
+/*
+ * Lookup the page table entry for a virtual address in a specific pgd.
+ * Return a pointer to the entry and the level of the mapping.
+ */
+pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+                            unsigned int *level)
 {
        pud_t *pud;
        pmd_t *pmd;
@@ -365,7 +369,7 @@ static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
  */
 pte_t *lookup_address(unsigned long address, unsigned int *level)
 {
-        return __lookup_address_in_pgd(pgd_offset_k(address), address, level);
+        return lookup_address_in_pgd(pgd_offset_k(address), address, level);
 }
 EXPORT_SYMBOL_GPL(lookup_address);
 
@@ -373,7 +377,7 @@ static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
                                  unsigned int *level)
 {
         if (cpa->pgd)
-               return __lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+               return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
                                               address, level);
 
         return lookup_address(address, level);
@@ -692,6 +696,18 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
        return true;
 }
 
+static bool try_to_free_pud_page(pud_t *pud)
+{
+       int i;
+
+       for (i = 0; i < PTRS_PER_PUD; i++)
+               if (!pud_none(pud[i]))
+                       return false;
+
+       free_page((unsigned long)pud);
+       return true;
+}
+
 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
 {
        pte_t *pte = pte_offset_kernel(pmd, start);
@@ -805,6 +821,16 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
         */
 }
 
+static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
+{
+       pgd_t *pgd_entry = root + pgd_index(addr);
+
+       unmap_pud_range(pgd_entry, addr, end);
+
+       if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
+               pgd_clear(pgd_entry);
+}
+
 static int alloc_pte_page(pmd_t *pmd)
 {
        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -999,9 +1025,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 {
        pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
-       bool allocd_pgd = false;
-       pgd_t *pgd_entry;
        pud_t *pud = NULL;      /* shut up gcc */
+       pgd_t *pgd_entry;
        int ret;
 
        pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1015,7 +1040,6 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
                        return -1;
 
                set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
-               allocd_pgd = true;
        }
 
        pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
@@ -1023,19 +1047,11 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 
        ret = populate_pud(cpa, addr, pgd_entry, pgprot);
        if (ret < 0) {
-               unmap_pud_range(pgd_entry, addr,
+               unmap_pgd_range(cpa->pgd, addr,
                                addr + (cpa->numpages << PAGE_SHIFT));
-
-               if (allocd_pgd) {
-                       /*
-                        * If I allocated this PUD page, I can just as well
-                        * free it in this error path.
-                        */
-                       pgd_clear(pgd_entry);
-                       free_page((unsigned long)pud);
-               }
                return ret;
        }
+
        cpa->numpages = ret;
        return 0;
 }
@@ -1377,10 +1393,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
        cache = cache_attr(mask_set);
 
        /*
-        * On success we use clflush, when the CPU supports it to
-        * avoid the wbindv. If the CPU does not support it and in the
+        * On success we use CLFLUSH, when the CPU supports it to
+        * avoid the WBINVD. If the CPU does not support it and in the
         * error case we fall back to cpa_flush_all (which uses
-        * wbindv):
+        * WBINVD):
         */
        if (!ret && cpu_has_clflush) {
                if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
@@ -1861,6 +1877,12 @@ out:
        return retval;
 }
 
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+                              unsigned numpages)
+{
+       unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
+}
+
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
  * be exposed to the rest of the kernel. Include these directly here.
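clflush_cache_range() walks the range in x86_clflush_size strides and then explicitly flushes the line holding the last byte, with a memory barrier on each side because CLFLUSHOPT is weakly ordered. A user-space sketch of just the loop arithmetic, with a print standing in for the flush instruction and a made-up cache-line size:

    #include <stdio.h>
    #include <stdint.h>

    #define CLFLUSH_SIZE 64u        /* illustrative; really boot_cpu_data.x86_clflush_size */

    /* Stand-in for clflushopt(): just report which cache line would be flushed. */
    static void flush_line(uintptr_t addr)
    {
            printf("flush line %#lx\n",
                   (unsigned long)(addr & ~(uintptr_t)(CLFLUSH_SIZE - 1)));
    }

    static void flush_cache_range(uintptr_t vaddr, unsigned int size)
    {
            uintptr_t vend = vaddr + size - 1;

            /* mb() -- fence before the weakly ordered flushes */
            for (; vaddr < vend; vaddr += CLFLUSH_SIZE)
                    flush_line(vaddr);
            flush_line(vend);       /* any final partial cache line */
            /* mb() -- fence after */
    }

    int main(void)
    {
            flush_cache_range(0x1000 + 8, 130);     /* spans three cache lines */
            return 0;
    }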
index 1953e9c9391aecf6ae4cddb4d65baae045fd2911..66338a60aa6ef961c6017731b6fd6d9d169736e1 100644 (file)
@@ -52,12 +52,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
        int i, j;
 
        for (i = 0; i < slit->locality_count; i++) {
-               if (pxm_to_node(i) == NUMA_NO_NODE)
+               const int from_node = pxm_to_node(i);
+
+               if (from_node == NUMA_NO_NODE)
                        continue;
+
                for (j = 0; j < slit->locality_count; j++) {
-                       if (pxm_to_node(j) == NUMA_NO_NODE)
+                       const int to_node = pxm_to_node(j);
+
+                       if (to_node == NUMA_NO_NODE)
                                continue;
-                       numa_set_distance(pxm_to_node(i), pxm_to_node(j),
+
+                       numa_set_distance(from_node, to_node,
                                slit->entry[slit->locality_count * i + j]);
                }
        }
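acpi_numa_slit_init() reads the SLIT as a flat locality_count x locality_count byte matrix, where entry[locality_count * i + j] is the distance from locality i to locality j. A tiny illustration of that row-major indexing, using the conventional values (10 = local, larger = more remote):

    #include <stdio.h>

    /* SLIT distances form a flat locality_count x locality_count matrix;
     * entry[count * i + j] is the distance from locality i to locality j. */
    int main(void)
    {
            const unsigned count = 3;
            const unsigned char entry[] = {
                    10, 20, 30,
                    20, 10, 20,
                    30, 20, 10,
            };

            for (unsigned i = 0; i < count; i++)
                    for (unsigned j = 0; j < count; j++)
                            printf("distance(%u -> %u) = %u\n",
                                   i, j, entry[count * i + j]);
            return 0;
    }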
index b7b0b35c198127dfed4c73dae046665a42ba5561..d51045afcaaf5e386ca101d889d4462f197afa93 100644 (file)
@@ -1,3 +1,4 @@
 obj-$(CONFIG_EFI)              += efi.o efi_$(BITS).o efi_stub_$(BITS).o
 obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
 obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o
+obj-$(CONFIG_EFI_MIXED)                += efi_thunk_$(BITS).o
index b97acecf3fd95667e2b67bba8f88bb80428480bb..3781dd39e8bd55a03b8acd779113f40c151442b4 100644 (file)
@@ -68,9 +68,7 @@ struct efi_memory_map memmap;
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
-unsigned long x86_efi_facility;
-
-static __initdata efi_config_table_type_t arch_tables[] = {
+static efi_config_table_type_t arch_tables[] __initdata = {
 #ifdef CONFIG_X86_UV
        {UV_SYSTEM_TABLE_GUID, "UVsystab", &efi.uv_systab},
 #endif
@@ -79,16 +77,7 @@ static __initdata efi_config_table_type_t arch_tables[] = {
 
 u64 efi_setup;         /* efi setup_data physical address */
 
-/*
- * Returns 1 if 'facility' is enabled, 0 otherwise.
- */
-int efi_enabled(int facility)
-{
-       return test_bit(facility, &x86_efi_facility) != 0;
-}
-EXPORT_SYMBOL(efi_enabled);
-
-static bool __initdata disable_runtime = false;
+static bool disable_runtime __initdata = false;
 static int __init setup_noefi(char *arg)
 {
        disable_runtime = true;
@@ -257,27 +246,12 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
        return status;
 }
 
-static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
-                                            efi_time_cap_t *tc)
-{
-       unsigned long flags;
-       efi_status_t status;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-       efi_call_phys_prelog();
-       status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
-                               virt_to_phys(tc));
-       efi_call_phys_epilog();
-       spin_unlock_irqrestore(&rtc_lock, flags);
-       return status;
-}
-
 int efi_set_rtc_mmss(const struct timespec *now)
 {
        unsigned long nowtime = now->tv_sec;
-       efi_status_t    status;
-       efi_time_t      eft;
-       efi_time_cap_t  cap;
+       efi_status_t    status;
+       efi_time_t      eft;
+       efi_time_cap_t  cap;
        struct rtc_time tm;
 
        status = efi.get_time(&eft, &cap);
@@ -295,9 +269,8 @@ int efi_set_rtc_mmss(const struct timespec *now)
                eft.second = tm.tm_sec;
                eft.nanosecond = 0;
        } else {
-               printk(KERN_ERR
-                      "%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
-                      __FUNCTION__, nowtime);
+               pr_err("%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
+                      __func__, nowtime);
                return -1;
        }
 
@@ -413,8 +386,7 @@ static void __init print_efi_memmap(void)
             p < memmap.map_end;
             p += memmap.desc_size, i++) {
                md = p;
-               pr_info("mem%02u: type=%u, attr=0x%llx, "
-                       "range=[0x%016llx-0x%016llx) (%lluMB)\n",
+               pr_info("mem%02u: type=%u, attr=0x%llx, range=[0x%016llx-0x%016llx) (%lluMB)\n",
                        i, md->type, md->attribute, md->phys_addr,
                        md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
                        (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
@@ -446,9 +418,8 @@ void __init efi_reserve_boot_services(void)
                        memblock_is_region_reserved(start, size)) {
                        /* Could not reserve, skip it */
                        md->num_pages = 0;
-                       memblock_dbg("Could not reserve boot range "
-                                       "[0x%010llx-0x%010llx]\n",
-                                               start, start+size-1);
+                       memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
+                                    start, start+size-1);
                } else
                        memblock_reserve(start, size);
        }
@@ -456,7 +427,7 @@ void __init efi_reserve_boot_services(void)
 
 void __init efi_unmap_memmap(void)
 {
-       clear_bit(EFI_MEMMAP, &x86_efi_facility);
+       clear_bit(EFI_MEMMAP, &efi.flags);
        if (memmap.map) {
                early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
                memmap.map = NULL;
@@ -467,9 +438,6 @@ void __init efi_free_boot_services(void)
 {
        void *p;
 
-       if (!efi_is_native())
-               return;
-
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                efi_memory_desc_t *md = p;
                unsigned long long start = md->phys_addr;
@@ -584,45 +552,82 @@ static int __init efi_systab_init(void *phys)
                return -EINVAL;
        }
        if ((efi.systab->hdr.revision >> 16) == 0)
-               pr_err("Warning: System table version "
-                      "%d.%02d, expected 1.00 or greater!\n",
+               pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
                       efi.systab->hdr.revision >> 16,
                       efi.systab->hdr.revision & 0xffff);
 
+       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
        return 0;
 }
 
-static int __init efi_runtime_init(void)
+static int __init efi_runtime_init32(void)
 {
-       efi_runtime_services_t *runtime;
+       efi_runtime_services_32_t *runtime;
+
+       runtime = early_ioremap((unsigned long)efi.systab->runtime,
+                       sizeof(efi_runtime_services_32_t));
+       if (!runtime) {
+               pr_err("Could not map the runtime service table!\n");
+               return -ENOMEM;
+       }
 
        /*
-        * Check out the runtime services table. We need to map
-        * the runtime services table so that we can grab the physical
-        * address of several of the EFI runtime functions, needed to
-        * set the firmware into virtual mode.
+        * We will only need *early* access to the following two
+        * EFI runtime services before set_virtual_address_map
+        * is invoked.
         */
+       efi_phys.set_virtual_address_map =
+                       (efi_set_virtual_address_map_t *)
+                       (unsigned long)runtime->set_virtual_address_map;
+       early_iounmap(runtime, sizeof(efi_runtime_services_32_t));
+
+       return 0;
+}
+
+static int __init efi_runtime_init64(void)
+{
+       efi_runtime_services_64_t *runtime;
+
        runtime = early_ioremap((unsigned long)efi.systab->runtime,
-                               sizeof(efi_runtime_services_t));
+                       sizeof(efi_runtime_services_64_t));
        if (!runtime) {
                pr_err("Could not map the runtime service table!\n");
                return -ENOMEM;
        }
+
        /*
-        * We will only need *early* access to the following
-        * two EFI runtime services before set_virtual_address_map
+        * We will only need *early* access to the following two
+        * EFI runtime services before set_virtual_address_map
         * is invoked.
         */
-       efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
        efi_phys.set_virtual_address_map =
-               (efi_set_virtual_address_map_t *)
-               runtime->set_virtual_address_map;
+                       (efi_set_virtual_address_map_t *)
+                       (unsigned long)runtime->set_virtual_address_map;
+       early_iounmap(runtime, sizeof(efi_runtime_services_64_t));
+
+       return 0;
+}
+
+static int __init efi_runtime_init(void)
+{
+       int rv;
+
        /*
-        * Make efi_get_time can be called before entering
-        * virtual mode.
+        * Check out the runtime services table. We need to map
+        * the runtime services table so that we can grab the physical
+        * address of several of the EFI runtime functions, needed to
+        * set the firmware into virtual mode.
         */
-       efi.get_time = phys_efi_get_time;
-       early_iounmap(runtime, sizeof(efi_runtime_services_t));
+       if (efi_enabled(EFI_64BIT))
+               rv = efi_runtime_init64();
+       else
+               rv = efi_runtime_init32();
+
+       if (rv)
+               return rv;
+
+       set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
        return 0;
 }
@@ -641,6 +646,8 @@ static int __init efi_memmap_init(void)
        if (add_efi_memmap)
                do_add_efi_memmap();
 
+       set_bit(EFI_MEMMAP, &efi.flags);
+
        return 0;
 }
 
@@ -723,7 +730,7 @@ void __init efi_init(void)
        if (efi_systab_init(efi_phys.systab))
                return;
 
-       set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
+       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
        efi.config_table = (unsigned long)efi.systab->tables;
        efi.fw_vendor    = (unsigned long)efi.systab->fw_vendor;
@@ -751,24 +758,21 @@ void __init efi_init(void)
        if (efi_config_init(arch_tables))
                return;
 
-       set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
-
        /*
         * Note: We currently don't support runtime services on an EFI
         * that doesn't match the kernel 32/64-bit mode.
         */
 
-       if (!efi_is_native())
+       if (!efi_runtime_supported())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
        else {
                if (disable_runtime || efi_runtime_init())
                        return;
-               set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
        }
        if (efi_memmap_init())
                return;
 
-       set_bit(EFI_MEMMAP, &x86_efi_facility);
+       set_bit(EFI_MEMMAP, &efi.flags);
 
        print_efi_memmap();
 }
@@ -845,6 +849,22 @@ void __init old_map_region(efi_memory_desc_t *md)
                       (unsigned long long)md->phys_addr);
 }
 
+static void native_runtime_setup(void)
+{
+       efi.get_time = virt_efi_get_time;
+       efi.set_time = virt_efi_set_time;
+       efi.get_wakeup_time = virt_efi_get_wakeup_time;
+       efi.set_wakeup_time = virt_efi_set_wakeup_time;
+       efi.get_variable = virt_efi_get_variable;
+       efi.get_next_variable = virt_efi_get_next_variable;
+       efi.set_variable = virt_efi_set_variable;
+       efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
+       efi.reset_system = virt_efi_reset_system;
+       efi.query_variable_info = virt_efi_query_variable_info;
+       efi.update_capsule = virt_efi_update_capsule;
+       efi.query_capsule_caps = virt_efi_query_capsule_caps;
+}
+
 /* Merge contiguous regions of the same type and attribute */
 static void __init efi_merge_regions(void)
 {
@@ -892,8 +912,9 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
        }
 }
 
-static int __init save_runtime_map(void)
+static void __init save_runtime_map(void)
 {
+#ifdef CONFIG_KEXEC
        efi_memory_desc_t *md;
        void *tmp, *p, *q = NULL;
        int count = 0;
@@ -915,38 +936,44 @@ static int __init save_runtime_map(void)
        }
 
        efi_runtime_map_setup(q, count, memmap.desc_size);
+       return;
 
-       return 0;
 out:
        kfree(q);
-       return -ENOMEM;
+       pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
+#endif
 }
 
-/*
- * Map efi regions which were passed via setup_data. The virt_addr is a fixed
- * addr which was used in first kernel of a kexec boot.
- */
-static void __init efi_map_regions_fixed(void)
+static void *realloc_pages(void *old_memmap, int old_shift)
 {
-       void *p;
-       efi_memory_desc_t *md;
+       void *ret;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
-               efi_map_region_fixed(md); /* FIXME: add error handling */
-               get_systab_virt_addr(md);
-       }
+       ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
+       if (!ret)
+               goto out;
+
+       /*
+        * A first-time allocation doesn't have anything to copy.
+        */
+       if (!old_memmap)
+               return ret;
 
+       memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
+
+out:
+       free_pages((unsigned long)old_memmap, old_shift);
+       return ret;
 }
 
 /*
- * Map efi memory ranges for runtime serivce and update new_memmap with virtual
- * addresses.
+ * Map the efi memory ranges of the runtime services and update new_mmap with
+ * virtual addresses.
  */
-static void * __init efi_map_regions(int *count)
+static void * __init efi_map_regions(int *count, int *pg_shift)
 {
+       void *p, *new_memmap = NULL;
+       unsigned long left = 0;
        efi_memory_desc_t *md;
-       void *p, *tmp, *new_memmap = NULL;
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;
@@ -961,20 +988,80 @@ static void * __init efi_map_regions(int *count)
                efi_map_region(md);
                get_systab_virt_addr(md);
 
-               tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size,
-                              GFP_KERNEL);
-               if (!tmp)
-                       goto out;
-               new_memmap = tmp;
+               if (left < memmap.desc_size) {
+                       new_memmap = realloc_pages(new_memmap, *pg_shift);
+                       if (!new_memmap)
+                               return NULL;
+
+                       left += PAGE_SIZE << *pg_shift;
+                       (*pg_shift)++;
+               }
+
                memcpy(new_memmap + (*count * memmap.desc_size), md,
                       memmap.desc_size);
+
+               left -= memmap.desc_size;
                (*count)++;
        }
 
        return new_memmap;
-out:
-       kfree(new_memmap);
-       return NULL;
+}
+
+static void __init kexec_enter_virtual_mode(void)
+{
+#ifdef CONFIG_KEXEC
+       efi_memory_desc_t *md;
+       void *p;
+
+       efi.systab = NULL;
+
+       /*
+        * We don't do virtual mode, since we don't do runtime services, on
+        * non-native EFI
+        */
+       if (!efi_is_native()) {
+               efi_unmap_memmap();
+               return;
+       }
+
+       /*
+        * Map efi regions which were passed via setup_data. The virt_addr is a
+        * fixed addr which was used in the first kernel of a kexec boot.
+        */
+       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+               md = p;
+               efi_map_region_fixed(md); /* FIXME: add error handling */
+               get_systab_virt_addr(md);
+       }
+
+       save_runtime_map();
+
+       BUG_ON(!efi.systab);
+
+       efi_sync_low_kernel_mappings();
+
+       /*
+        * Now that EFI is in virtual mode, update the function
+        * pointers in the runtime service table to the new virtual addresses.
+        *
+        * Call EFI services through wrapper functions.
+        */
+       efi.runtime_version = efi_systab.hdr.revision;
+
+       native_runtime_setup();
+
+       efi.set_virtual_address_map = NULL;
+
+       if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
+               runtime_code_page_mkexec();
+
+       /* clean DUMMY object */
+       efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
+                        EFI_VARIABLE_NON_VOLATILE |
+                        EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                        EFI_VARIABLE_RUNTIME_ACCESS,
+                        0, NULL);
+#endif
 }
 
 /*
@@ -996,57 +1083,53 @@ out:
  *
  * Specially for kexec boot, efi runtime maps in previous kernel should
  * be passed in via setup_data. In that case runtime ranges will be mapped
- * to the same virtual addresses as the first kernel.
+ * to the same virtual addresses as the first kernel, see
+ * kexec_enter_virtual_mode().
  */
-void __init efi_enter_virtual_mode(void)
+static void __init __efi_enter_virtual_mode(void)
 {
-       efi_status_t status;
+       int count = 0, pg_shift = 0;
        void *new_memmap = NULL;
-       int err, count = 0;
+       efi_status_t status;
 
        efi.systab = NULL;
 
-       /*
-        * We don't do virtual mode, since we don't do runtime services, on
-        * non-native EFI
-        */
-       if (!efi_is_native()) {
-               efi_unmap_memmap();
+       efi_merge_regions();
+       new_memmap = efi_map_regions(&count, &pg_shift);
+       if (!new_memmap) {
+               pr_err("Error reallocating memory, EFI runtime non-functional!\n");
                return;
        }
 
-       if (efi_setup) {
-               efi_map_regions_fixed();
-       } else {
-               efi_merge_regions();
-               new_memmap = efi_map_regions(&count);
-               if (!new_memmap) {
-                       pr_err("Error reallocating memory, EFI runtime non-functional!\n");
-                       return;
-               }
-       }
-
-       err = save_runtime_map();
-       if (err)
-               pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
+       save_runtime_map();
 
        BUG_ON(!efi.systab);
 
-       efi_setup_page_tables();
+       if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift))
+               return;
+
        efi_sync_low_kernel_mappings();
+       efi_dump_pagetable();
 
-       if (!efi_setup) {
+       if (efi_is_native()) {
                status = phys_efi_set_virtual_address_map(
-                       memmap.desc_size * count,
-                       memmap.desc_size,
-                       memmap.desc_version,
-                       (efi_memory_desc_t *)__pa(new_memmap));
-
-               if (status != EFI_SUCCESS) {
-                       pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
-                                status);
-                       panic("EFI call to SetVirtualAddressMap() failed!");
-               }
+                               memmap.desc_size * count,
+                               memmap.desc_size,
+                               memmap.desc_version,
+                               (efi_memory_desc_t *)__pa(new_memmap));
+       } else {
+               status = efi_thunk_set_virtual_address_map(
+                               efi_phys.set_virtual_address_map,
+                               memmap.desc_size * count,
+                               memmap.desc_size,
+                               memmap.desc_version,
+                               (efi_memory_desc_t *)__pa(new_memmap));
+       }
+
+       if (status != EFI_SUCCESS) {
+               pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
+                        status);
+               panic("EFI call to SetVirtualAddressMap() failed!");
        }
 
        /*
@@ -1056,23 +1139,43 @@ void __init efi_enter_virtual_mode(void)
         * Call EFI services through wrapper functions.
         */
        efi.runtime_version = efi_systab.hdr.revision;
-       efi.get_time = virt_efi_get_time;
-       efi.set_time = virt_efi_set_time;
-       efi.get_wakeup_time = virt_efi_get_wakeup_time;
-       efi.set_wakeup_time = virt_efi_set_wakeup_time;
-       efi.get_variable = virt_efi_get_variable;
-       efi.get_next_variable = virt_efi_get_next_variable;
-       efi.set_variable = virt_efi_set_variable;
-       efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
-       efi.reset_system = virt_efi_reset_system;
+
+       if (efi_is_native())
+               native_runtime_setup();
+       else
+               efi_thunk_runtime_setup();
+
        efi.set_virtual_address_map = NULL;
-       efi.query_variable_info = virt_efi_query_variable_info;
-       efi.update_capsule = virt_efi_update_capsule;
-       efi.query_capsule_caps = virt_efi_query_capsule_caps;
 
        efi_runtime_mkexec();
 
-       kfree(new_memmap);
+       /*
+        * We mapped the descriptor array into the EFI pagetable above but we're
+        * not unmapping it here. Here's why:
+        *
+        * We're copying select PGDs from the kernel page table to the EFI page
+        * table and when we do so and make changes to those PGDs like unmapping
+        * stuff from them, those changes appear in the kernel page table and we
+        * go boom.
+        *
+        * From setup_real_mode():
+        *
+        * ...
+        * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+        *
+        * In this particular case, our allocation is in PGD 0 of the EFI page
+        * table but we've copied that PGD from PGD[272] of the kernel page table:
+        *
+        *      pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
+        *
+        * where the direct memory mapping in kernel space is.
+        *
+        * new_memmap's VA comes from that direct mapping, so clearing it here
+        * would clear it in the kernel page table too.
+        *
+        * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
+        */
+       free_pages((unsigned long)new_memmap, pg_shift);
 
        /* clean DUMMY object */
        efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
@@ -1082,6 +1185,14 @@ void __init efi_enter_virtual_mode(void)
                         0, NULL);
 }
 
+void __init efi_enter_virtual_mode(void)
+{
+       if (efi_setup)
+               kexec_enter_virtual_mode();
+       else
+               __efi_enter_virtual_mode();
+}
+
 /*
  * Convenience functions to obtain memory types and attributes
  */
@@ -1119,9 +1230,8 @@ u64 efi_mem_attributes(unsigned long phys_addr)
 }
 
 /*
- * Some firmware has serious problems when using more than 50% of the EFI
- * variable store, i.e. it triggers bugs that can brick machines. Ensure that
- * we never use more than this safe limit.
+ * Some firmware implementations refuse to boot if there's insufficient space
+ * in the variable store. Ensure that we never use more than a safe limit.
  *
  * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
  * store.
@@ -1140,10 +1250,9 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
                return status;
 
        /*
-        * Some firmware implementations refuse to boot if there's insufficient
-        * space in the variable store. We account for that by refusing the
-        * write if permitting it would reduce the available space to under
-        * 5KB. This figure was provided by Samsung, so should be safe.
+        * We account for that by refusing the write if permitting it would
+        * reduce the available space to under 5KB. This figure was provided by
+        * Samsung, so should be safe.
         */
        if ((remaining_size - size < EFI_MIN_RESERVE) &&
                !efi_no_storage_paranoia) {
@@ -1206,7 +1315,7 @@ static int __init parse_efi_cmdline(char *str)
                str++;
 
        if (!strncmp(str, "old_map", 7))
-               set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+               set_bit(EFI_OLD_MEMMAP, &efi.flags);
 
        return 0;
 }
@@ -1219,7 +1328,7 @@ void __init efi_apply_memmap_quirks(void)
         * firmware/kernel architectures since there is no support for runtime
         * services.
         */
-       if (!efi_is_native()) {
+       if (!efi_runtime_supported()) {
                pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
                efi_unmap_memmap();
        }
@@ -1228,5 +1337,5 @@ void __init efi_apply_memmap_quirks(void)
         * UV doesn't support the new EFI pagetable mapping yet.
         */
        if (is_uv_system())
-               set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+               set_bit(EFI_OLD_MEMMAP, &efi.flags);
 }
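The long comment in __efi_enter_virtual_mode() leans on pgd_index(__PAGE_OFFSET) being 272: the kernel's direct mapping lives in PGD slot 272, and setup_real_mode() copies that slot into slot 0 of the trampoline page table that EFI uses. The index falls straight out of the 4-level paging arithmetic; a quick check with the constants of that era:

    #include <stdio.h>
    #include <stdint.h>

    /* x86-64 4-level paging: 9 index bits per level above the 12-bit page. */
    #define PGDIR_SHIFT     39
    #define PTRS_PER_PGD    512

    static unsigned pgd_index(uint64_t address)
    {
            return (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
    }

    int main(void)
    {
            /* __PAGE_OFFSET, the start of the direct mapping, at the time. */
            printf("pgd_index(0xffff880000000000) = %u\n",
                   pgd_index(0xffff880000000000ULL));   /* prints 272 */
            return 0;
    }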
index 0b74cdf7f816aa0e4e6f26c020821266f51aefdb..9ee3491e31fbab7f6643462395d31c6f9f36f1b4 100644 (file)
 static unsigned long efi_rt_eflags;
 
 void efi_sync_low_kernel_mappings(void) {}
-void efi_setup_page_tables(void) {}
+void __init efi_dump_pagetable(void) {}
+int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+{
+       return 0;
+}
+void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) {}
 
 void __init efi_map_region(efi_memory_desc_t *md)
 {
index 0c2a234fef1e48794a14e13aad812a5b468c6605..290d397e1dd9125e408a1ee140fe4d3ced51a33d 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/realmode.h>
+#include <asm/time.h>
 
 static pgd_t *save_pgd __initdata;
 static unsigned long efi_flags __initdata;
@@ -58,7 +59,8 @@ struct efi_scratch {
        u64 prev_cr3;
        pgd_t *efi_pgt;
        bool use_pgd;
-};
+       u64 phys_stack;
+} __packed;
 
 static void __init early_code_mapping_set_exec(int executable)
 {
@@ -137,12 +139,64 @@ void efi_sync_low_kernel_mappings(void)
                sizeof(pgd_t) * num_pgds);
 }
 
-void efi_setup_page_tables(void)
+int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
+       unsigned long text;
+       struct page *page;
+       unsigned npages;
+       pgd_t *pgd;
+
+       if (efi_enabled(EFI_OLD_MEMMAP))
+               return 0;
+
        efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+       pgd = __va(efi_scratch.efi_pgt);
 
-       if (!efi_enabled(EFI_OLD_MEMMAP))
-               efi_scratch.use_pgd = true;
+       /*
+        * It can happen that the physical address of new_memmap lands in memory
+        * which is not mapped in the EFI page table. Therefore we need to go
+        * and ident-map those pages containing the map before calling
+        * phys_efi_set_virtual_address_map().
+        */
+       if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+               pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
+               return 1;
+       }
+
+       efi_scratch.use_pgd = true;
+
+       /*
+        * When making calls to the firmware everything needs to be 1:1
+        * mapped and addressable with 32-bit pointers. Map the kernel
+        * text and allocate a new stack because we can't rely on the
+        * stack pointer being < 4GB.
+        */
+       if (!IS_ENABLED(CONFIG_EFI_MIXED))
+               return 0;
+
+       page = alloc_page(GFP_KERNEL|__GFP_DMA32);
+       if (!page)
+               panic("Unable to allocate EFI runtime stack < 4GB\n");
+
+       efi_scratch.phys_stack = virt_to_phys(page_address(page));
+       efi_scratch.phys_stack += PAGE_SIZE; /* stack grows down */
+
+       npages = (_end - _text) >> PAGE_SHIFT;
+       text = __pa(_text);
+
+       if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+               pr_err("Failed to map kernel text 1:1\n");
+               return 1;
+       }
+
+       return 0;
+}
+
+void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+{
+       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+       kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
 }
 
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
@@ -173,6 +227,16 @@ void __init efi_map_region(efi_memory_desc_t *md)
         */
        __map_region(md, md->phys_addr);
 
+       /*
+        * Enforce the 1:1 mapping as the default virtual address when
+        * booting in EFI mixed mode, because even though we may be
+        * running a 64-bit kernel, the firmware may only be 32-bit.
+        */
+       if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
+               md->virt_addr = md->phys_addr;
+               return;
+       }
+
        efi_va -= size;
 
        /* Is PA 2M-aligned? */
@@ -242,3 +306,299 @@ void __init efi_runtime_mkexec(void)
        if (__supported_pte_mask & _PAGE_NX)
                runtime_code_page_mkexec();
 }
+
+void __init efi_dump_pagetable(void)
+{
+#ifdef CONFIG_EFI_PGT_DUMP
+       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+       ptdump_walk_pgd_level(NULL, pgd);
+#endif
+}
+
+#ifdef CONFIG_EFI_MIXED
+extern efi_status_t efi64_thunk(u32, ...);
+
+#define runtime_service32(func)                                                 \
+({                                                                      \
+       u32 table = (u32)(unsigned long)efi.systab;                      \
+       u32 *rt, *___f;                                                  \
+                                                                        \
+       rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime));  \
+       ___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
+       *___f;                                                           \
+})
+
+/*
+ * Switch to the EFI page tables early so that we can access the 1:1
+ * runtime services mappings which are not mapped in any other page
+ * tables. This function must be called before runtime_service32().
+ *
+ * Also, disable interrupts because the IDT points to 64-bit handlers,
+ * which aren't going to function correctly when we switch to 32-bit.
+ */
+#define efi_thunk(f, ...)                                              \
+({                                                                     \
+       efi_status_t __s;                                               \
+       unsigned long flags;                                            \
+       u32 func;                                                       \
+                                                                       \
+       efi_sync_low_kernel_mappings();                                 \
+       local_irq_save(flags);                                          \
+                                                                       \
+       efi_scratch.prev_cr3 = read_cr3();                              \
+       write_cr3((unsigned long)efi_scratch.efi_pgt);                  \
+       __flush_tlb_all();                                              \
+                                                                       \
+       func = runtime_service32(f);                                    \
+       __s = efi64_thunk(func, __VA_ARGS__);                          \
+                                                                       \
+       write_cr3(efi_scratch.prev_cr3);                                \
+       __flush_tlb_all();                                              \
+       local_irq_restore(flags);                                       \
+                                                                       \
+       __s;                                                            \
+})
+
+efi_status_t efi_thunk_set_virtual_address_map(
+       void *phys_set_virtual_address_map,
+       unsigned long memory_map_size,
+       unsigned long descriptor_size,
+       u32 descriptor_version,
+       efi_memory_desc_t *virtual_map)
+{
+       efi_status_t status;
+       unsigned long flags;
+       u32 func;
+
+       efi_sync_low_kernel_mappings();
+       local_irq_save(flags);
+
+       efi_scratch.prev_cr3 = read_cr3();
+       write_cr3((unsigned long)efi_scratch.efi_pgt);
+       __flush_tlb_all();
+
+       func = (u32)(unsigned long)phys_set_virtual_address_map;
+       status = efi64_thunk(func, memory_map_size, descriptor_size,
+                            descriptor_version, virtual_map);
+
+       write_cr3(efi_scratch.prev_cr3);
+       __flush_tlb_all();
+       local_irq_restore(flags);
+
+       return status;
+}
+
+static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+{
+       efi_status_t status;
+       u32 phys_tm, phys_tc;
+
+       spin_lock(&rtc_lock);
+
+       phys_tm = virt_to_phys(tm);
+       phys_tc = virt_to_phys(tc);
+
+       status = efi_thunk(get_time, phys_tm, phys_tc);
+
+       spin_unlock(&rtc_lock);
+
+       return status;
+}
+
+static efi_status_t efi_thunk_set_time(efi_time_t *tm)
+{
+       efi_status_t status;
+       u32 phys_tm;
+
+       spin_lock(&rtc_lock);
+
+       phys_tm = virt_to_phys(tm);
+
+       status = efi_thunk(set_time, phys_tm);
+
+       spin_unlock(&rtc_lock);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
+                         efi_time_t *tm)
+{
+       efi_status_t status;
+       u32 phys_enabled, phys_pending, phys_tm;
+
+       spin_lock(&rtc_lock);
+
+       phys_enabled = virt_to_phys(enabled);
+       phys_pending = virt_to_phys(pending);
+       phys_tm = virt_to_phys(tm);
+
+       status = efi_thunk(get_wakeup_time, phys_enabled,
+                            phys_pending, phys_tm);
+
+       spin_unlock(&rtc_lock);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+{
+       efi_status_t status;
+       u32 phys_tm;
+
+       spin_lock(&rtc_lock);
+
+       phys_tm = virt_to_phys(tm);
+
+       status = efi_thunk(set_wakeup_time, enabled, phys_tm);
+
+       spin_unlock(&rtc_lock);
+
+       return status;
+}
+
+
+static efi_status_t
+efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+                      u32 *attr, unsigned long *data_size, void *data)
+{
+       efi_status_t status;
+       u32 phys_name, phys_vendor, phys_attr;
+       u32 phys_data_size, phys_data;
+
+       phys_data_size = virt_to_phys(data_size);
+       phys_vendor = virt_to_phys(vendor);
+       phys_name = virt_to_phys(name);
+       phys_attr = virt_to_phys(attr);
+       phys_data = virt_to_phys(data);
+
+       status = efi_thunk(get_variable, phys_name, phys_vendor,
+                          phys_attr, phys_data_size, phys_data);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+                      u32 attr, unsigned long data_size, void *data)
+{
+       u32 phys_name, phys_vendor, phys_data;
+       efi_status_t status;
+
+       phys_name = virt_to_phys(name);
+       phys_vendor = virt_to_phys(vendor);
+       phys_data = virt_to_phys(data);
+
+       /* If data_size is > sizeof(u32) we've got problems */
+       status = efi_thunk(set_variable, phys_name, phys_vendor,
+                          attr, data_size, phys_data);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_get_next_variable(unsigned long *name_size,
+                           efi_char16_t *name,
+                           efi_guid_t *vendor)
+{
+       efi_status_t status;
+       u32 phys_name_size, phys_name, phys_vendor;
+
+       phys_name_size = virt_to_phys(name_size);
+       phys_vendor = virt_to_phys(vendor);
+       phys_name = virt_to_phys(name);
+
+       status = efi_thunk(get_next_variable, phys_name_size,
+                          phys_name, phys_vendor);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_get_next_high_mono_count(u32 *count)
+{
+       efi_status_t status;
+       u32 phys_count;
+
+       phys_count = virt_to_phys(count);
+       status = efi_thunk(get_next_high_mono_count, phys_count);
+
+       return status;
+}
+
+static void
+efi_thunk_reset_system(int reset_type, efi_status_t status,
+                      unsigned long data_size, efi_char16_t *data)
+{
+       u32 phys_data;
+
+       phys_data = virt_to_phys(data);
+
+       efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+}
+
+static efi_status_t
+efi_thunk_update_capsule(efi_capsule_header_t **capsules,
+                        unsigned long count, unsigned long sg_list)
+{
+       /*
+        * To properly support this function we would need to repackage
+        * 'capsules' because the firmware doesn't understand 64-bit
+        * pointers.
+        */
+       return EFI_UNSUPPORTED;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
+                             u64 *remaining_space,
+                             u64 *max_variable_size)
+{
+       efi_status_t status;
+       u32 phys_storage, phys_remaining, phys_max;
+
+       if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+               return EFI_UNSUPPORTED;
+
+       phys_storage = virt_to_phys(storage_space);
+       phys_remaining = virt_to_phys(remaining_space);
+       phys_max = virt_to_phys(max_variable_size);
+
+       status = efi_thunk(query_variable_info, attr, phys_storage,
+                          phys_remaining, phys_max);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
+                            unsigned long count, u64 *max_size,
+                            int *reset_type)
+{
+       /*
+        * To properly support this function we would need to repackage
+        * 'capsules' because the firmware doesn't understand 64-bit
+        * pointers.
+        */
+       return EFI_UNSUPPORTED;
+}
+
+void efi_thunk_runtime_setup(void)
+{
+       efi.get_time = efi_thunk_get_time;
+       efi.set_time = efi_thunk_set_time;
+       efi.get_wakeup_time = efi_thunk_get_wakeup_time;
+       efi.set_wakeup_time = efi_thunk_set_wakeup_time;
+       efi.get_variable = efi_thunk_get_variable;
+       efi.get_next_variable = efi_thunk_get_next_variable;
+       efi.set_variable = efi_thunk_set_variable;
+       efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
+       efi.reset_system = efi_thunk_reset_system;
+       efi.query_variable_info = efi_thunk_query_variable_info;
+       efi.update_capsule = efi_thunk_update_capsule;
+       efi.query_capsule_caps = efi_thunk_query_capsule_caps;
+}
+#endif /* CONFIG_EFI_MIXED */
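
Aside: the runtime_service32() macro in the hunk above resolves a firmware service purely by offset arithmetic on the 32-bit system table. Below is a small user-space sketch of that offsetof()-based lookup; the two structs are simplified stand-ins, not the kernel's efi_system_table_32_t / efi_runtime_services_32_t, and the addresses are made up.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the 32-bit firmware tables. */
struct rt_services_32 {
        uint32_t hdr[4];
        uint32_t get_time;              /* 32-bit pointer to the service */
        uint32_t set_time;
};

struct system_table_32 {
        uint32_t hdr[4];
        uint32_t runtime;               /* 32-bit pointer to rt_services_32 */
};

int main(void)
{
        /* Pretend the firmware put its runtime services table at 0x3f80000. */
        struct system_table_32 systab = { .runtime = 0x3f80000 };

        /* Step 1: read the 32-bit "runtime" pointer out of the system table. */
        uint32_t *rt = (uint32_t *)((uintptr_t)&systab +
                                    offsetof(struct system_table_32, runtime));

        /* Step 2: compute where the get_time slot lives in that table.
         * runtime_service32() then loads the 32-bit value stored there. */
        uint32_t slot = *rt + offsetof(struct rt_services_32, get_time);

        printf("runtime table at 0x%x, get_time slot at 0x%x\n",
               (unsigned)*rt, (unsigned)slot);
        return 0;
}
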
index 88073b1402988b49eb6c5e95f9f801a14d40f144..e0984ef0374b87a4dcf9d770b812feff2825285d 100644 (file)
@@ -7,6 +7,10 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/msr.h>
+#include <asm/processor-flags.h>
+#include <asm/page_types.h>
 
 #define SAVE_XMM                       \
        mov %rsp, %rax;                 \
@@ -164,7 +168,169 @@ ENTRY(efi_call6)
        ret
 ENDPROC(efi_call6)
 
+#ifdef CONFIG_EFI_MIXED
+
+/*
+ * We run this function from the 1:1 mapping.
+ *
+ * This function must be invoked with a 1:1 mapped stack.
+ */
+ENTRY(__efi64_thunk)
+       movl    %ds, %eax
+       push    %rax
+       movl    %es, %eax
+       push    %rax
+       movl    %ss, %eax
+       push    %rax
+
+       subq    $32, %rsp
+       movl    %esi, 0x0(%rsp)
+       movl    %edx, 0x4(%rsp)
+       movl    %ecx, 0x8(%rsp)
+       movq    %r8, %rsi
+       movl    %esi, 0xc(%rsp)
+       movq    %r9, %rsi
+       movl    %esi,  0x10(%rsp)
+
+       sgdt    save_gdt(%rip)
+
+       leaq    1f(%rip), %rbx
+       movq    %rbx, func_rt_ptr(%rip)
+
+       /* Switch to gdt with 32-bit segments */
+       movl    64(%rsp), %eax
+       lgdt    (%rax)
+
+       leaq    efi_enter32(%rip), %rax
+       pushq   $__KERNEL_CS
+       pushq   %rax
+       lretq
+
+1:     addq    $32, %rsp
+
+       lgdt    save_gdt(%rip)
+
+       pop     %rbx
+       movl    %ebx, %ss
+       pop     %rbx
+       movl    %ebx, %es
+       pop     %rbx
+       movl    %ebx, %ds
+
+       /*
+        * Convert 32-bit status code into 64-bit.
+        */
+       test    %rax, %rax
+       jz      1f
+       movl    %eax, %ecx
+       andl    $0x0fffffff, %ecx
+       andl    $0xf0000000, %eax
+       shl     $32, %rax
+       or      %rcx, %rax
+1:
+       ret
+ENDPROC(__efi64_thunk)
+
+ENTRY(efi_exit32)
+       movq    func_rt_ptr(%rip), %rax
+       push    %rax
+       mov     %rdi, %rax
+       ret
+ENDPROC(efi_exit32)
+
+       .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    %eax, %ss
+
+       /* Reload pgtables */
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       /* Disable paging */
+       movl    %cr0, %eax
+       btrl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+
+       /* Disable long mode via EFER */
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btrl    $_EFER_LME, %eax
+       wrmsr
+
+       call    *%edi
+
+       /* We must preserve return value */
+       movl    %eax, %edi
+
+       /*
+        * Some firmware will return with interrupts enabled. Be sure to
+        * disable them before we switch GDTs.
+        */
+       cli
+
+       movl    68(%esp), %eax
+       movl    %eax, 2(%eax)
+       lgdtl   (%eax)
+
+       movl    %cr4, %eax
+       btsl    $(X86_CR4_PAE_BIT), %eax
+       movl    %eax, %cr4
+
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btsl    $_EFER_LME, %eax
+       wrmsr
+
+       xorl    %eax, %eax
+       lldt    %ax
+
+       movl    72(%esp), %eax
+       pushl   $__KERNEL_CS
+       pushl   %eax
+
+       /* Enable paging */
+       movl    %cr0, %eax
+       btsl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+       lret
+ENDPROC(efi_enter32)
+
+       .data
+       .balign 8
+       .global efi32_boot_gdt
+efi32_boot_gdt:        .word   0
+               .quad   0
+
+save_gdt:      .word   0
+               .quad   0
+func_rt_ptr:   .quad   0
+
+       .global efi_gdt64
+efi_gdt64:
+       .word   efi_gdt64_end - efi_gdt64
+       .long   0                       /* Filled out by user */
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
+       .quad   0x0080890000000000      /* TS descriptor */
+       .quad   0x0000000000000000      /* TS continued */
+efi_gdt64_end:
+#endif /* CONFIG_EFI_MIXED */
+
        .data
 ENTRY(efi_scratch)
        .fill 3,8,0
        .byte 0
+       .quad 0
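
Aside: the "convert 32-bit status code into 64-bit" block in __efi64_thunk() above moves the status class bits from the top nibble of a 32-bit EFI_STATUS into the top nibble of the 64-bit value while keeping the low 28 bits. The same transformation as a standalone C sketch (the sample value is the standard EFI_UNSUPPORTED encoding):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the bit shuffling done after the firmware call returns:
 * EFI_SUCCESS (0) stays 0, anything else has its top nibble moved up
 * into bits 60-63 of the 64-bit status. */
static uint64_t efi_status_32_to_64(uint32_t s32)
{
        if (s32 == 0)
                return 0;

        return ((uint64_t)(s32 & 0xf0000000) << 32) | (s32 & 0x0fffffff);
}

int main(void)
{
        uint32_t unsupported32 = 0x80000003;    /* EFI_UNSUPPORTED, 32-bit form */

        printf("0x%x -> 0x%llx\n", (unsigned)unsupported32,
               (unsigned long long)efi_status_32_to_64(unsupported32));
        /* Prints: 0x80000003 -> 0x8000000000000003 */
        return 0;
}
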
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
new file mode 100644 (file)
index 0000000..8806fa7
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+       .text
+       .code64
+ENTRY(efi64_thunk)
+       push    %rbp
+       push    %rbx
+
+       /*
+        * Switch to 1:1 mapped 32-bit stack pointer.
+        */
+       movq    %rsp, efi_saved_sp(%rip)
+       movq    efi_scratch+25(%rip), %rsp
+
+       /*
+        * Calculate the physical address of the kernel text.
+        */
+       movq    $__START_KERNEL_map, %rax
+       subq    phys_base(%rip), %rax
+
+       /*
+        * Push some physical addresses onto the stack. This is easier
+        * to do now in a code64 section while the assembler can address
+        * 64-bit values. Note that all the addresses on the stack are
+        * 32-bit.
+        */
+       subq    $16, %rsp
+       leaq    efi_exit32(%rip), %rbx
+       subq    %rax, %rbx
+       movl    %ebx, 8(%rsp)
+       leaq    efi_gdt64(%rip), %rbx
+       subq    %rax, %rbx
+       movl    %ebx, 2(%ebx)
+       movl    %ebx, 4(%rsp)
+       leaq    efi_gdt32(%rip), %rbx
+       subq    %rax, %rbx
+       movl    %ebx, 2(%ebx)
+       movl    %ebx, (%rsp)
+
+       leaq    __efi64_thunk(%rip), %rbx
+       subq    %rax, %rbx
+       call    *%rbx
+
+       movq    efi_saved_sp(%rip), %rsp
+       pop     %rbx
+       pop     %rbp
+       retq
+ENDPROC(efi64_thunk)
+
+       .data
+efi_gdt32:
+       .word   efi_gdt32_end - efi_gdt32
+       .long   0                       /* Filled out above */
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+       .quad   0x00cf9a000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
+efi_gdt32_end:
+
+efi_saved_sp:          .quad 0
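
Aside: efi_gdt32 here (and efi_gdt64 in the previous file) begins with an lgdt pseudo-descriptor, a 16-bit limit followed by the base address, so the base field sits at byte offset 2. That is what the "movl %ebx, 2(%ebx)" patches fill in with the blob's own physical address before lgdt is executed. A tiny C sketch of that layout, using a simplified 32-bit pseudo-descriptor and a made-up physical address:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* 32-bit lgdt pseudo-descriptor: 16-bit limit, then a 32-bit base.
 * The base really does start at byte offset 2. */
struct gdt_ptr32 {
        uint16_t limit;
        uint32_t base;
} __attribute__((packed));

int main(void)
{
        struct gdt_ptr32 gdtr = { .limit = 0x17, .base = 0 };
        uint32_t phys = 0x00100000;     /* made-up physical address */

        /* Patch the base in place, like "movl %ebx, 2(%ebx)" does. */
        memcpy((uint8_t *)&gdtr + 2, &phys, sizeof(phys));

        printf("sizeof=%zu base_offset=%zu base=0x%x\n",
               sizeof(gdtr), offsetof(struct gdt_ptr32, base),
               (unsigned)gdtr.base);
        /* Prints: sizeof=6 base_offset=2 base=0x100000 */
        return 0;
}
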
index 39febb214e8c748ffd245c409c3b0cea31d3cebd..9471b9456f259762baf47c05a742d8401146b81e 100644 (file)
@@ -88,7 +88,7 @@ struct ts5500_sbc {
 static const struct {
        const char * const string;
        const ssize_t offset;
-} ts5500_signatures[] __initdata = {
+} ts5500_signatures[] __initconst = {
        { "TS-5x00 AMD Elan", 0xb14 },
 };
 
index fd14be1d1472204afc041a5bef320b2f7c7275af..9206ac7961a596798a1302b808bcf1fd6f119272 100644 (file)
@@ -2,6 +2,8 @@
 # Building vDSO images for x86.
 #
 
+KBUILD_CFLAGS += $(DISABLE_LTO)
+
 VDSO64-$(CONFIG_X86_64)                := y
 VDSOX32-$(CONFIG_X86_X32_ABI)  := y
 VDSO32-$(CONFIG_X86_32)                := y
@@ -35,7 +37,8 @@ export CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
                        -Wl,--no-undefined \
-                       -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+                       -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
+                       $(DISABLE_LTO)
 
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
 
@@ -127,7 +130,7 @@ vdso32.so-$(VDSO32-y)               += sysenter
 vdso32-images                  = $(vdso32.so-y:%=vdso32-%.so)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 
 # This makes sure the $(obj) subdirectory exists even though vdso32/
 # is not a kbuild sub-make subdirectory.
@@ -181,7 +184,8 @@ quiet_cmd_vdso = VDSO    $@
                       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+               $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
 #
index 256282e7888b118b02e61d657f78ae8490bf0fe4..2423ef04ffea596fd43eeb918f290003277fbb21 100644 (file)
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 /* Assume pteval_t is equivalent to all the other *val_t types. */
 static pteval_t pte_mfn_to_pfn(pteval_t val)
 {
-       if (pteval_present(val)) {
+       if (val & _PAGE_PRESENT) {
                unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                unsigned long pfn = mfn_to_pfn(mfn);
 
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
 
 static pteval_t pte_pfn_to_mfn(pteval_t val)
 {
-       if (pteval_present(val)) {
+       if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
                unsigned long mfn;
index 581521c843a576d4264567e90c11dfaf645d6238..4d3acc34a998e5aca97d58c9573e822a29905626 100644 (file)
@@ -183,7 +183,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 
        local_irq_save(flags);
 
-       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+       kstat_incr_irq_this_cpu(irq);
 out:
        cpumask_clear_cpu(cpu, &waiting_cpus);
        w->lock = NULL;
index 0a337e4a8370aefce9d310c47bf75053316a1d5a..c3d20ba6eb86d6f7d42102f076f6085bcbe096c8 100644 (file)
@@ -9,6 +9,7 @@ generic-y += errno.h
 generic-y += exec.h
 generic-y += fcntl.h
 generic-y += hardirq.h
+generic-y += hash.h
 generic-y += ioctl.h
 generic-y += irq_regs.h
 generic-y += kdebug.h
@@ -17,7 +18,9 @@ generic-y += kvm_para.h
 generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
+generic-y += mcs_spinlock.h
 generic-y += percpu.h
+generic-y += preempt.h
 generic-y += resource.h
 generic-y += scatterlist.h
 generic-y += sections.h
@@ -27,5 +30,3 @@ generic-y += termios.h
 generic-y += topology.h
 generic-y += trace_clock.h
 generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
index 482868a2de6ebde7995002509445859bf7a002b0..3eee94f621ebd723195d326965cb6afc6998c85f 100644 (file)
@@ -155,18 +155,6 @@ void __init init_IRQ(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_chip *chip = irq_data_get_irq_chip(data);
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       if (chip->irq_set_affinity)
-               chip->irq_set_affinity(data, cpumask_of(cpu), false);
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
 /*
  * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
  * the affinity settings do not allow other CPUs, force them onto any
@@ -175,10 +163,9 @@ static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
 void migrate_irqs(void)
 {
        unsigned int i, cpu = smp_processor_id();
-       struct irq_desc *desc;
 
-       for_each_irq_desc(i, desc) {
-               struct irq_data *data = irq_desc_get_irq_data(desc);
+       for_each_active_irq(i) {
+               struct irq_data *data = irq_get_irq_data(i);
                unsigned int newcpu;
 
                if (irqd_is_per_cpu(data))
@@ -194,11 +181,8 @@ void migrate_irqs(void)
                                            i, cpu);
 
                        cpumask_setall(data->affinity);
-                       newcpu = cpumask_any_and(data->affinity,
-                                                cpu_online_mask);
                }
-
-               route_irq(data, i, newcpu);
+               irq_set_affinity(i, data->affinity);
        }
 }
 #endif /* CONFIG_HOTPLUG_CPU */
index 4770de5707b9b9eb0a4c27f14c7ec9ad13035257..c205653e96447b12ab9ed163715bf9d64c0cd230 100644 (file)
@@ -43,19 +43,6 @@ config ACPI_SLEEP
        depends on SUSPEND || HIBERNATION
        default y
 
-config ACPI_PROCFS
-       bool "Deprecated /proc/acpi files"
-       depends on PROC_FS
-       help
-         For backwards compatibility, this option allows
-         deprecated /proc/acpi/ files to exist, even when
-         they have been replaced by functions in /sys.
-
-         This option has no effect on /proc/acpi/ files
-         and functions which do not yet exist in /sys.
-
-         Say N to delete /proc/acpi/ files that have moved to /sys/
-
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
@@ -115,7 +102,7 @@ config ACPI_BUTTON
 
 config ACPI_VIDEO
        tristate "Video"
-       depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
+       depends on X86 && BACKLIGHT_CLASS_DEVICE
        depends on INPUT
        select THERMAL
        help
@@ -343,6 +330,19 @@ config ACPI_BGRT
          data from the firmware boot splash. It will appear under
          /sys/firmware/acpi/bgrt/ .
 
+config ACPI_REDUCED_HARDWARE_ONLY
+       bool "Hardware-reduced ACPI support only" if EXPERT
+       def_bool n
+       depends on ACPI
+       help
+       This config item changes the way the ACPI code is built.  When this
+       option is selected, the kernel will use a specialized version of
+       ACPICA that ONLY supports the ACPI "reduced hardware" mode.  The
+       resulting kernel will be smaller but it will also be restricted to
+       running in ACPI reduced hardware mode ONLY.
+
+       If you are unsure what to do, do not enable this option.
+
 source "drivers/acpi/apei/Kconfig"
 
 config ACPI_EXTLOG
index 6f190bc2b8b784bf7e09425b77c560fa60f04b90..2c01c1da29ce39f637136a12a512f2a473333233 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/acpi.h>
+#include "battery.h"
 
 #define PREFIX "ACPI: "
 
@@ -57,6 +58,7 @@ struct acpi_ac {
        struct power_supply charger;
        struct platform_device *pdev;
        unsigned long long state;
+       struct notifier_block battery_nb;
 };
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
@@ -152,6 +154,26 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
        return;
 }
 
+static int acpi_ac_battery_notify(struct notifier_block *nb,
+                                 unsigned long action, void *data)
+{
+       struct acpi_ac *ac = container_of(nb, struct acpi_ac, battery_nb);
+       struct acpi_bus_event *event = (struct acpi_bus_event *)data;
+
+       /*
+        * On HP Pavilion dv6-6179er, AC status notifications aren't triggered
+        * when the adapter is plugged/unplugged. However, battery status
+        * notifications are triggered when the battery starts charging or
+        * discharging. Re-reading AC status triggers lost AC notifications,
+        * if AC status has changed.
+        */
+       if (strcmp(event->device_class, ACPI_BATTERY_CLASS) == 0 &&
+           event->type == ACPI_BATTERY_NOTIFY_STATUS)
+               acpi_ac_get_state(ac);
+
+       return NOTIFY_OK;
+}
+
 static int thinkpad_e530_quirk(const struct dmi_system_id *d)
 {
        ac_sleep_before_get_state_ms = 1000;
@@ -215,6 +237,8 @@ static int acpi_ac_probe(struct platform_device *pdev)
               acpi_device_name(adev), acpi_device_bid(adev),
               ac->state ? "on-line" : "off-line");
 
+       ac->battery_nb.notifier_call = acpi_ac_battery_notify;
+       register_acpi_notifier(&ac->battery_nb);
 end:
        if (result)
                kfree(ac);
@@ -261,6 +285,7 @@ static int acpi_ac_remove(struct platform_device *pdev)
        ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
+       unregister_acpi_notifier(&ac->battery_nb);
 
        kfree(ac);
 
index 84190ed89c04bb74f1a69d0c8d72fe23efbfacc2..961b45d18a5de484243da61ee0e40625956c3265 100644 (file)
@@ -18,8 +18,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 ACPI_MODULE_NAME("cmos rtc");
 
 static const struct acpi_device_id acpi_cmos_rtc_ids[] = {
index 6745fe137b9ea541ae729429035eaeca8fc8fc07..69e29f409d4c69952bf3655daeec25bbfeff085e 100644 (file)
@@ -33,6 +33,13 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_GENERAL_UART_RTS_OVRD     BIT(3)
 #define LPSS_SW_LTR                    0x10
 #define LPSS_AUTO_LTR                  0x14
+#define LPSS_LTR_SNOOP_REQ             BIT(15)
+#define LPSS_LTR_SNOOP_MASK            0x0000FFFF
+#define LPSS_LTR_SNOOP_LAT_1US         0x800
+#define LPSS_LTR_SNOOP_LAT_32US                0xC00
+#define LPSS_LTR_SNOOP_LAT_SHIFT       5
+#define LPSS_LTR_SNOOP_LAT_CUTOFF      3000
+#define LPSS_LTR_MAX_VAL               0x3FF
 #define LPSS_TX_INT                    0x20
 #define LPSS_TX_INT_MASK               BIT(1)
 
@@ -102,6 +109,16 @@ static struct lpss_device_desc lpt_sdio_dev_desc = {
        .ltr_required = true,
 };
 
+static struct lpss_shared_clock pwm_clock = {
+       .name = "pwm_clk",
+       .rate = 25000000,
+};
+
+static struct lpss_device_desc byt_pwm_dev_desc = {
+       .clk_required = true,
+       .shared_clock = &pwm_clock,
+};
+
 static struct lpss_shared_clock uart_clock = {
        .name = "uart_clk",
        .rate = 44236800,
@@ -157,6 +174,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
        { "INT33C7", },
 
        /* BayTrail LPSS devices */
+       { "80860F09", (unsigned long)&byt_pwm_dev_desc },
        { "80860F0A", (unsigned long)&byt_uart_dev_desc },
        { "80860F0E", (unsigned long)&byt_spi_dev_desc },
        { "80860F14", (unsigned long)&byt_sdio_dev_desc },
@@ -315,6 +333,17 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        return ret;
 }
 
+static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
+{
+       return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
+static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
+                            unsigned int reg)
+{
+       writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+}
+
 static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
 {
        struct acpi_device *adev;
@@ -336,7 +365,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
                ret = -ENODEV;
                goto out;
        }
-       *val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+       *val = __lpss_reg_read(pdata, reg);
 
  out:
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -389,6 +418,37 @@ static struct attribute_group lpss_attr_group = {
        .name = "lpss_ltr",
 };
 
+static void acpi_lpss_set_ltr(struct device *dev, s32 val)
+{
+       struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+       u32 ltr_mode, ltr_val;
+
+       ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
+       if (val < 0) {
+               if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
+                       ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
+                       __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+               }
+               return;
+       }
+       ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
+       if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
+               ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
+               val = LPSS_LTR_MAX_VAL;
+       } else if (val > LPSS_LTR_MAX_VAL) {
+               ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
+               val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
+       } else {
+               ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
+       }
+       ltr_val |= val;
+       __lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
+       if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
+               ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
+               __lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
+       }
+}
+
 static int acpi_lpss_platform_notify(struct notifier_block *nb,
                                     unsigned long action, void *data)
 {
@@ -426,9 +486,29 @@ static struct notifier_block acpi_lpss_nb = {
        .notifier_call = acpi_lpss_platform_notify,
 };
 
+static void acpi_lpss_bind(struct device *dev)
+{
+       struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+       if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
+               return;
+
+       if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
+               dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
+       else
+               dev_err(dev, "MMIO size insufficient to access LTR\n");
+}
+
+static void acpi_lpss_unbind(struct device *dev)
+{
+       dev->power.set_latency_tolerance = NULL;
+}
+
 static struct acpi_scan_handler lpss_handler = {
        .ids = acpi_lpss_device_ids,
        .attach = acpi_lpss_create_device,
+       .bind = acpi_lpss_bind,
+       .unbind = acpi_lpss_unbind,
 };
 
 void __init acpi_lpss_init(void)
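
Aside: the value-encoding branch of acpi_lpss_set_ltr() above picks between a fine scale and a coarse scale (value shifted right by 5) for the latency tolerance and clamps at the cutoff. A standalone sketch of just that encoding decision, with the register constants copied from the driver hunk; the negative "disable software LTR" path is omitted:

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the driver hunk above. */
#define LTR_SNOOP_REQ           (1u << 15)
#define LTR_SNOOP_MASK          0x0000FFFFu
#define LTR_SNOOP_LAT_1US       0x800u
#define LTR_SNOOP_LAT_32US      0xC00u
#define LTR_SNOOP_LAT_SHIFT     5
#define LTR_SNOOP_LAT_CUTOFF    3000
#define LTR_MAX_VAL             0x3FFu

/* Mirror of the scale selection: 'old' stands in for the current
 * LPSS_SW_LTR register contents, 'val' for the requested tolerance. */
static uint32_t encode_ltr(uint32_t old, uint32_t val)
{
        uint32_t ltr_val = old & ~LTR_SNOOP_MASK;

        if (val >= LTR_SNOOP_LAT_CUTOFF) {
                ltr_val |= LTR_SNOOP_LAT_32US;
                val = LTR_MAX_VAL;
        } else if (val > LTR_MAX_VAL) {
                ltr_val |= LTR_SNOOP_LAT_32US | LTR_SNOOP_REQ;
                val >>= LTR_SNOOP_LAT_SHIFT;
        } else {
                ltr_val |= LTR_SNOOP_LAT_1US | LTR_SNOOP_REQ;
        }
        return ltr_val | val;
}

int main(void)
{
        printf("val=100  -> 0x%04x\n", encode_ltr(0, 100));   /* fine scale   */
        printf("val=2048 -> 0x%04x\n", encode_ltr(0, 2048));  /* coarse scale */
        printf("val=5000 -> 0x%04x\n", encode_ltr(0, 5000));  /* clamped      */
        return 0;
}
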
index df96a0fe48905aa77d5ac17248195c0c1fcb9221..37d73024b82e4f9a7df2820e331bd873e0805947 100644 (file)
@@ -408,28 +408,14 @@ static int acpi_pad_pur(acpi_handle handle)
        return num;
 }
 
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
-       uint32_t idle_cpus)
-{
-       union acpi_object params[3] = {
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_BUFFER,},
-       };
-       struct acpi_object_list arg_list = {3, params};
-
-       params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
-       params[1].integer.value =  stat;
-       params[2].buffer.length = 4;
-       params[2].buffer.pointer = (void *)&idle_cpus;
-       acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
 static void acpi_pad_handle_notify(acpi_handle handle)
 {
        int num_cpus;
        uint32_t idle_cpus;
+       struct acpi_buffer param = {
+               .length = 4,
+               .pointer = (void *)&idle_cpus,
+       };
 
        mutex_lock(&isolated_cpus_lock);
        num_cpus = acpi_pad_pur(handle);
@@ -439,7 +425,7 @@ static void acpi_pad_handle_notify(acpi_handle handle)
        }
        acpi_pad_idle_cpus(num_cpus);
        idle_cpus = acpi_pad_idle_cpus_num();
-       acpi_pad_ost(handle, 0, idle_cpus);
+       acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
        mutex_unlock(&isolated_cpus_lock);
 }
 
index 438304086ff1bb117f946f79038ff586a80df9c5..b7ed86a20427e43080db1f4238afba5864e426a6 100644 (file)
@@ -122,6 +122,8 @@ acpi-y +=           \
        rsaddr.o        \
        rscalc.o        \
        rscreate.o      \
+       rsdump.o        \
+       rsdumpinfo.o    \
        rsinfo.o        \
        rsio.o          \
        rsirq.o         \
@@ -132,8 +134,6 @@ acpi-y +=           \
        rsutils.o       \
        rsxface.o
 
-acpi-$(ACPI_FUTURE_USAGE) += rsdump.o rsdumpinfo.o
-
 acpi-y +=              \
        tbfadt.o        \
        tbfind.o        \
index 8a6c4a0d22db7b2dc5b8250c91880a60e74bfbb3..6f1c616910acad10e379a320933cb10be08f9d94 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2bf3ca2b8a7a653f16fa97264e95063abd1fa3e0..68a91eb0fa483f24b55218770c4bed079873d3e6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -115,6 +115,8 @@ ACPI_HW_DEPENDENT_RETURN_VOID(void
                                                   char *block_arg))
 ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
 
+void acpi_db_execute_test(char *type_arg);
+
 /*
  * dbconvert - miscellaneous conversion routines
  */
index 427db72a6302f4b53f32a71d44a8a0f0ef170b27..5b472c43c31da376e14e039f4583521e68b184da 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0fb0adf435d667593c29b2f0785cb5a5c3a35e8c..68ec61fff1886872da7143ec804013ab5b504656 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4ed1aa384df2fd7680268c569a0388434b76d7d4..8f40bb972ae3907a0eec7c50b7d2e39a48a231cb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * to simplify maintenance of the code.
  */
 #ifdef DEFINE_ACPI_GLOBALS
-#define ACPI_EXTERN
-#define ACPI_INIT_GLOBAL(a,b) a=b
+#define ACPI_GLOBAL(type,name) \
+       extern type name; \
+       type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+       type name=value
+
 #else
-#define ACPI_EXTERN extern
-#define ACPI_INIT_GLOBAL(a,b) a
+#define ACPI_GLOBAL(type,name) \
+       extern type name
+
+#define ACPI_INIT_GLOBAL(type,name,value) \
+       extern type name
 #endif
 
 #ifdef DEFINE_ACPI_GLOBALS
@@ -82,7 +90,7 @@
  * 5) Allow unresolved references (invalid target name) in package objects
  * 6) Enable warning messages for behavior that is not ACPI spec compliant
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE);
 
 /*
  * Automatically serialize ALL control methods? Default is FALSE, meaning
@@ -90,25 +98,25 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_interpreter_slack, FALSE);
  * Only change this if the ASL code is poorly written and cannot handle
  * reentrancy even though methods are marked "NotSerialized".
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_all_methods_serialized, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_all_methods_serialized, FALSE);
 
 /*
  * Create the predefined _OSI method in the namespace? Default is TRUE
  * because ACPI CA is fully compatible with other ACPI implementations.
  * Changing this will revert ACPI CA (and machine ASL) to pre-OSI behavior.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_create_osi_method, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_create_osi_method, TRUE);
 
 /*
  * Optionally use default values for the ACPI register widths. Set this to
  * TRUE to use the defaults, if an FADT contains incorrect widths/lengths.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_default_register_widths, TRUE);
 
 /*
  * Optionally enable output from the AML Debug Object.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_aml_debug_object, FALSE);
 
 /*
  * Optionally copy the entire DSDT to local memory (instead of simply
@@ -116,7 +124,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
  * DSDT, creating the need for this option. Default is FALSE, do not copy
  * the DSDT.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_copy_dsdt_locally, FALSE);
 
 /*
  * Optionally ignore an XSDT if present and use the RSDT instead.
@@ -124,7 +132,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
  * of the RSDT, the XSDT has been found to be corrupt or ill-formed on
  * some machines. Default behavior is to use the XSDT if present.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
 
 /*
  * Optionally use 32-bit FADT addresses if and when there is a conflict
@@ -134,7 +142,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_do_not_use_xsdt, FALSE);
  * some machines have been found to have a corrupted non-zero 64-bit
  * address. Default is FALSE, do not favor the 32-bit addresses.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
 
 /*
  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
@@ -142,47 +150,28 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use32_bit_fadt_addresses, FALSE);
  * this value is set to TRUE if any Windows OSI strings have been
  * requested by the BIOS.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_truncate_io_addresses, FALSE);
 
 /*
  * Disable runtime checking and repair of values returned by control methods.
  * Use only if the repair is causing a problem on a particular machine.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_auto_repair, FALSE);
 
 /*
  * Optionally do not load any SSDTs from the RSDT/XSDT during initialization.
  * This can be useful for debugging ACPI problems on some machines.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_disable_ssdt_table_load, FALSE);
 
 /*
  * We keep track of the latest version of Windows that has been requested by
  * the BIOS.
  */
-u8 ACPI_INIT_GLOBAL(acpi_gbl_osi_data, 0);
-
-/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
-
-struct acpi_table_fadt acpi_gbl_FADT;
-u32 acpi_current_gpe_count;
-u32 acpi_gbl_trace_flags;
-acpi_name acpi_gbl_trace_method_name;
-u8 acpi_gbl_system_awake_and_running;
-
-/*
- * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
- * that the ACPI hardware is no longer required. A flag in the FADT indicates
- * a reduced HW machine, and that flag is duplicated here for convenience.
- */
-u8 acpi_gbl_reduced_hardware;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
 
 #endif                         /* DEFINE_ACPI_GLOBALS */
 
-/* Do not disassemble buffers to resource descriptors */
-
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
-
 /*****************************************************************************
  *
  * ACPI Table globals
@@ -190,37 +179,36 @@ ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
  ****************************************************************************/
 
 /*
- * acpi_gbl_root_table_list is the master list of ACPI tables that were
- * found in the RSDT/XSDT.
+ * Master list of all ACPI tables that were found in the RSDT/XSDT.
  */
-ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
+ACPI_GLOBAL(struct acpi_table_list, acpi_gbl_root_table_list);
+
+/* DSDT information. Used to check for DSDT corruption */
+
+ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
+ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
 
 #if (!ACPI_REDUCED_HARDWARE)
-ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
+ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
 
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
 /* These addresses are calculated from the FADT Event Block addresses */
 
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
-
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
-ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1a_enable);
 
-/* DSDT information. Used to check for DSDT corruption */
-
-ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
-ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_status);
+ACPI_GLOBAL(struct acpi_generic_address, acpi_gbl_xpm1b_enable);
 
 /*
- * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
+ * Handle both ACPI 1.0 and ACPI 2.0+ Integer widths. The integer width is
  * determined by the revision of the DSDT: If the DSDT revision is less than
  * 2, use only the lower 32 bits of the internal 64-bit Integer.
  */
-ACPI_EXTERN u8 acpi_gbl_integer_bit_width;
-ACPI_EXTERN u8 acpi_gbl_integer_byte_width;
-ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
+ACPI_GLOBAL(u8, acpi_gbl_integer_bit_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_byte_width);
+ACPI_GLOBAL(u8, acpi_gbl_integer_nybble_width);
 
 /*****************************************************************************
  *
@@ -233,36 +221,36 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
  * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
  * (The table maps local handles to the real OS handles)
  */
-ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
+ACPI_GLOBAL(struct acpi_mutex_info, acpi_gbl_mutex_info[ACPI_NUM_MUTEX]);
 
 /*
  * Global lock mutex is an actual AML mutex object
  * Global lock semaphore works in conjunction with the actual global lock
  * Global lock spinlock is used for "pending" handshake
  */
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
-ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
-ACPI_EXTERN acpi_spinlock acpi_gbl_global_lock_pending_lock;
-ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
-ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
-ACPI_EXTERN u8 acpi_gbl_global_lock_present;
-ACPI_EXTERN u8 acpi_gbl_global_lock_pending;
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_global_lock_mutex);
+ACPI_GLOBAL(acpi_semaphore, acpi_gbl_global_lock_semaphore);
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_global_lock_pending_lock);
+ACPI_GLOBAL(u16, acpi_gbl_global_lock_handle);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_acquired);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_present);
+ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending);
 
 /*
  * Spinlocks are used for interfaces that can be possibly called at
  * interrupt level
  */
-ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock;   /* For GPE data structs and registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock;      /* For ACPI H/W except GPE registers */
-ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock);    /* For ACPI H/W except GPE registers */
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
 
 /* Mutex for _OSI support */
 
-ACPI_EXTERN acpi_mutex acpi_gbl_osi_mutex;
+ACPI_GLOBAL(acpi_mutex, acpi_gbl_osi_mutex);
 
 /* Reader/Writer lock is used for namespace walk and dynamic table unload */
 
-ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
+ACPI_GLOBAL(struct acpi_rw_lock, acpi_gbl_namespace_rw_lock);
 
 /*****************************************************************************
  *
@@ -272,70 +260,69 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
 
 /* Object caches */
 
-ACPI_EXTERN acpi_cache_t *acpi_gbl_namespace_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_state_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_ps_node_ext_cache;
-ACPI_EXTERN acpi_cache_t *acpi_gbl_operand_cache;
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_namespace_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_state_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_ps_node_ext_cache);
+ACPI_GLOBAL(acpi_cache_t *, acpi_gbl_operand_cache);
+
+/* System */
+
+ACPI_INIT_GLOBAL(u32, acpi_gbl_startup_flags, 0);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_shutdown, TRUE);
 
 /* Global handlers */
 
-ACPI_EXTERN struct acpi_global_notify_handler acpi_gbl_global_notify[2];
-ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
-ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
-ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
-ACPI_EXTERN void *acpi_gbl_table_handler_context;
-ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
-ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
-ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
+ACPI_GLOBAL(struct acpi_global_notify_handler, acpi_gbl_global_notify[2]);
+ACPI_GLOBAL(acpi_exception_handler, acpi_gbl_exception_handler);
+ACPI_GLOBAL(acpi_init_handler, acpi_gbl_init_handler);
+ACPI_GLOBAL(acpi_table_handler, acpi_gbl_table_handler);
+ACPI_GLOBAL(void *, acpi_gbl_table_handler_context);
+ACPI_GLOBAL(struct acpi_walk_state *, acpi_gbl_breakpoint_walk);
+ACPI_GLOBAL(acpi_interface_handler, acpi_gbl_interface_handler);
+ACPI_GLOBAL(struct acpi_sci_handler_info *, acpi_gbl_sci_handler_list);
 
 /* Owner ID support */
 
-ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
-ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
-ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+ACPI_GLOBAL(u32, acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS]);
+ACPI_GLOBAL(u8, acpi_gbl_last_owner_id_index);
+ACPI_GLOBAL(u8, acpi_gbl_next_owner_id_offset);
 
 /* Initialization sequencing */
 
-ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
+ACPI_GLOBAL(u8, acpi_gbl_reg_methods_executed);
 
 /* Misc */
 
-ACPI_EXTERN u32 acpi_gbl_original_mode;
-ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
-ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
-ACPI_EXTERN u32 acpi_gbl_ps_find_count;
-ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
-ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
-ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
-ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
-ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
-ACPI_EXTERN struct acpi_address_range
-    *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
-
-#ifndef DEFINE_ACPI_GLOBALS
-
-/* Other miscellaneous */
-
-extern u8 acpi_gbl_shutdown;
-extern u32 acpi_gbl_startup_flags;
+ACPI_GLOBAL(u32, acpi_gbl_original_mode);
+ACPI_GLOBAL(u32, acpi_gbl_rsdp_original_location);
+ACPI_GLOBAL(u32, acpi_gbl_ns_lookup_count);
+ACPI_GLOBAL(u32, acpi_gbl_ps_find_count);
+ACPI_GLOBAL(u16, acpi_gbl_pm1_enable_register_save);
+ACPI_GLOBAL(u8, acpi_gbl_debugger_configuration);
+ACPI_GLOBAL(u8, acpi_gbl_step_to_next_call);
+ACPI_GLOBAL(u8, acpi_gbl_acpi_hardware_present);
+ACPI_GLOBAL(u8, acpi_gbl_events_initialized);
+ACPI_GLOBAL(struct acpi_interface_info *, acpi_gbl_supported_interfaces);
+ACPI_GLOBAL(struct acpi_address_range *,
+           acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX]);
+
+/* Other miscellaneous, declared and initialized in utglobal */
+
 extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
 extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
 extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
-extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
 extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
-
-#endif
+extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
 
 #ifdef ACPI_DBG_TRACK_ALLOCATIONS
 
-/* Lists for tracking memory allocations */
+/* Lists for tracking memory allocations (debug only) */
 
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_global_list;
-ACPI_EXTERN struct acpi_memory_list *acpi_gbl_ns_node_list;
-ACPI_EXTERN u8 acpi_gbl_display_final_mem_stats;
-ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list);
+ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list);
+ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats);
+ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
 #endif
 
 /*****************************************************************************
@@ -350,22 +337,23 @@ ACPI_EXTERN u8 acpi_gbl_disable_mem_tracking;
 #define NUM_PREDEFINED_NAMES            9
 #endif
 
-ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
-ACPI_EXTERN union acpi_operand_object *acpi_gbl_module_code_list;
+ACPI_GLOBAL(struct acpi_namespace_node, acpi_gbl_root_node_struct);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_root_node);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_fadt_gpe_device);
+ACPI_GLOBAL(union acpi_operand_object *, acpi_gbl_module_code_list);
 
 extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
 extern const struct acpi_predefined_names
     acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
 
 #ifdef ACPI_DEBUG_OUTPUT
-ACPI_EXTERN u32 acpi_gbl_current_node_count;
-ACPI_EXTERN u32 acpi_gbl_current_node_size;
-ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
-ACPI_EXTERN acpi_size *acpi_gbl_entry_stack_pointer;
-ACPI_EXTERN acpi_size *acpi_gbl_lowest_stack_pointer;
-ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
+ACPI_GLOBAL(u32, acpi_gbl_current_node_count);
+ACPI_GLOBAL(u32, acpi_gbl_current_node_size);
+ACPI_GLOBAL(u32, acpi_gbl_max_concurrent_node_count);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_entry_stack_pointer);
+ACPI_GLOBAL(acpi_size *, acpi_gbl_lowest_stack_pointer);
+ACPI_GLOBAL(u32, acpi_gbl_deepest_nesting);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_nesting_level, 0);
 #endif
 
 /*****************************************************************************
@@ -374,11 +362,11 @@ ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
  *
  ****************************************************************************/
 
-ACPI_EXTERN struct acpi_thread_state *acpi_gbl_current_walk_list;
+ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
 
 /* Control method single step flag */
 
-ACPI_EXTERN u8 acpi_gbl_cm_single_step;
+ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
 
 /*****************************************************************************
  *
@@ -388,8 +376,9 @@ ACPI_EXTERN u8 acpi_gbl_cm_single_step;
 
 extern struct acpi_bit_register_info
     acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
-ACPI_EXTERN u8 acpi_gbl_sleep_type_a;
-ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
+
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
 
 /*****************************************************************************
  *
@@ -399,14 +388,15 @@ ACPI_EXTERN u8 acpi_gbl_sleep_type_b;
 
 #if (!ACPI_REDUCED_HARDWARE)
 
-ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized;
-ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
-ACPI_EXTERN struct acpi_gpe_block_info
-    *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
-ACPI_EXTERN acpi_gbl_event_handler acpi_gbl_global_event_handler;
-ACPI_EXTERN void *acpi_gbl_global_event_handler_context;
-ACPI_EXTERN struct acpi_fixed_event_handler
-    acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u8, acpi_gbl_all_gpes_initialized);
+ACPI_GLOBAL(struct acpi_gpe_xrupt_info *, acpi_gbl_gpe_xrupt_list_head);
+ACPI_GLOBAL(struct acpi_gpe_block_info *,
+           acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]);
+ACPI_GLOBAL(acpi_gbl_event_handler, acpi_gbl_global_event_handler);
+ACPI_GLOBAL(void *, acpi_gbl_global_event_handler_context);
+ACPI_GLOBAL(struct acpi_fixed_event_handler,
+           acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS]);
+
 extern struct acpi_fixed_event_info
     acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS];
 
@@ -418,23 +408,19 @@ extern struct acpi_fixed_event_info
  *
  ****************************************************************************/
 
-/* Procedure nesting level for debug output */
-
-extern u32 acpi_gbl_nesting_level;
-
 /* Event counters */
 
-ACPI_EXTERN u32 acpi_method_count;
-ACPI_EXTERN u32 acpi_gpe_count;
-ACPI_EXTERN u32 acpi_sci_count;
-ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS];
+ACPI_GLOBAL(u32, acpi_method_count);
+ACPI_GLOBAL(u32, acpi_gpe_count);
+ACPI_GLOBAL(u32, acpi_sci_count);
+ACPI_GLOBAL(u32, acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]);
 
 /* Support for dynamic control method tracing mechanism */
 
-ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_original_dbg_layer;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_level;
-ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
+ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
 
 /*****************************************************************************
  *
@@ -442,61 +428,64 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
  *
  ****************************************************************************/
 
-ACPI_EXTERN u8 acpi_gbl_db_output_flags;
+ACPI_GLOBAL(u8, acpi_gbl_db_output_flags);
 
 #ifdef ACPI_DISASSEMBLER
 
-ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_INIT_GLOBAL(u8, acpi_gbl_no_resource_disassembly, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
 
-ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
-ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
-ACPI_EXTERN u8 acpi_gbl_num_external_methods;
-ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
-ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
-ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
+ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
+ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
+ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
+ACPI_GLOBAL(struct acpi_external_file *, acpi_gbl_external_file_list);
 #endif
 
 #ifdef ACPI_DEBUGGER
 
-extern u8 acpi_gbl_method_executing;
-extern u8 acpi_gbl_abort_method;
-extern u8 acpi_gbl_db_terminate_threads;
+ACPI_INIT_GLOBAL(u8, acpi_gbl_db_terminate_threads, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_abort_method, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_method_executing, FALSE);
 
-ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
-ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
-ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
-ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
-ACPI_EXTERN char *acpi_gbl_db_buffer;
-ACPI_EXTERN char *acpi_gbl_db_filename;
-ACPI_EXTERN u32 acpi_gbl_db_debug_level;
-ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
-ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_tables);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_stats);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_ini_methods);
+ACPI_GLOBAL(u8, acpi_gbl_db_opt_no_region_support);
+ACPI_GLOBAL(u8, acpi_gbl_db_output_to_file);
+ACPI_GLOBAL(char *, acpi_gbl_db_buffer);
+ACPI_GLOBAL(char *, acpi_gbl_db_filename);
+ACPI_GLOBAL(u32, acpi_gbl_db_debug_level);
+ACPI_GLOBAL(u32, acpi_gbl_db_console_debug_level);
+ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_db_scope_node);
 
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_GLOBAL(char *, acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS]);
+ACPI_GLOBAL(acpi_object_type, acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS]);
 
 /* These buffers should all be the same size */
 
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_GLOBAL(char, acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE]);
+ACPI_GLOBAL(char, acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE]);
 
 /*
  * Statistic globals
  */
-ACPI_EXTERN u16 acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1];
-ACPI_EXTERN u16 acpi_gbl_obj_type_count_misc;
-ACPI_EXTERN u16 acpi_gbl_node_type_count_misc;
-ACPI_EXTERN u32 acpi_gbl_num_nodes;
-ACPI_EXTERN u32 acpi_gbl_num_objects;
-
-ACPI_EXTERN u32 acpi_gbl_size_of_parse_tree;
-ACPI_EXTERN u32 acpi_gbl_size_of_method_trees;
-ACPI_EXTERN u32 acpi_gbl_size_of_node_entries;
-ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
+ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
+ACPI_GLOBAL(u32, acpi_gbl_num_objects);
+
+ACPI_GLOBAL(u32, acpi_gbl_size_of_parse_tree);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_method_trees);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_node_entries);
+ACPI_GLOBAL(u32, acpi_gbl_size_of_acpi_objects);
 
 #endif                         /* ACPI_DEBUGGER */
 
@@ -508,7 +497,7 @@ ACPI_EXTERN u32 acpi_gbl_size_of_acpi_objects;
 
 #ifdef ACPI_APPLICATION
 
-ACPI_FILE ACPI_INIT_GLOBAL(acpi_gbl_debug_file, NULL);
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
 
 #endif                         /* ACPI_APPLICATION */
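
The hunks above replace per-variable ACPI_EXTERN declarations with the ACPI_GLOBAL()/ACPI_INIT_GLOBAL() wrappers, so a single header can expand either to an extern declaration or to the actual definition depending on who includes it. Below is a minimal standalone sketch of that declare-or-define macro pattern; the macro and variable names (DEFINE_GLOBALS, GLOBAL, my_counter, my_flag) are illustrative, not the ACPICA definitions.

        #include <stdio.h>

        /*
         * Illustrative declare-or-define macros (not the ACPICA ones). In a real
         * project, exactly one .c file defines DEFINE_GLOBALS before including
         * the header, turning these declarations into the single definitions.
         */
        #define DEFINE_GLOBALS

        #ifdef DEFINE_GLOBALS
        #define GLOBAL(type, name)              type name
        #define INIT_GLOBAL(type, name, value)  type name = value
        #else
        #define GLOBAL(type, name)              extern type name
        #define INIT_GLOBAL(type, name, value)  extern type name
        #endif

        GLOBAL(unsigned int, my_counter);       /* uninitialized global */
        INIT_GLOBAL(int, my_flag, 0);           /* global with an initializer */

        int main(void)
        {
                my_counter = 3;
                printf("counter=%u flag=%d\n", my_counter, my_flag);
                return 0;
        }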
 
index 6357e932bfd9d20cf6640dd1a87c86e95969bac2..2ad2351a983321e31c1a4aa6983dfd0dfe419c09 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8af8c9bdeb3552b115617b3507c7b0d1ef602c59..c54267748be5bae1f85d0f123919bce602a0df66 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -87,6 +87,10 @@ typedef const struct acpi_exdump_info {
 #define ACPI_EXD_PACKAGE                11
 #define ACPI_EXD_FIELD                  12
 #define ACPI_EXD_REFERENCE              13
+#define ACPI_EXD_LIST                   14     /* Operand object list */
+#define ACPI_EXD_HDLR_LIST              15     /* Address Handler list */
+#define ACPI_EXD_RGN_LIST               16     /* Region list */
+#define ACPI_EXD_NODE                   17     /* Namespace Node */
 
 /* restore default alignment */
 
index d95ca5449aceb119361c2e83b441de76097b3306..52a21dafb54039ca3c3d62bb22328acfb7a3321a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2a86c65d873bb6625d3a028b24d299fb2b6101ce..4bceb11c7380d118020b512a93a267c4c6b3114a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define ACPI_SET64(ptr, val)            (*ACPI_CAST64 (ptr) = (u64) (val))
 
 /*
- * printf() format helpers
+ * printf() format helpers. These macros are workarounds for the difficulties
+ * with emitting 64-bit integers and 64-bit pointers with the same code
+ * for both 32-bit and 64-bit hosts.
  */
-
-/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
-
 #define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i), ACPI_LODWORD(i)
 
 #if ACPI_MACHINE_WIDTH == 64
 #define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
+#define ACPI_FORMAT_TO_UINT(i)          ACPI_FORMAT_UINT64(i)
+#define ACPI_PRINTF_UINT                 "0x%8.8X%8.8X"
+
 #else
-#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
+#define ACPI_FORMAT_NATIVE_UINT(i)      0, (u32) (i)
+#define ACPI_FORMAT_TO_UINT(i)          (u32) (i)
+#define ACPI_PRINTF_UINT                 "0x%8.8X"
 #endif
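
The reworked printf() helpers above pair a format string (ACPI_PRINTF_UINT) with a matching argument expansion (ACPI_FORMAT_TO_UINT), so the same call site compiles on both 32-bit and 64-bit hosts. A minimal standalone sketch of the same idea, using illustrative macro names rather than the ACPICA ones:

        #include <stdio.h>
        #include <stdint.h>

        /* Illustrative names, not the ACPICA macros */
        #define HIDWORD(v)        ((uint32_t)((uint64_t)(v) >> 32))
        #define LODWORD(v)        ((uint32_t)((uint64_t)(v) & 0xFFFFFFFFu))
        #define FORMAT_UINT64(v)  HIDWORD(v), LODWORD(v)   /* use with "%8.8X%8.8X" */

        #if UINTPTR_MAX > 0xFFFFFFFFu                      /* 64-bit host */
        #define PRINTF_UINT       "0x%8.8X%8.8X"
        #define FORMAT_TO_UINT(v) FORMAT_UINT64(v)
        #else                                              /* 32-bit host */
        #define PRINTF_UINT       "0x%8.8X"
        #define FORMAT_TO_UINT(v) (uint32_t)(v)
        #endif

        int main(void)
        {
                void *p = (void *)&p;

                /* The same source line works on either host width */
                printf("Pointer: " PRINTF_UINT "\n", FORMAT_TO_UINT((uintptr_t)p));
                return 0;
        }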
 
 /*
index e6138ac4a16054038275fc81f2f616cd70c26d5d..ee1c040f321c621bc33ed8dd66c2c9426e6f3801 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cc7ab6dd724e6234a835d0321c8cb4b6106796f4..1a4d61805ebc64a83bc9242414536f7fecfb25cf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3fc9ca7e8aa3e6afee0933d1ac82efe33c8ba04d..dda0e6affcf1ccb1199320ff4ede74f471ebbad5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index aed319318835f3c9b47a53acbb9618da37629c83..6168b85463edc1b0b68b5405a8e64ed449b2bdd8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f600aded7261cde6762b3fee5d9b9a6a790664b8..a48d713e95991d52389bafb62bc401c497e9df89 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
  *
  * Return Package types
  *
- * 1) PTYPE1 packages do not contain sub-packages.
+ * 1) PTYPE1 packages do not contain subpackages.
  *
  * ACPI_PTYPE1_FIXED: Fixed-length length, 1 or 2 object types:
  *      object type
@@ -63,8 +63,8 @@
  *      (Used for _PRW)
  *
  *
- * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
- *    of the different types describe the contents of each of the sub-packages.
+ * 2) PTYPE2 packages contain a Variable-length number of subpackages. Each
+ *    of the different types describe the contents of each of the subpackages.
  *
  * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types. Zero-length
  *      parent package is allowed:
@@ -560,7 +560,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
 
        /*
         * For _HPX, a single package is returned, containing a variable-length number
-        * of sub-packages. Each sub-package contains a PCI record setting.
+        * of subpackages. Each subpackage contains a PCI record setting.
         * There are several different type of record settings, of different
         * lengths, but all elements of all settings are Integers.
         */
@@ -698,6 +698,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
          METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Refs) */
        PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
+       {{"_PRP", METHOD_0ARGS,
+         METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Str, 1 Int/Str/Pkg */
+       PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1,
+                    ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
+                    ACPI_RTYPE_PACKAGE | ACPI_RTYPE_REFERENCE, 1, 0),
+
        {{"_PRS", METHOD_0ARGS,
          METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
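
The new _PRP entry above registers the name in the predefined-method table: zero arguments, a Package return, and a per-subpackage pattern of one String plus one Integer/String/Package/Reference. A minimal sketch of how such a signature table can drive return-type checking; the struct layout, masks and names are illustrative, not the ACPICA ones:

        #include <stdio.h>
        #include <string.h>

        /* Illustrative return-type masks and table, not the ACPICA definitions */
        #define RTYPE_INTEGER  0x01
        #define RTYPE_STRING   0x02
        #define RTYPE_BUFFER   0x04
        #define RTYPE_PACKAGE  0x08

        struct predefined {
                const char *name;        /* predefined 4-character name */
                int num_args;            /* expected argument count */
                unsigned int rtype_mask; /* allowed return types, OR-ed together */
        };

        static const struct predefined methods[] = {
                { "_PRS", 0, RTYPE_BUFFER },
                { "_PRP", 0, RTYPE_PACKAGE },
        };

        /* Validate an actual return type against the table entry for "name" */
        static int return_type_ok(const char *name, unsigned int actual_rtype)
        {
                for (size_t i = 0; i < sizeof(methods) / sizeof(methods[0]); i++) {
                        if (strcmp(methods[i].name, name) == 0)
                                return (methods[i].rtype_mask & actual_rtype) != 0;
                }
                return 1;        /* not a predefined name: nothing to check */
        }

        int main(void)
        {
                printf("_PRP -> Package: %s\n",
                       return_type_ok("_PRP", RTYPE_PACKAGE) ? "ok" : "invalid");
                printf("_PRP -> Integer: %s\n",
                       return_type_ok("_PRP", RTYPE_INTEGER) ? "ok" : "invalid");
                return 0;
        }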
 
index ff97430455cbe923d3bad4f453635371b9388867..4b008e8884a111326fe17f5a1f5ee2adc028f8f6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fc83c0a5ca70fe228857d1336d967a91dd0765fa..5d2989a1b68c965a8649243b20837dd5104f74a5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c54f42c64fe2ca78ae7d9269b96c69ad2461c552..5fa4b202769790e81a9ff39964957fc5860cb844 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index be8180c17d7e772ec520dae60dc43b514c5295c1..ceeec0b7ccb1764b1e8bc1bd4b7e91ef1f7b372c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -49,7 +49,7 @@ extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
 
 /* Strings used by the disassembler and debugger resource dump routines */
 
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
 
 extern const char *acpi_gbl_bm_decode[];
 extern const char *acpi_gbl_config_decode[];
index 48a3e331b72d7e7c8214969217d4e57e3c535d3e..5908ccec6aea8b5858338cf5b2a7de95bdbb118d 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 87c26366d1dfa8a9e65d8931f067d28330d8aa57..f3f834408441c34e40ab88127bb83d30c2e2aa0c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index afdc6df17abf12fa37311484b48e00ba0ff67bca..720b1cdda7113665da0883681c3b0765a4d63245 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index eb56b66444b5e3d671572b6290a1245473e7d165..8daf9de82b73431d630d0e31f5f86abdaa8baa84 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e7a57c554e84f9d0f60c0bb558df4e288d495894..3661c8e90540fb8715ec7a24795394bc1609b016 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 14424200d246c8538bcde5070072100528dcf1dd..96644d5ac0e1fa3852cdcb28f934cdf259f92e4e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 81a78ba843112d335613fc55a2d0d2ae39e6bc3f..2c6d42c2bc01a4b7f10fcb1424479a6dbab8b667 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c4b0b365723758fe5181e26eaeaa7cb5314f18d3..b67522df01ace08274a2ce93d5c555e31a4aa75f 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b1746a68dad11b8ac3a7b37bdb213f1e7265ee17..a1e7e6b6fcf7f89a8c87a5153d7c58bd8f0e80ec 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5205edcf2c019d1936c81af2d77a8f4e732d9c0d..6c0759c0db470ab520be6accf81e66ee87ed6e58 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d7f53fb2979ab9a602f11af4a9a1478e60f59c3d..9f74795e226853e516dc4525b7f9b09420ef57bc 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1bbb22fd6fa02f005893fc691eedfb8d0981246e..f7f5107e754da79f4b2459cc3e2c648cda9fb489 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2dbe109727c84c0b8d26029d479937ac3155b303..bd7811c64169f8aa2c4be03fd61ef1d9bdffee6f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7f569d57302758dcec3643d489f715f794625f78..2ac28d297305c5ec6183ff81b4e38f4d1ce7b9c8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d67891de1b545b43f33dca806d784dda884cfedb..9d6e2c1de1f89e5f6420ddfa52d1863766f8d986 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ecb12e2137ffb4ff736541891122afcf145b4f61..24f7d5ea678a0420570fd6e8711108905083cee2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 83cd45f4a8703f5cba5a613c2bad0030112759d8..c7bffff9ed32c97e4c35f8ef5fa0c1372c82687a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4c67193a9fa731f0f616023ffb642547d38795df..3393a73ca0d68f8d9c219cfd6d374f9ef09a5c92 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a9cb4a1a4bb8ce6b2c203c7c16360556decd3ada..955f83da68a52cee8c8fa672b09910155ac35ca6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a31e549e64cc7a95b77b516596b3f86a3aeae3fb..caaed3c673fdf369d8ce686507630fb035172995 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a3e2f38aadf64783cd3dfd03f1f6bc0140a87b85..ae779c1e871dbf5eaa8c422b970d2f78dfbfd972 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4d764e847a08a7174202166ae5e360e0b2d2b811..17e4bbfdb096c2e9c522b2a387abfed80943d2aa 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e3157313eb2786e2098b7c6fa5f4a3c58c40d293..78ac29351c9e1736880471724849c2028dd5b5c7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a5687540e9a66e5ba20bfeb9772ad5351b20bc61..5d594eb2e5ecb6c627587daffaac0f0e5dcf35d3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 144cbb9b73bc7238460be80e22a62a95aa2042e3..9957297d15805f2b1691d2b251d89031fd404bb5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -314,6 +314,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
 {
        union acpi_operand_object *handler_obj;
        union acpi_operand_object *obj_desc;
+       union acpi_operand_object *start_desc;
        union acpi_operand_object **last_obj_ptr;
        acpi_adr_space_setup region_setup;
        void **region_context;
@@ -341,6 +342,7 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
        /* Find this region in the handler's list */
 
        obj_desc = handler_obj->address_space.region_list;
+       start_desc = obj_desc;
        last_obj_ptr = &handler_obj->address_space.region_list;
 
        while (obj_desc) {
@@ -438,6 +440,15 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
 
                last_obj_ptr = &obj_desc->region.next;
                obj_desc = obj_desc->region.next;
+
+               /* Prevent infinite loop if list is corrupted */
+
+               if (obj_desc == start_desc) {
+                       ACPI_ERROR((AE_INFO,
+                                   "Circular handler list in region object %p",
+                                   region_obj));
+                       return_VOID;
+               }
        }
 
        /* If we get here, the region was not in the handler's region list */
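
The start_desc bookkeeping added above stops the region-list walk if the singly linked list ever wraps back to its first element, instead of spinning forever on a corrupted list. A standalone sketch of that guard on a generic list (types and names are illustrative):

        #include <stdio.h>

        struct node {
                int id;
                struct node *next;
        };

        /*
         * Walk a singly linked list, bailing out if it wraps back to the first
         * element (the kind of corruption the change above guards against).
         */
        static void walk_list(struct node *head)
        {
                struct node *start = head;
                struct node *cur = head;

                while (cur) {
                        printf("node %d\n", cur->id);

                        cur = cur->next;
                        if (cur == start) {
                                fprintf(stderr, "Error: circular list detected\n");
                                return;
                        }
                }
        }

        int main(void)
        {
                struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };

                a.next = &b;
                b.next = &c;
                c.next = &a;        /* deliberately corrupted: loops back to the head */

                walk_list(&a);
                return 0;
        }
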
index 8354c4f7f10c55c604765a92020abd2d30547de9..1b148a440d67f545ebf1db7d12829de790b0a234 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9e9e3454d8932c9c44f853bbe3d05edffeb38130..4d8a709c1fc4505b4ea176e0347142a43eed0fba 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 23a7fadca4122992c7f652c32674843de13500f9..a734b27da0615e7bf73a6454183605931bce48b0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 39d06af5e347e234ad285f77848e22b8bef101d6..e286640ad4ff9e02974bc9d7c9c1d2a64fd58a92 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5713da77c665b01a5e9dcdbd91abc70a11f4228a..20a1392ffe06fd59eecc04b6dfe17a86aa96200d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -583,6 +583,18 @@ acpi_install_gpe_block(acpi_handle gpe_device,
                goto unlock_and_exit;
        }
 
+       /* Validate the parent device */
+
+       if (node->type != ACPI_TYPE_DEVICE) {
+               status = AE_TYPE;
+               goto unlock_and_exit;
+       }
+
+       if (node->object) {
+               status = AE_ALREADY_EXISTS;
+               goto unlock_and_exit;
+       }
+
        /*
         * For user-installed GPE Block Devices, the gpe_block_base_number
         * is always zero
@@ -666,6 +678,13 @@ acpi_status acpi_remove_gpe_block(acpi_handle gpe_device)
                goto unlock_and_exit;
        }
 
+       /* Validate the parent device */
+
+       if (node->type != ACPI_TYPE_DEVICE) {
+               status = AE_TYPE;
+               goto unlock_and_exit;
+       }
+
        /* Get the device_object attached to the node */
 
        obj_desc = acpi_ns_get_attached_object(node);
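
The checks added above reject a GPE block install or removal when the given handle is not a Device node, and reject an install when the node already has an attached object. A minimal sketch of that validate-before-attach pattern; the types, constants and helper name are illustrative placeholders, not the ACPICA API:

        #include <stdio.h>

        /* Illustrative placeholders, not the ACPICA namespace types */
        enum node_type { TYPE_DEVICE, TYPE_METHOD, TYPE_REGION };

        struct ns_node {
                enum node_type type;
                void *object;            /* already-attached object, if any */
        };

        enum status { OK, ERR_TYPE, ERR_ALREADY_EXISTS };

        /* Validate the target node before attaching a new block object to it */
        static enum status install_block(struct ns_node *node, void *block)
        {
                if (node->type != TYPE_DEVICE)
                        return ERR_TYPE;            /* only Device nodes qualify */

                if (node->object)
                        return ERR_ALREADY_EXISTS;  /* something is already attached */

                node->object = block;
                return OK;
        }

        int main(void)
        {
                struct ns_node dev = { TYPE_DEVICE, NULL };
                int block = 42;

                printf("first install : %d\n", install_block(&dev, &block));  /* 0 = OK */
                printf("second install: %d\n", install_block(&dev, &block));  /* 2 = exists */
                return 0;
        }
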
index 02ed75ac56cd5454adc527b09e3ebe63a329ae0e..2d6f187939c703db962bac5fbfad6ca9c9140bfa 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 06d216c8d43ab18edc0fb1f66def077014b2aa3f..8ba1464efd112a8f89a48eabbf7fd343c87ab3b6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 69e4a8cc9b71728f98b1aad402a0a44538033457..c545386fee96cd825f20a5099e1f82aeae39bc85 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3c2e6dcdad3e6abcbb60e1afacf3e0717c55a0d8..95d23dabcfbbe00e4221818b468c8e92483f5942 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 81c72a4ecd823eaca8542135aea5396d7d6d5626..4cfc3d3b5c97ddbddf19450f86759d5f32fb2dab 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4d046faac48cbb09b2c0aa6c4a0c5e8550333ec5..973fdae00f9479ddd160a8d3fff96e9367b26619 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -94,12 +94,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
        {ACPI_EXD_BUFFER, 0, NULL}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_package[5] = {
+static struct acpi_exdump_info acpi_ex_dump_package[6] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
@@ -108,11 +109,11 @@ static struct acpi_exdump_info acpi_ex_dump_package[5] = {
 
 static struct acpi_exdump_info acpi_ex_dump_device[4] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_device), NULL},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.handler), "Handler"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[0]),
         "System Notify"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(device.notify_list[1]),
-        "Device Notify"}
+        "Device Notify"},
+       {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(device.handler), "Handler"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_event[2] = {
@@ -142,17 +143,18 @@ static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_region[7] = {
+static struct acpi_exdump_info acpi_ex_dump_region[8] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region), NULL},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.space_id), "Space Id"},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(region.flags), "Flags"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(region.node), "Parent Node"},
        {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(region.address), "Address"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(region.length), "Length"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.handler), "Handler"},
+       {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(region.handler), "Handler"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(region.next), "Next"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_power[5] = {
+static struct acpi_exdump_info acpi_ex_dump_power[6] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_power), NULL},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(power_resource.system_level),
         "System Level"},
@@ -161,7 +163,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[0]),
         "System Notify"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.notify_list[1]),
-        "Device Notify"}
+        "Device Notify"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(power_resource.handler), "Handler"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
@@ -225,7 +228,7 @@ static struct acpi_exdump_info acpi_ex_dump_reference[8] = {
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(reference.target_type), "Target Type"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(reference.value), "Value"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.object), "Object Desc"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.node), "Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(reference.node), "Node"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(reference.where), "Where"},
        {ACPI_EXD_REFERENCE, 0, NULL}
 };
@@ -234,16 +237,16 @@ static struct acpi_exdump_info acpi_ex_dump_address_handler[6] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_address_handler),
         NULL},
        {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(address_space.space_id), "Space Id"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.next), "Next"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.region_list),
+       {ACPI_EXD_HDLR_LIST, ACPI_EXD_OFFSET(address_space.next), "Next"},
+       {ACPI_EXD_RGN_LIST, ACPI_EXD_OFFSET(address_space.region_list),
         "Region List"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.node), "Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(address_space.node), "Node"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(address_space.context), "Context"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_notify), NULL},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.node), "Node"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(notify.node), "Node"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(notify.handler_type), "Handler Type"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.handler), "Handler"},
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.context), "Context"},
@@ -252,14 +255,31 @@ static struct acpi_exdump_info acpi_ex_dump_notify[7] = {
        {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(notify.next[1]), "Next Device Notify"}
 };
 
+static struct acpi_exdump_info acpi_ex_dump_extra[6] = {
+       {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_extra), NULL},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.method_REG), "_REG Method"},
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(extra.scope_node), "Scope Node"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.region_context),
+        "Region Context"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(extra.aml_start), "Aml Start"},
+       {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(extra.aml_length), "Aml Length"}
+};
+
+static struct acpi_exdump_info acpi_ex_dump_data[3] = {
+       {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_data), NULL},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.handler), "Handler"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(data.pointer), "Raw Data"}
+};
+
 /* Miscellaneous tables */
 
-static struct acpi_exdump_info acpi_ex_dump_common[4] = {
+static struct acpi_exdump_info acpi_ex_dump_common[5] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_common), NULL},
        {ACPI_EXD_TYPE, 0, NULL},
        {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(common.reference_count),
         "Reference Count"},
-       {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"}
+       {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(common.flags), "Flags"},
+       {ACPI_EXD_LIST, ACPI_EXD_OFFSET(common.next_object), "Object List"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
@@ -274,15 +294,17 @@ static struct acpi_exdump_info acpi_ex_dump_field_common[7] = {
         "Field Bit Offset"},
        {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(common_field.base_byte_offset),
         "Base Byte Offset"},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
+       {ACPI_EXD_NODE, ACPI_EXD_OFFSET(common_field.node), "Parent Node"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_node[5] = {
+static struct acpi_exdump_info acpi_ex_dump_node[7] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_node), NULL},
        {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(flags), "Flags"},
        {ACPI_EXD_UINT8, ACPI_EXD_NSOFFSET(owner_id), "Owner Id"},
-       {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(child), "Child List"},
-       {ACPI_EXD_POINTER, ACPI_EXD_NSOFFSET(peer), "Next Peer"}
+       {ACPI_EXD_LIST, ACPI_EXD_NSOFFSET(object), "Object List"},
+       {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(parent), "Parent"},
+       {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(child), "Child"},
+       {ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(peer), "Peer"}
 };
 
 /* Dispatch table, indexed by object type */
@@ -315,7 +337,9 @@ static struct acpi_exdump_info *acpi_ex_dump_info[] = {
        acpi_ex_dump_address_handler,
        NULL,
        NULL,
-       NULL
+       NULL,
+       acpi_ex_dump_extra,
+       acpi_ex_dump_data
 };
 
 /*******************************************************************************
@@ -340,6 +364,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
        char *name;
        const char *reference_name;
        u8 count;
+       union acpi_operand_object *start;
+       union acpi_operand_object *data = NULL;
+       union acpi_operand_object *next;
+       struct acpi_namespace_node *node;
 
        if (!info) {
                acpi_os_printf
@@ -363,9 +391,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
 
                case ACPI_EXD_TYPE:
 
-                       acpi_ex_out_string("Type",
-                                          acpi_ut_get_object_type_name
-                                          (obj_desc));
+                       acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+                                      obj_desc->common.type,
+                                      acpi_ut_get_object_type_name(obj_desc));
                        break;
 
                case ACPI_EXD_UINT8:
@@ -433,6 +461,121 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
                        acpi_ex_dump_reference_obj(obj_desc);
                        break;
 
+               case ACPI_EXD_LIST:
+
+                       start = *ACPI_CAST_PTR(void *, target);
+                       next = start;
+
+                       acpi_os_printf("%20s : %p", name, next);
+                       if (next) {
+                               acpi_os_printf("(%s %2.2X)",
+                                              acpi_ut_get_object_type_name
+                                              (next), next->common.type);
+
+                               while (next->common.next_object) {
+                                       if ((next->common.type ==
+                                            ACPI_TYPE_LOCAL_DATA) && !data) {
+                                               data = next;
+                                       }
+
+                                       next = next->common.next_object;
+                                       acpi_os_printf("->%p(%s %2.2X)", next,
+                                                      acpi_ut_get_object_type_name
+                                                      (next),
+                                                      next->common.type);
+
+                                       if ((next == start) || (next == data)) {
+                                               acpi_os_printf
+                                                   ("\n**** Error: Object list appears to be circular linked");
+                                               break;
+                                       }
+                               }
+                       }
+
+                       acpi_os_printf("\n", next);
+                       break;
+
+               case ACPI_EXD_HDLR_LIST:
+
+                       start = *ACPI_CAST_PTR(void *, target);
+                       next = start;
+
+                       acpi_os_printf("%20s : %p", name, next);
+                       if (next) {
+                               acpi_os_printf("(%s %2.2X)",
+                                              acpi_ut_get_object_type_name
+                                              (next), next->common.type);
+
+                               while (next->address_space.next) {
+                                       if ((next->common.type ==
+                                            ACPI_TYPE_LOCAL_DATA) && !data) {
+                                               data = next;
+                                       }
+
+                                       next = next->address_space.next;
+                                       acpi_os_printf("->%p(%s %2.2X)", next,
+                                                      acpi_ut_get_object_type_name
+                                                      (next),
+                                                      next->common.type);
+
+                                       if ((next == start) || (next == data)) {
+                                               acpi_os_printf
+                                                   ("\n**** Error: Handler list appears to be circular linked");
+                                               break;
+                                       }
+                               }
+                       }
+
+                       acpi_os_printf("\n", next);
+                       break;
+
+               case ACPI_EXD_RGN_LIST:
+
+                       start = *ACPI_CAST_PTR(void *, target);
+                       next = start;
+
+                       acpi_os_printf("%20s : %p", name, next);
+                       if (next) {
+                               acpi_os_printf("(%s %2.2X)",
+                                              acpi_ut_get_object_type_name
+                                              (next), next->common.type);
+
+                               while (next->region.next) {
+                                       if ((next->common.type ==
+                                            ACPI_TYPE_LOCAL_DATA) && !data) {
+                                               data = next;
+                                       }
+
+                                       next = next->region.next;
+                                       acpi_os_printf("->%p(%s %2.2X)", next,
+                                                      acpi_ut_get_object_type_name
+                                                      (next),
+                                                      next->common.type);
+
+                                       if ((next == start) || (next == data)) {
+                                               acpi_os_printf
+                                                   ("\n**** Error: Region list appears to be circular linked");
+                                               break;
+                                       }
+                               }
+                       }
+
+                       acpi_os_printf("\n", next);
+                       break;
+
+               case ACPI_EXD_NODE:
+
+                       node =
+                           *ACPI_CAST_PTR(struct acpi_namespace_node *,
+                                          target);
+
+                       acpi_os_printf("%20s : %p", name, node);
+                       if (node) {
+                               acpi_os_printf(" [%4.4s]", node->name.ascii);
+                       }
+                       acpi_os_printf("\n");
+                       break;
+
                default:
 
                        acpi_os_printf("**** Invalid table opcode [%X] ****\n",
@@ -821,10 +964,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
        }
 
        acpi_os_printf("%20s : %4.4s\n", "Name", acpi_ut_get_node_name(node));
-       acpi_ex_out_string("Type", acpi_ut_get_type_name(node->type));
-       acpi_ex_out_pointer("Attached Object",
-                           acpi_ns_get_attached_object(node));
-       acpi_ex_out_pointer("Parent", node->parent);
+       acpi_os_printf("%20s : %2.2X [%s]\n", "Type",
+                      node->type, acpi_ut_get_type_name(node->type));
 
        acpi_ex_dump_object(ACPI_CAST_PTR(union acpi_operand_object, node),
                            acpi_ex_dump_node);
@@ -1017,22 +1158,26 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
                               ((struct acpi_namespace_node *)obj_desc)->
                               object);
 
-               acpi_ex_dump_object_descriptor(((struct acpi_namespace_node *)
-                                               obj_desc)->object, flags);
-               return_VOID;
+               obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+               goto dump_object;
        }
 
        if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) {
-               acpi_os_printf
-                   ("ExDumpObjectDescriptor: %p is not an ACPI operand object: [%s]\n",
-                    obj_desc, acpi_ut_get_descriptor_name(obj_desc));
+               acpi_os_printf("%p is not an ACPI operand object: [%s]\n",
+                              obj_desc, acpi_ut_get_descriptor_name(obj_desc));
                return_VOID;
        }
 
-       if (obj_desc->common.type > ACPI_TYPE_NS_NODE_MAX) {
+       /* Validate the object type */
+
+       if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+               acpi_os_printf("Not a known object type: %2.2X\n",
+                              obj_desc->common.type);
                return_VOID;
        }
 
+dump_object:
+
        /* Common Fields */
 
        acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
@@ -1040,6 +1185,22 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
        /* Object-specific fields */
 
        acpi_ex_dump_object(obj_desc, acpi_ex_dump_info[obj_desc->common.type]);
+
+       if (obj_desc->common.type == ACPI_TYPE_REGION) {
+               obj_desc = obj_desc->common.next_object;
+               if (obj_desc->common.type > ACPI_TYPE_LOCAL_MAX) {
+                       acpi_os_printf
+                           ("Secondary object is not a known object type: %2.2X\n",
+                            obj_desc->common.type);
+
+                       return_VOID;
+               }
+
+               acpi_os_printf("\nExtra attached Object (%p):\n", obj_desc);
+               acpi_ex_dump_object(obj_desc,
+                                   acpi_ex_dump_info[obj_desc->common.type]);
+       }
+
        return_VOID;
 }
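
The exdump changes above extend the table-driven dump: each acpi_exdump_info entry names an opcode (now including ACPI_EXD_LIST, ACPI_EXD_HDLR_LIST, ACPI_EXD_RGN_LIST and ACPI_EXD_NODE) plus a field offset, and one interpreter walks whichever table matches the object type. A minimal standalone sketch of that scheme, with an invented struct and opcodes rather than the ACPICA definitions:

        #include <stdio.h>
        #include <stddef.h>

        /* Invented example object and dump opcodes, not the ACPICA ones */
        struct widget {
                unsigned int flags;
                unsigned int count;
                const char *label;
        };

        enum dump_op { DUMP_UINT32, DUMP_STRING };

        struct dump_info {
                enum dump_op op;
                size_t offset;           /* field offset inside struct widget */
                const char *name;
        };

        static const struct dump_info widget_dump[] = {
                { DUMP_UINT32, offsetof(struct widget, flags), "Flags" },
                { DUMP_UINT32, offsetof(struct widget, count), "Count" },
                { DUMP_STRING, offsetof(struct widget, label), "Label" },
        };

        /* One interpreter walks whichever table describes the object */
        static void dump_object(const void *obj, const struct dump_info *info, size_t n)
        {
                for (size_t i = 0; i < n; i++) {
                        const char *field = (const char *)obj + info[i].offset;

                        switch (info[i].op) {
                        case DUMP_UINT32:
                                printf("%20s : %u\n", info[i].name,
                                       *(const unsigned int *)field);
                                break;
                        case DUMP_STRING:
                                printf("%20s : %s\n", info[i].name,
                                       *(const char *const *)field);
                                break;
                        }
                }
        }

        int main(void)
        {
                struct widget w = { 0x2, 5, "example" };

                dump_object(&w, widget_dump,
                            sizeof(widget_dump) / sizeof(widget_dump[0]));
                return 0;
        }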
 
index cfd875243421473d23ac1f9e0350488041cfc617..68d97441432cca3140d2151a50af3384b034464a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 49fb742d61b98b1f18484f6f1783cba553447555..1d1b27a96c5bb1c35cf5cacb58fc8cf26746d47e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 65d93607f3681d845a87a4f721cdddb8728df660..2207e624f5388e110c80e9f744352bc7a928c8d7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7be0205ad06797a1037977c6757a879f28bb1810..b49ea2a95f4f99f5afde46ee9a1394d16078d675 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 14689dec496095ae2735c197a5ff749aa823f212..dbb03b544e8c46a23637e2283fc808f523342a84 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d74cea416ca05beaa9af14e9d0af3a1fd0aa3c89..1b8e941044079214882a75c7ad7a18a564ef9643 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d6fa0fce1fc91e74b197490b0f1f180d386b9545..2ede656ee26a5837912440f5ba8cc27896428ca2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index bc042adf880496ffce63da79852b574e1a97c9f2..363767cf01e5af3c09e4401fc78166ae4aa78ece 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4459e32c683d3c9e78bdf6bf807048b05e52a4a4..29e9e99f7fe3010b17f4b4ef0e991d72a6d2f39a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5a588611ab484f12e33966fcbdf4ce2a03ffdc4e..ee3f872870bc77d0d6d599037fbbb7a735982581 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9d28867e60dc8b2c907e8cc76c92d10d95caa68e..cd5288a257a91fe9e6b5b80c55383151fef6cf6f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7ca6925a87cad08adea5abad1d1ec43dcf974d49..ab060261b43e66e9ab3c69b20ee0f88c169bf092 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1606524312e3a4f4219d47f7513390abc9986f75..3cde553bcbe182d06e277d26fbd188d4f9fd7237 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index be3f66973ee809637c30ada74e2ca706e0185795..3af8de3fcea43824a635112871850c0c0d5e1f94 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f0b09bf9887d605ee78e03a7a9aa2c84f2a4c98c..daf49f7ea311385a1342327ef732015f3a384f13 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 20d809d90c5b8f113e7b0f3c56fcafa58893dda2..04bd16c08f9e5fc08cf330475c44660d19b02ab3 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 26e371073b1aa881b1bac40a0b6579da0f5a2c12..fd11018b01687feb839b9253a460395500cca8de 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6578dee2e51b276f33154cdc4b2fa868f94245d5..841caed11c08a4d8a018c7562c6d40cfca6cdb47 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 99dc7b287d555d4baf37f4422649b3281b243b87..5b16c5484bee2555b49c5cfdb54b2b0dceede93b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3d36df828f520b1c196783453905c3f05f34543c..1e66d960fc118fcb3cdcc8faf68660fede2e6c6d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 414076818d40ee234d05970e5675023d1bff5b20..858fdd6be5982ee205c0bbff92f1d71d95dacf15 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 96540506058fcc94351e2980feab2adf990d9ad6..2e6caabba07a1852b766164231da39527a5e5f44 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0889a629505f857582c4f02bd65a8256cf3fe87e..e701d8c33dbfb376b6683b4357a4a268f41f659a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 12e6cff54f7885558d898c35c0a7375282139c5c..e0fd9b4978cd6a22af964a3165d26cfd62ea032f 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e3828cc4361bfa46ed25d9b4374545758542b22c..d590693eb54ecb3701962617d2f8b36f82105c73 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3c498dc1636ecd27cb439de4e0f72a44c9bb5ec6..76ab5c1a814eb3d1696dd298fb940ffe1c4e04a8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index eab70d58852a674fadb1881df67fbc0ad9717aa1..6b919127cd9dcf650ba0bf52faa7c8f95a201aa0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b4b47db2dee271a82ec4f3f6537f194c7c982ef8..96d007df65ec15d537db26319d472938d988ff1e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 15dddc10fc9ba5dc1cfd78b5f962ab1c21a45e1f..6921c7f3d208f290ed0a1db3a0ce72181bf6537f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 14f65f6345b9a2de465cbe06d6047dc40f2c9576..f1249e3463bedc78122250d4f63a5e531588ef93 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fd1ff54cda1911a1e520440afd8a48125d927069..607eb9e5150de6261a849b285e58dc9fdffc636b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 74b24c82707e733fe944029382f13f3c4d1b4e50..80fcfc8c9c1b79039dae19cf05ee5974a4a9ceed 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index acd2964c26906f995b159bd382a42ce000cfa2c3..b55642c4ee585c5fe5b61d25cbd0fd7f4ddbcbc4 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 48b9c6f1264351b27b7232af13a810c98553eec1..3d88ef4a3e0d2afd93cd9b575adb9d7e49da9cd4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 283762511b73abcbb87e0a7aa6c9f21ba3f88dcd..42d37109aa5d0fdbb18bc0f72ddb9ffe50e364a1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 963ceef063f8c0de739da154b3330dd95f809895..e634a05974db6a52d4e6bfc2e25996703074c4a8 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3a0423af968cb7e0e57313d91c4dadcadb1cfaac..5b74677bf74dddfb48d160f390b0aada33eea458 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 89ec645e7730ac6765c8013d82d08c51947da550..7ae521ce8d3f3c09d0f58f95cf4bf52a59d4c91f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 90a0380fb8a04b51fd508817247531303825d18a..7eee0a6f02f60d28b96754338ad47120f272fe14 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7a736f4d1fd8184953a023ae70cbc95c56f27024..fe54a8c73b8c8f1618c12badbe62d07ec73c7ea7 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -222,13 +222,19 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
                }
        }
 
-       /* Clear the entry in all cases */
+       /* Clear the Node entry in all cases */
 
        node->object = NULL;
        if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_OPERAND) {
+
+               /* Unlink object from front of possible object list */
+
                node->object = obj_desc->common.next_object;
+
+               /* Handle possible 2-descriptor object */
+
                if (node->object &&
-                   ((node->object)->common.type != ACPI_TYPE_LOCAL_DATA)) {
+                   (node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
                        node->object = node->object->common.next_object;
                }
        }
index 177857340271c4775a26e772df6e29d2c1e8d5ae..e83cff31754bba937e292433e796922eecbfeb45 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d2855d9857c48e84a0149eaccbd5208dad2a6536..392910ffbed9ac92e16953b76ad42768a4276846 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3d5391f9bcb5561ffa888407271dd64d2fcb026c..68f725839eb6d61bf129664922b1b22d7bdb0d05 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -132,12 +132,12 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
         * Decode the type of the expected package contents
         *
         * PTYPE1 packages contain no subpackages
-        * PTYPE2 packages contain sub-packages
+        * PTYPE2 packages contain subpackages
         */
        switch (package->ret_info.type) {
        case ACPI_PTYPE1_FIXED:
                /*
-                * The package count is fixed and there are no sub-packages
+                * The package count is fixed and there are no subpackages
                 *
                 * If package is too small, exit.
                 * If package is larger than expected, issue warning but continue
@@ -169,7 +169,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
 
        case ACPI_PTYPE1_VAR:
                /*
-                * The package count is variable, there are no sub-packages, and all
+                * The package count is variable, there are no subpackages, and all
                 * elements must be of the same type
                 */
                for (i = 0; i < count; i++) {
@@ -185,7 +185,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
 
        case ACPI_PTYPE1_OPTION:
                /*
-                * The package count is variable, there are no sub-packages. There are
+                * The package count is variable, there are no subpackages. There are
                 * a fixed number of required elements, and a variable number of
                 * optional elements.
                 *
@@ -242,7 +242,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
                elements++;
                count--;
 
-               /* Examine the sub-packages */
+               /* Examine the subpackages */
 
                status =
                    acpi_ns_check_package_list(info, package, elements, count);
@@ -250,7 +250,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
 
        case ACPI_PTYPE2_PKG_COUNT:
 
-               /* First element is the (Integer) count of sub-packages to follow */
+               /* First element is the (Integer) count of subpackages to follow */
 
                status = acpi_ns_check_object_type(info, elements,
                                                   ACPI_RTYPE_INTEGER, 0);
@@ -270,7 +270,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
                count = expected_count;
                elements++;
 
-               /* Examine the sub-packages */
+               /* Examine the subpackages */
 
                status =
                    acpi_ns_check_package_list(info, package, elements, count);
@@ -283,9 +283,9 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
        case ACPI_PTYPE2_FIX_VAR:
                /*
                 * These types all return a single Package that consists of a
-                * variable number of sub-Packages.
+                * variable number of subpackages.
                 *
-                * First, ensure that the first element is a sub-Package. If not,
+                * First, ensure that the first element is a subpackage. If not,
                 * the BIOS may have incorrectly returned the object as a single
                 * package instead of a Package of Packages (a common error if
                 * there is only one entry). We may be able to repair this by
@@ -310,7 +310,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
                        count = 1;
                }
 
-               /* Examine the sub-packages */
+               /* Examine the subpackages */
 
                status =
                    acpi_ns_check_package_list(info, package, elements, count);
@@ -370,9 +370,9 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
        u32 j;
 
        /*
-        * Validate each sub-Package in the parent Package
+        * Validate each subpackage in the parent Package
         *
-        * NOTE: assumes list of sub-packages contains no NULL elements.
+        * NOTE: assumes list of subpackages contains no NULL elements.
         * Any NULL elements should have been removed by earlier call
         * to acpi_ns_remove_null_elements.
         */
@@ -389,7 +389,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
                        return (status);
                }
 
-               /* Examine the different types of expected sub-packages */
+               /* Examine the different types of expected subpackages */
 
                info->parent_package = sub_package;
                switch (package->ret_info.type) {
@@ -450,14 +450,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
 
                case ACPI_PTYPE2_FIXED:
 
-                       /* Each sub-package has a fixed length */
+                       /* Each subpackage has a fixed length */
 
                        expected_count = package->ret_info2.count;
                        if (sub_package->package.count < expected_count) {
                                goto package_too_small;
                        }
 
-                       /* Check the type of each sub-package element */
+                       /* Check the type of each subpackage element */
 
                        for (j = 0; j < expected_count; j++) {
                                status =
@@ -475,14 +475,14 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
 
                case ACPI_PTYPE2_MIN:
 
-                       /* Each sub-package has a variable but minimum length */
+                       /* Each subpackage has a variable but minimum length */
 
                        expected_count = package->ret_info.count1;
                        if (sub_package->package.count < expected_count) {
                                goto package_too_small;
                        }
 
-                       /* Check the type of each sub-package element */
+                       /* Check the type of each subpackage element */
 
                        status =
                            acpi_ns_check_package_elements(info, sub_elements,
@@ -531,7 +531,7 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
                                (*sub_elements)->integer.value = expected_count;
                        }
 
-                       /* Check the type of each sub-package element */
+                       /* Check the type of each subpackage element */
 
                        status =
                            acpi_ns_check_package_elements(info,
@@ -557,10 +557,10 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
 
 package_too_small:
 
-       /* The sub-package count was smaller than required */
+       /* The subpackage count was smaller than required */
 
        ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname, info->node_flags,
-                             "Return Sub-Package[%u] is too small - found %u elements, expected %u",
+                             "Return SubPackage[%u] is too small - found %u elements, expected %u",
                              i, sub_package->package.count, expected_count));
 
        return (AE_AML_OPERAND_VALUE);
index a05afff50eb9cc6e9e515ef745fb8b0ae8fd7779..7e417aa5c91e2784cec2187e04efa31d103ce1c0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -207,13 +207,30 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
         * this predefined name. Either one return value is expected, or none,
         * for both methods and other objects.
         *
-        * Exit now if there is no return object. Warning if one was expected.
+        * Try to fix if there was no return object. Warning if failed to fix.
         */
        if (!return_object) {
                if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
-                       ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
-                                             ACPI_WARN_ALWAYS,
-                                             "Missing expected return value"));
+                       if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+                               ACPI_WARN_PREDEFINED((AE_INFO,
+                                                     info->full_pathname,
+                                                     ACPI_WARN_ALWAYS,
+                                                     "Found unexpected NULL package element"));
+
+                               status =
+                                   acpi_ns_repair_null_element(info,
+                                                               expected_btypes,
+                                                               package_index,
+                                                               return_object_ptr);
+                               if (ACPI_SUCCESS(status)) {
+                                       return (AE_OK); /* Repair was successful */
+                               }
+                       } else {
+                               ACPI_WARN_PREDEFINED((AE_INFO,
+                                                     info->full_pathname,
+                                                     ACPI_WARN_ALWAYS,
+                                                     "Missing expected return value"));
+                       }
 
                        return (AE_AML_NO_RETURN_VALUE);
                }
@@ -448,7 +465,7 @@ acpi_ns_repair_null_element(struct acpi_evaluate_info * info,
  * RETURN:      None.
  *
  * DESCRIPTION: Remove all NULL package elements from packages that contain
- *              a variable number of sub-packages. For these types of
+ *              a variable number of subpackages. For these types of
  *              packages, NULL elements can be safely removed.
  *
  *****************************************************************************/
@@ -469,7 +486,7 @@ acpi_ns_remove_null_elements(struct acpi_evaluate_info *info,
        /*
         * We can safely remove all NULL elements from these package types:
         * PTYPE1_VAR packages contain a variable number of simple data types.
-        * PTYPE2 packages contain a variable number of sub-packages.
+        * PTYPE2 packages contain a variable number of subpackages.
         */
        switch (package_type) {
        case ACPI_PTYPE1_VAR:
index 6a25d320b169ea9671385a0c825ecf9dadd290d9..b09e6bef72b88471352fede319e0c690591efd97 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -432,8 +432,8 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
  * DESCRIPTION: Repair for the _CST object:
  *              1. Sort the list ascending by C state type
  *              2. Ensure type cannot be zero
- *              3. A sub-package count of zero means _CST is meaningless
- *              4. Count must match the number of C state sub-packages
+ *              3. A subpackage count of zero means _CST is meaningless
+ *              4. Count must match the number of C state subpackages
  *
  *****************************************************************************/
 
@@ -611,6 +611,7 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
        union acpi_operand_object **top_object_list;
        union acpi_operand_object **sub_object_list;
        union acpi_operand_object *obj_desc;
+       union acpi_operand_object *sub_package;
        u32 element_count;
        u32 index;
 
@@ -619,8 +620,17 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
        top_object_list = package_object->package.elements;
        element_count = package_object->package.count;
 
-       for (index = 0; index < element_count; index++) {
-               sub_object_list = (*top_object_list)->package.elements;
+       /* Examine each subpackage */
+
+       for (index = 0; index < element_count; index++, top_object_list++) {
+               sub_package = *top_object_list;
+               sub_object_list = sub_package->package.elements;
+
+               /* Check for minimum required element count */
+
+               if (sub_package->package.count < 4) {
+                       continue;
+               }
 
                /*
                 * If the BIOS has erroneously reversed the _PRT source_name (index 2)
@@ -634,15 +644,12 @@ acpi_ns_repair_PRT(struct acpi_evaluate_info *info,
                        sub_object_list[2] = obj_desc;
                        info->return_flags |= ACPI_OBJECT_REPAIRED;
 
-                       ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+                       ACPI_WARN_PREDEFINED((AE_INFO,
+                                             info->full_pathname,
                                              info->node_flags,
                                              "PRT[%X]: Fixed reversed SourceName and SourceIndex",
                                              index));
                }
-
-               /* Point to the next union acpi_operand_object in the top level package */
-
-               top_object_list++;
        }
 
        return (AE_OK);
@@ -679,7 +686,7 @@ acpi_ns_repair_PSS(struct acpi_evaluate_info *info,
        u32 i;
 
        /*
-        * Entries (sub-packages) in the _PSS Package must be sorted by power
+        * Entries (subpackages) in the _PSS Package must be sorted by power
         * dissipation, in descending order. If it appears that the list is
         * incorrectly sorted, sort it. We sort by cpu_frequency, since this
         * should be proportional to the power.
@@ -767,9 +774,9 @@ acpi_ns_repair_TSS(struct acpi_evaluate_info *info,
  *
  * PARAMETERS:  info                - Method execution information block
  *              return_object       - Pointer to the top-level returned object
- *              start_index         - Index of the first sub-package
- *              expected_count      - Minimum length of each sub-package
- *              sort_index          - Sub-package entry to sort on
+ *              start_index         - Index of the first subpackage
+ *              expected_count      - Minimum length of each subpackage
+ *              sort_index          - Subpackage entry to sort on
  *              sort_direction      - Ascending or descending
  *              sort_key_name       - Name of the sort_index field
  *
@@ -805,7 +812,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
        }
 
        /*
-        * NOTE: assumes list of sub-packages contains no NULL elements.
+        * NOTE: assumes list of subpackages contains no NULL elements.
         * Any NULL elements should have been removed by earlier call
         * to acpi_ns_remove_null_elements.
         */
@@ -832,7 +839,7 @@ acpi_ns_check_sorted_list(struct acpi_evaluate_info *info,
                        return (AE_AML_OPERAND_TYPE);
                }
 
-               /* Each sub-package must have the minimum length */
+               /* Each subpackage must have the minimum length */
 
                if ((*outer_elements)->package.count < expected_count) {
                        return (AE_AML_PACKAGE_LIMIT);
index 47420faef073b4dada9fa8dc26252639c3213849..af1cc42a8aa183bbae45f2b3794e1acfec888f51 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4a0665b6bcc11c6c6eb8aad3ee62be15b6122694..4a5e3f5c0ff78afede734885a02da7da73f25d44 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e81f15ef659a5c85dbf19e020b20a7ae75523dcd..4758a1f2ce22abb098193bf59495eb75324efdf8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1f0c28ba50df353de078c6ae47f70cbee05f025f..4bd558bf10d226efa496b9974c08395530aae9f3 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -923,19 +923,22 @@ ACPI_EXPORT_SYMBOL(acpi_detach_data)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_data
+ * FUNCTION:    acpi_get_data_full
  *
  * PARAMETERS:  obj_handle          - Namespace node
  *              handler             - Handler used in call to attach_data
  *              data                - Where the data is returned
+ *              callback            - function to execute before returning
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node
+ *              and execute a callback before returning.
  *
  ******************************************************************************/
 acpi_status
-acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+acpi_get_data_full(acpi_handle obj_handle, acpi_object_handler handler,
+                  void **data, void (*callback)(void *))
 {
        struct acpi_namespace_node *node;
        acpi_status status;
@@ -960,10 +963,34 @@ acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
        }
 
        status = acpi_ns_get_attached_data(node, handler, data);
+       if (ACPI_SUCCESS(status) && callback) {
+               callback(*data);
+       }
 
 unlock_and_exit:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
        return (status);
 }
 
+ACPI_EXPORT_SYMBOL(acpi_get_data_full)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_data
+ *
+ * PARAMETERS:  obj_handle          - Namespace node
+ *              handler             - Handler used in call to attach_data
+ *              data                - Where the data is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Retrieve data that was previously attached to a namespace node.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_data(acpi_handle obj_handle, acpi_object_handler handler, void **data)
+{
+       return acpi_get_data_full(obj_handle, handler, data, NULL);
+}
+
 ACPI_EXPORT_SYMBOL(acpi_get_data)
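
The hunk above splits the old acpi_get_data() into acpi_get_data_full(), which can run a caller-supplied callback on the attached data while the namespace mutex is still held, with acpi_get_data() kept as a thin wrapper that passes a NULL callback. A minimal usage sketch, assuming the data was previously attached with acpi_attach_data(); my_dev_data, my_data_handler and my_data_get are illustrative names, not part of this patch:

/* Illustrative only; not part of the commit above */
#include <acpi/acpi.h>

struct my_dev_data {
        long refcount;                  /* simplified; a real driver would use an atomic type */
};

/* acpi_object_handler: invoked by ACPICA when the data is detached from the node */
static void my_data_handler(acpi_handle handle, void *data)
{
}

/* Runs under the ACPICA namespace mutex, just before acpi_get_data_full() returns */
static void my_data_get(void *data)
{
        struct my_dev_data *d = data;

        d->refcount++;                  /* take a reference while the node is still locked */
}

static acpi_status my_lookup(acpi_handle handle, struct my_dev_data **out)
{
        /* Same lookup as acpi_get_data(), plus the locked callback */
        return acpi_get_data_full(handle, my_data_handler,
                                  (void **)out, my_data_get);
}

Calling acpi_get_data(handle, my_data_handler, (void **)out) remains equivalent to passing a NULL callback, as the wrapper added at the end of the hunk shows.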
index 3a4bd3ff49a365827f2bc70dff2250c40082ea19..8c6c11ce9760b889dca1c9442cade74c7ae90054 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0e6d79e462d4f8c7233dc77ad37ad6f893faaea9..dae9401be7a2d6cef946f4d3ca3fab173165cea6 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 91a5a69db80c316552e13f9704eb9435f9b94a30..314d314340ae8a9151f254ba7b65715baeaf5bdb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 065b44ae538f28b7595fe9af1870b5c824052237..646d1a3f6e27b7e5695aaec30746ba4bd4c9aa53 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 95dc608a66a85b3b01e5a461b58a39f4a02b20f7..af1f46cd37a5ad3620a165351eac2f061da104e0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1b659e59710ab525d85cbd6674c900b3da746f36..1755d2ac5656ae5c975e73b04e912741cef8fdf4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b0c9787dbe6194971f62f39e7c419ee1c389226e..0d8d37ffd04d34d1a8a638d05f511a52f6047969 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 79d9a28dedefe4fd6c69fdae5291a3c51106e5e6..6d27b597394e907055417402cb7f35bab3ed63d1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6a4b6fb39f32e197767a46aeef54711ad9df2bfc..32d250feea214acbef13cc8afab6af017bc47d0b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 877dc0de8df3e19da7057a8fd99042eb2d6f80b6..0b64181e772090fec84aeb2494781459a43fedeb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 91fa73a6e55e7245ff9147b769a0a32f3c509864..3cd48802eede240750a45a3e4a57c3475fc58e99 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index abd65624754f20175721dea4e570e7fa0a2d887c..9cb07e1e76d9dba5a0b4403350e436606c48bcf4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fcb7a840e996bdee6a3db496a296e44edd54d6f0..e135acaa5e1c2cb89ae522d2b8f0b9c4d53f4851 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f3a9276ac665035adefb27e949fa842a6a629802..916fd095ff342b53a11b0c57d16110bd5f108889 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b60c9cf82862f9e94dfb254b6a30b003e2dd5426..689556744b034d82ac43c72bfcd70b4f515fbe83 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -636,7 +636,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
 
        for (index = 0; index < number_of_elements; index++) {
 
-               /* Dereference the sub-package */
+               /* Dereference the subpackage */
 
                package_element = *top_object_list;
 
index 3a2ace93e62cf5d11690a4a4a907ae5539007f9e..75d3690506570d824d4d25d1280c48bdbbe6731f 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -273,7 +273,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                 */
                user_prt->length = (sizeof(struct acpi_pci_routing_table) - 4);
 
-               /* Each sub-package must be of length 4 */
+               /* Each subpackage must be of length 4 */
 
                if ((*top_object_list)->package.count != 4) {
                        ACPI_ERROR((AE_INFO,
@@ -283,7 +283,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                }
 
                /*
-                * Dereference the sub-package.
+                * Dereference the subpackage.
                 * The sub_object_list will now point to an array of the four IRQ
                 * elements: [Address, Pin, Source, source_index]
                 */
@@ -292,7 +292,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                /* 1) First subobject: Dereference the PRT.Address */
 
                obj_desc = sub_object_list[0];
-               if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+               if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
                        ACPI_ERROR((AE_INFO,
                                    "(PRT[%u].Address) Need Integer, found %s",
                                    index,
@@ -305,7 +305,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                /* 2) Second subobject: Dereference the PRT.Pin */
 
                obj_desc = sub_object_list[1];
-               if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+               if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
                        ACPI_ERROR((AE_INFO,
                                    "(PRT[%u].Pin) Need Integer, found %s",
                                    index,
@@ -394,7 +394,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
                /* 4) Fourth subobject: Dereference the PRT.source_index */
 
                obj_desc = sub_object_list[3];
-               if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
+               if (!obj_desc || obj_desc->common.type != ACPI_TYPE_INTEGER) {
                        ACPI_ERROR((AE_INFO,
                                    "(PRT[%u].SourceIndex) Need Integer, found %s",
                                    index,
index 8a2d4986b0aa576695ed2f479703cfa8dc4fd139..c3c56b5a9788319cec129216882bd4d9cf8faebc 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
 
 #define _COMPONENT          ACPI_RESOURCES
 ACPI_MODULE_NAME("rsdump")
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
 /* Local prototypes */
 static void acpi_rs_out_string(char *title, char *value);
 
index 46192bd5365335e97120269519c097fd6c2aa7be..2f9332d5c973047c2e9362bfd921a1d2aafb9514 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,7 +48,7 @@
 #define _COMPONENT          ACPI_RESOURCES
 ACPI_MODULE_NAME("rsdumpinfo")
 
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
 #define ACPI_RSD_OFFSET(f)          (u8) ACPI_OFFSET (union acpi_resource_data,f)
 #define ACPI_PRT_OFFSET(f)          (u8) ACPI_OFFSET (struct acpi_pci_routing_table,f)
 #define ACPI_RSD_TABLE_SIZE(name)   (sizeof(name) / sizeof (struct acpi_rsdump_info))
index 41fed78e0de62f84f0d30e74d1560643e28d872a..9d3f8a9a24bd1819e8b5ea932df087550aa12f55 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -132,8 +132,7 @@ struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
        acpi_rs_convert_uart_serial_bus,
 };
 
-#ifdef ACPI_FUTURE_USAGE
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
 
 /* Dispatch table for resource dump functions */
 
@@ -168,7 +167,6 @@ struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
 };
 #endif
 
-#endif                         /* ACPI_FUTURE_USAGE */
 /*
  * Base sizes for external AML resource descriptors, indexed by internal type.
  * Includes size of the descriptor header (1 byte for small descriptors,
index ca183755a6f9bb3923065906fc00b2d8b82b8a1f..19d64873290ab63a1bb209bb26c3eefab767d103 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 364decc1028ac39aa20030a392693eea66ed291b..3461f7db26dfb561803f1b24f4205b8d93690efe 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6053aa182093e75b070ecd89bd318348d6f2b62f..77291293af64c5bf6f8ed509790a98088aad6035 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ebc773a1b350b4386cb3896f464efe957e9a379d..eab4483ff5f8e5b44b7f5c2f22b0275925ea7cd1 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c99cec9cefdec4ee20305ed6e5ff578b60960f58..41eea4bc089c55c1858467284ff9f43011a9b178 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fe49fc43e10f54b726debec50b6f4c54c4cd24a9..9e8407223d9575ca5dc3d63927c235cf328dd40a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 14a7982c9961088ae50c0283bfefb4b4b5ccb488..897a5ceb042009682b2ce109614a695c2f7531f2 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 01e476988aaee38f7529e71d091310cd1e7afae5..877ab9202133fd76c65a003daf7592e2486cfb38 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8f89263ac47e28cfecce4304a16b79406ba5c71f..ec14588254d433e63e1b17352f16f3702b9a09f6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e4f4f02d49e7cf58f4acc9db842d741bb46a9301..c12003947bd53c09ed4c63603f93736b19f911e3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 634357d51fe9b4a30103f5cb1169a97d1da72b7f..e3040947e9a00fff25b9cedc13813d27d8ebaf59 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -292,10 +292,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
                new_table = acpi_os_map_memory(new_address, new_table_length);
                if (!new_table) {
                        ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
-                                       "%4.4s %p Attempted physical table override failed",
+                                       "%4.4s " ACPI_PRINTF_UINT
+                                       " Attempted physical table override failed",
                                        table_header->signature,
-                                       ACPI_CAST_PTR(void,
-                                                     table_desc->address)));
+                                       ACPI_FORMAT_TO_UINT(table_desc->
+                                                           address)));
                        return (NULL);
                }
 
@@ -308,11 +309,11 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header
 
 finish_override:
 
-       ACPI_INFO((AE_INFO,
-                  "%4.4s %p %s table override, new table: %p",
+       ACPI_INFO((AE_INFO, "%4.4s " ACPI_PRINTF_UINT
+                  " %s table override, new table: " ACPI_PRINTF_UINT,
                   table_header->signature,
-                  ACPI_CAST_PTR(void, table_desc->address),
-                  override_type, new_table));
+                  ACPI_FORMAT_TO_UINT(table_desc->address),
+                  override_type, ACPI_FORMAT_TO_UINT(new_table)));
 
        /* We can now unmap/delete the original table (if fully mapped) */
 
index 6866e767ba90947359ca432bcba8643a92c0069d..df3bb20ea3255c9cf745e8ad8e84f4eaae0292f4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -128,15 +128,17 @@ acpi_tb_print_table_header(acpi_physical_address address,
        struct acpi_table_header local_header;
 
        /*
-        * The reason that the Address is cast to a void pointer is so that we
-        * can use %p which will work properly on both 32-bit and 64-bit hosts.
+        * The reason that we use ACPI_PRINTF_UINT and ACPI_FORMAT_TO_UINT is to
+        * support both 32-bit and 64-bit hosts/addresses in a consistent manner.
+        * The %p specifier does not emit uniform output on all hosts. On some,
+        * leading zeros are not supported.
         */
        if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
 
                /* FACS only has signature and length fields */
 
-               ACPI_INFO((AE_INFO, "%4.4s %p %06X",
-                          header->signature, ACPI_CAST_PTR(void, address),
+               ACPI_INFO((AE_INFO, "%-4.4s " ACPI_PRINTF_UINT " %06X",
+                          header->signature, ACPI_FORMAT_TO_UINT(address),
                           header->length));
        } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
 
@@ -147,8 +149,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
                                          header)->oem_id, ACPI_OEM_ID_SIZE);
                acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
 
-               ACPI_INFO((AE_INFO, "RSDP %p %06X (v%.2d %6.6s)",
-                          ACPI_CAST_PTR(void, address),
+               ACPI_INFO((AE_INFO,
+                          "RSDP " ACPI_PRINTF_UINT " %06X (v%.2d %-6.6s)",
+                          ACPI_FORMAT_TO_UINT(address),
                           (ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
                            revision >
                            0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -162,8 +165,9 @@ acpi_tb_print_table_header(acpi_physical_address address,
                acpi_tb_cleanup_table_header(&local_header, header);
 
                ACPI_INFO((AE_INFO,
-                          "%4.4s %p %06X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
-                          local_header.signature, ACPI_CAST_PTR(void, address),
+                          "%-4.4s " ACPI_PRINTF_UINT
+                          " %06X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
+                          local_header.signature, ACPI_FORMAT_TO_UINT(address),
                           local_header.length, local_header.revision,
                           local_header.oem_id, local_header.oem_table_id,
                           local_header.oem_revision,
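
The new comment in this hunk notes that %p output is not uniform across hosts (some libraries do not emit leading zeros), which is why the patch switches table addresses to the ACPI_PRINTF_UINT / ACPI_FORMAT_TO_UINT macros. A generic stand-alone illustration of the issue using standard C fixed-width formatting; it does not use the ACPICA macros themselves:

/* Illustrative only; shows the portability problem the hunk addresses */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t address = 0x00000000BFFE1000ULL;   /* example physical address */

        /* %p output is implementation-defined; width and padding vary by host */
        printf("%p\n", (void *)(uintptr_t)address);

        /* Fixed-width hex gives identical, zero-padded output everywhere */
        printf("0x%16.16" PRIX64 "\n", address);

        return 0;
}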
index 6412d3c301cb62d480b9c753a0dbbf2b7af612f3..a4702eee91a820d131960754c14da6aa62f50939 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index db826eaadd1c7300a58fde06d0d29df463ad97a8..a1593159d9ea4faeb3d0d61b8731974a1af05676 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 60b5a871833cce325393e516562d2574476d304b..0909420fc776510c1d4fef5278112391a5ad0f4d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e4e1468877c3a27f2db6899098765ccaa2f42eaa..65ab8fed3d5e504011328030cd192b663b1ed507 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2c2b6ae5dfc4ab1fb3dbd20adc670f7214a4a7ed..a1acec9d2ef36b20539ad32f9f0c37dcb79dcab3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1851762fc5b522371cbde01980956457ed91c37d..efac83c606dce04470b9b2c09487a882afff7565 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 11fde93be120af0291b3e60d049f90fe0e3502d3..3c16997406535dc2e85445af0fd1b60cd0db2610 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cacd2fd9e665592ac64241be9fc6f4afd2cdc100..78fde0aac487ee585d2da9dfe0294126ad545a65 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index edff4e653d9a06f3b9312c6512ca2a937c9ab0e6..270c16464dd948181f98755f8e1f13809c9acf13 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -535,10 +535,10 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
 
        case ACPI_TYPE_LOCAL_REFERENCE:
 
-               /* TBD: should validate incoming handle */
+               /* An incoming reference is defined to be a namespace node */
 
-               internal_object->reference.class = ACPI_REFCLASS_NAME;
-               internal_object->reference.node =
+               internal_object->reference.class = ACPI_REFCLASS_REFOF;
+               internal_object->reference.object =
                    external_object->reference.handle;
                break;
 
index d971c8631263b3dd8f0b9ec05269122b78c24391..21a20ac5b1e1aec4f8654b03caab01ba26466c8a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b3f31dd89a45d90f66ab25a617960f56505501d4..fbfa9eca011f8457ba0fecd5e1bed00b69466022 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c07d2227ea42587937ecf7bf37d88e91a6f65a66..a3516de213fa5c1bfa5a45dd368c8c06d50aab19 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -75,6 +75,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
        union acpi_operand_object *handler_desc;
        union acpi_operand_object *second_desc;
        union acpi_operand_object *next_desc;
+       union acpi_operand_object *start_desc;
        union acpi_operand_object **last_obj_ptr;
 
        ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);
@@ -235,10 +236,11 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                        if (handler_desc) {
                                next_desc =
                                    handler_desc->address_space.region_list;
+                               start_desc = next_desc;
                                last_obj_ptr =
                                    &handler_desc->address_space.region_list;
 
-                               /* Remove the region object from the handler's list */
+                               /* Remove the region object from the handler list */
 
                                while (next_desc) {
                                        if (next_desc == object) {
@@ -247,10 +249,19 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                                                break;
                                        }
 
-                                       /* Walk the linked list of handler */
+                                       /* Walk the linked list of handlers */
 
                                        last_obj_ptr = &next_desc->region.next;
                                        next_desc = next_desc->region.next;
+
+                                       /* Prevent infinite loop if list is corrupted */
+
+                                       if (next_desc == start_desc) {
+                                               ACPI_ERROR((AE_INFO,
+                                                           "Circular region list in address handler object %p",
+                                                           handler_desc));
+                                               return_VOID;
+                                       }
                                }
 
                                if (handler_desc->address_space.handler_flags &
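
The guard added above records the first element of the handler's region list and stops walking if the list ever wraps back to it, so a corrupted circular list can no longer hang object deletion. The same pattern in isolation, as a small stand-alone sketch (struct region and find_region are illustrative, not ACPICA types):

/* Illustrative only; mirrors the "remember the start node" guard above */
#include <stdbool.h>
#include <stddef.h>

struct region {
        struct region *next;
};

/*
 * Walk a singly linked list looking for 'target'; return false instead of
 * looping forever if the list erroneously wraps back to its head.
 */
static bool find_region(struct region *head, struct region *target)
{
        struct region *start = head;    /* remember where the walk began */
        struct region *node = head;

        while (node) {
                if (node == target)
                        return true;

                node = node->next;
                if (node == start)      /* wrapped around: corrupted list */
                        return false;
        }
        return false;
}

As in the patch, this only catches a loop that returns to the list head; it is a cheap safety net for a corrupted list rather than a general cycle detector.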
index 154fdcaa5830dc61d9cbcda341dc5a1e94069a94..8e544d4688cd827c6a3b05ab734f81775ef7fd9d 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 16fb90506db72ea16d0e9bf81696bba1d9c4ccfa..8fed1482d228b2409e2a5beddd623f71d9e0de61 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3cf7b597edb9357e0e05864cacaa88d55942c609..0403dcaabaf20f49b9d0141c7b4762f6cb1b8bfc 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 030cb0dc673c881dadcb6f62980869e985c16d61..f3abeae9d2f87b69650e324cd66a2ea799cc1a93 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,31 +55,27 @@ ACPI_MODULE_NAME("utglobal")
  * Static global variable initialization.
  *
  ******************************************************************************/
-/*
- * We want the debug switches statically initialized so they
- * are already set when the debugger is entered.
- */
-/* Debug switch - level and trace mask */
+/* Debug output control masks */
 u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
 
-/* Debug switch - layer (component) mask */
-
 u32 acpi_dbg_layer = 0;
-u32 acpi_gbl_nesting_level = 0;
 
-/* Debugger globals */
+/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
 
-u8 acpi_gbl_db_terminate_threads = FALSE;
-u8 acpi_gbl_abort_method = FALSE;
-u8 acpi_gbl_method_executing = FALSE;
+struct acpi_table_fadt acpi_gbl_FADT;
+u32 acpi_gbl_trace_flags;
+acpi_name acpi_gbl_trace_method_name;
+u8 acpi_gbl_system_awake_and_running;
+u32 acpi_current_gpe_count;
 
-/* System flags */
-
-u32 acpi_gbl_startup_flags = 0;
-
-/* System starts uninitialized */
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
 
-u8 acpi_gbl_shutdown = TRUE;
+/* Various state name strings */
 
 const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT] = {
        "\\_S0_",
@@ -335,7 +331,6 @@ acpi_status acpi_ut_init_globals(void)
 
        acpi_gbl_DSDT = NULL;
        acpi_gbl_cm_single_step = FALSE;
-       acpi_gbl_db_terminate_threads = FALSE;
        acpi_gbl_shutdown = FALSE;
        acpi_gbl_ns_lookup_count = 0;
        acpi_gbl_ps_find_count = 0;
@@ -382,6 +377,10 @@ acpi_status acpi_ut_init_globals(void)
        acpi_gbl_disable_mem_tracking = FALSE;
 #endif
 
+#ifdef ACPI_DEBUGGER
+       acpi_gbl_db_terminate_threads = FALSE;
+#endif
+
        return_ACPI_STATUS(AE_OK);
 }
 
index bfca7b4b6731621016b11996d5a8158ecb0f825d..4b12880e5b11eec311eb4d2d554f5f2132c90088 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c5d1ac44c07dd2bf87bfdeec43fc719cce2a3c78..5f56fc49021ecf9f198baec7370e940441ecfc06 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5c26ad420344437dbc9b3294531ff82d91a2eec0..dc6e96547f1836c6d3d4b29af712adf27b52a46f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 909fe66e19349b207bf48527e41c0e07f3918cbb..d44dee6ee10a15b61cbd05573a79f0c3300243c2 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 02f9101b65e47c29dc974020a8701322c9dc3824..2e2bb14e1099c1315051aaccd96f42fe277ec1bc 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 08c32324558449001af0c8ad6374c4286f73d43f..82717fff9ffc34bff4161483808f39788ccdae67 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 517af700399da1e0c8f16c96baab96e7cad6299b..dfa9009bfc8704856242b15063fbc23971eb1181 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8856bd37bc763f2a94c157acb513aefa82342875..685766fc6ca8ae2dbbc4fbe648f08e4f7cd1fcb8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utosi")
 
+/******************************************************************************
+ *
+ * ACPICA policy for new _OSI strings:
+ *
+ * It is the stated policy of ACPICA that new _OSI strings will be integrated
+ * into this module as soon as possible after they are defined. It is strongly
+ * recommended that all ACPICA hosts mirror this policy and integrate any
+ * changes to this module as soon as possible. There are several historical
+ * reasons behind this policy:
+ *
+ * 1) New BIOSs tend to test only the case where the host responds TRUE to
+ *    the latest version of Windows, which would respond to the latest/newest
+ *    _OSI string. Not responding TRUE to the latest version of Windows will
+ *    risk executing untested code paths throughout the DSDT and SSDTs.
+ *
+ * 2) If a new _OSI string is recognized only after a significant delay, this
+ *    has the potential to cause problems on existing working machines because
+ *    of the possibility that a new and different path through the ASL code
+ *    will be executed.
+ *
+ * 3) New _OSI strings are tending to come out about once per year. A delay
+ *    in recognizing a new string for a significant amount of time risks the
+ *    release of another string which only compounds the initial problem.
+ *
+ *****************************************************************************/
 /*
  * Strings supported by the _OSI predefined control method (which is
  * implemented internally within this module.)
@@ -74,6 +99,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
        {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2},  /* Windows Vista SP2 - Added 09/2010 */
        {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7},      /* Windows 7 and Server 2008 R2 - Added 09/2009 */
        {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8},      /* Windows 8 and Server 2012 - Added 08/2012 */
+       {"Windows 2013", NULL, 0, ACPI_OSI_WIN_8},      /* Windows 8.1 and Server 2012 R2 - Added 01/2014 */
 
        /* Feature Group Strings */
 
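The utosi.c hunk above spells out the ACPICA policy for new _OSI strings and adds "Windows 2013" (mapped to the existing ACPI_OSI_WIN_8 flag). As a rough, hypothetical illustration of why prompt integration matters - plain C, not ACPICA code, with made-up names and placeholder flag values - a host's _OSI handler is essentially a lookup over such a table, so a missing entry makes the host answer FALSE and steers firmware down a different, untested ASL path:

/*
 * Hypothetical, simplified sketch (not ACPICA code) of answering an _OSI
 * query from a static interface table.
 */
#include <stdbool.h>
#include <string.h>

struct osi_entry {
	const char *name;
	unsigned int osi_flag;		/* placeholder for ACPI_OSI_WIN_* */
};

static const struct osi_entry osi_table[] = {
	{ "Windows 2012", 0x70 },	/* flag values are placeholders */
	{ "Windows 2013", 0x70 },	/* Windows 8.1 / Server 2012 R2 */
};

static bool osi_supported(const char *requested)
{
	size_t i;

	for (i = 0; i < sizeof(osi_table) / sizeof(osi_table[0]); i++)
		if (!strcmp(osi_table[i].name, requested))
			return true;	/* firmware sees TRUE */

	return false;			/* untested ASL paths may run */
}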
index eb3aca761369c2ccb30d2d0c4465f107fa5da389..36bec57ebd23fb93b5b198ef59414ce7a6dac6bd 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2b1ce4cd32073fc23e6bcb7d6c5666d156ae6432..db30caff130ad2520f4dc1bd5c7ffb3a988b12bb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2c2accb9e53494fde7c60fe580a2cc267e99528c..14cb6c0c8be2b67df5a2693681c2028245b2363f 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,8 @@
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utresrc")
-#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined (ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
 /*
  * Strings used to decode resource descriptors.
  * Used by both the disassembler and the debugger resource dump routines
index 03c4c2febd84c54b19eaa0cf397cd5e03549449d..1cc97a752c15a320b276d68594f5244a1c22ad29 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 45c0eb26b33d5ade96f7d8d6d4f83fd7747ff88b..77219336c7e01f78c67003a1fc93e48d56e3197b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c0027773cccb250b4d40ea0f5c7621ec6b98ba98..7d0ee969d781310cf12fa122584fdd294539244a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,8 @@ acpi_ut_free_and_track(void *allocation,
        }
 
        acpi_os_free(debug_block);
-       ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed\n", allocation));
+       ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed (block %p)\n",
+                         allocation, debug_block));
        return_VOID;
 }
 
index be322c83643a832a66a3af591aa0c66b270f7db5..502a8492dc83720b41420895d2614da46a96e63f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f7edb88f60544b57f6a4097a93bd5d2f2e783a9a..edd861102f1bf07e8515b6af8621c921cee85283 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 246ef68681f4a4c866bcc28388cf3d383b819b32..13380d8184626e174cebba1950d27d0dbaa08274 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 312299721ba147ca0128517fbbbab407c416240f..2a0f9e04d3a4b128f71143d1e8788c6976388c16 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3650b21832279a91568266208059296fadfab94c..c4dac71509605985ae872a3f524f4dd51881eabc 100644 (file)
@@ -12,7 +12,7 @@ config ACPI_APEI
 
 config ACPI_APEI_GHES
        bool "APEI Generic Hardware Error Source"
-       depends on ACPI_APEI && X86
+       depends on ACPI_APEI
        select ACPI_HED
        select IRQ_WORK
        select GENERIC_ALLOCATOR
index 797a6938d0515edb7ce7e5ce3f4f1648c538e515..9a2c63b2005038476e5a5e77360fd836aad3f25f 100644 (file)
 #include <linux/acpi.h>
 #include <linux/power_supply.h>
 
+#include "battery.h"
+
 #define PREFIX "ACPI: "
 
 #define ACPI_BATTERY_VALUE_UNKNOWN 0xFFFFFFFF
 
-#define ACPI_BATTERY_CLASS             "battery"
 #define ACPI_BATTERY_DEVICE_NAME       "Battery"
-#define ACPI_BATTERY_NOTIFY_STATUS     0x80
-#define ACPI_BATTERY_NOTIFY_INFO       0x81
-#define ACPI_BATTERY_NOTIFY_THRESHOLD   0x82
 
 /* Battery power unit: 0 means mW, 1 means mA */
 #define ACPI_BATTERY_POWER_UNIT_MA     1
@@ -736,6 +734,7 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
        acpi_bus_generate_netlink_event(device->pnp.device_class,
                                        dev_name(&device->dev), event,
                                        acpi_battery_present(battery));
+       acpi_notifier_call_chain(device, event, acpi_battery_present(battery));
        /* acpi_battery_update could remove power_supply object */
        if (old && battery->bat.dev)
                power_supply_changed(&battery->bat);
diff --git a/drivers/acpi/battery.h b/drivers/acpi/battery.h
new file mode 100644 (file)
index 0000000..6c08497
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef __ACPI_BATTERY_H
+#define __ACPI_BATTERY_H
+
+#define ACPI_BATTERY_CLASS "battery"
+
+#define ACPI_BATTERY_NOTIFY_STATUS     0x80
+#define ACPI_BATTERY_NOTIFY_INFO       0x81
+#define ACPI_BATTERY_NOTIFY_THRESHOLD   0x82
+
+#endif
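The new drivers/acpi/battery.h keeps the battery class string and the three notification codes in one place so that other ACPI drivers can share them (the sbs.c hunk later in this diff starts including it). A minimal sketch of a hypothetical consumer - the function below is illustrative and not part of this patch:

/*
 * Hypothetical consumer of the shared battery notification codes.
 */
#include <linux/acpi.h>
#include "battery.h"

static void example_battery_notify(struct acpi_device *device, u32 event)
{
	switch (event) {
	case ACPI_BATTERY_NOTIFY_STATUS:	/* 0x80: status change */
	case ACPI_BATTERY_NOTIFY_INFO:		/* 0x81: static info change */
	case ACPI_BATTERY_NOTIFY_THRESHOLD:	/* 0x82: trip point change */
		dev_dbg(&device->dev, "battery event 0x%x\n", event);
		break;
	default:
		break;
	}
}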
index fcb59c21c68d5c53696a29749d88792f58bc4a64..e7e5844c87d0c8de87379ae7ea6eef8ad91cb79f 100644 (file)
@@ -311,9 +311,7 @@ static void acpi_bus_osc_support(void)
        capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
 #endif
 
-#ifdef ACPI_HOTPLUG_OST
        capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
-#endif
 
        if (!ghes_disable)
                capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
@@ -340,60 +338,77 @@ static void acpi_bus_osc_support(void)
  */
 static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
 {
-       struct acpi_device *device = NULL;
+       struct acpi_device *adev;
        struct acpi_driver *driver;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
-                         type, handle));
+       acpi_status status;
+       u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
 
        switch (type) {
-
        case ACPI_NOTIFY_BUS_CHECK:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
                break;
 
        case ACPI_NOTIFY_DEVICE_WAKE:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_WAKE event\n");
                break;
 
        case ACPI_NOTIFY_EJECT_REQUEST:
-               /* TBD */
+               acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK_LIGHT:
+               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK_LIGHT event\n");
                /* TBD: Exactly what does 'light' mean? */
                break;
 
        case ACPI_NOTIFY_FREQUENCY_MISMATCH:
-               /* TBD */
+               acpi_handle_err(handle, "Device cannot be configured due "
+                               "to a frequency mismatch\n");
                break;
 
        case ACPI_NOTIFY_BUS_MODE_MISMATCH:
-               /* TBD */
+               acpi_handle_err(handle, "Device cannot be configured due "
+                               "to a bus mode mismatch\n");
                break;
 
        case ACPI_NOTIFY_POWER_FAULT:
-               /* TBD */
+               acpi_handle_err(handle, "Device has suffered a power fault\n");
                break;
 
        default:
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Received unknown/unsupported notification [%08x]\n",
-                                 type));
-               break;
+               acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
+               ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
+               goto err;
        }
 
-       acpi_bus_get_device(handle, &device);
-       if (device) {
-               driver = device->driver;
-               if (driver && driver->ops.notify &&
-                   (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
-                       driver->ops.notify(device, type);
+       adev = acpi_bus_get_acpi_device(handle);
+       if (!adev)
+               goto err;
+
+       driver = adev->driver;
+       if (driver && driver->ops.notify &&
+           (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS))
+               driver->ops.notify(adev, type);
+
+       switch (type) {
+       case ACPI_NOTIFY_BUS_CHECK:
+       case ACPI_NOTIFY_DEVICE_CHECK:
+       case ACPI_NOTIFY_EJECT_REQUEST:
+               status = acpi_hotplug_schedule(adev, type);
+               if (ACPI_SUCCESS(status))
+                       return;
+       default:
+               break;
        }
+       acpi_bus_put_acpi_device(adev);
+       return;
+
+ err:
+       acpi_evaluate_ost(handle, type, ost_code, NULL);
 }
 
 /* --------------------------------------------------------------------------
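The rewritten acpi_bus_notify() above trades ACPI_DEBUG_PRINT() for the handle-aware acpi_handle_debug/warn/err helpers and hands BUS_CHECK, DEVICE_CHECK and EJECT_REQUEST events to acpi_hotplug_schedule() for deferred handling. A small sketch of those logging helpers in an otherwise hypothetical notify handler (the handler name is made up):

#include <linux/acpi.h>

static void example_notify(acpi_handle handle, u32 event, void *data)
{
	if (event == ACPI_NOTIFY_BUS_CHECK)
		acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
	else
		acpi_handle_warn(handle, "Unsupported event type 0x%x\n", event);
}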
index 714e957a871a8034ed18be2104b8c107d0b4ced9..db35594d4df7a072a76e0c2fb879c324efdcedb8 100644 (file)
@@ -302,6 +302,10 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                        input_sync(input);
 
                        pm_wakeup_event(&device->dev, 0);
+                       acpi_bus_generate_netlink_event(
+                                       device->pnp.device_class,
+                                       dev_name(&device->dev),
+                                       event, ++button->pushed);
                }
                break;
        default:
index 368f9ddb8480777420b2d5633facfeb238db1e27..63119d09b35432e3200a33c0202c9e7cb7ec2f15 100644 (file)
@@ -31,8 +31,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT                     ACPI_CONTAINER_COMPONENT
 ACPI_MODULE_NAME("container");
 
@@ -68,6 +66,9 @@ static int container_device_attach(struct acpi_device *adev,
        struct device *dev;
        int ret;
 
+       if (adev->flags.is_dock_station)
+               return 0;
+
        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return -ENOMEM;
index c14a00d3dca61e5d943c41d5fe11fb90669550a6..d047739f3380f2d0e53304002499205abe08abd1 100644 (file)
@@ -901,14 +901,29 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
 int acpi_subsys_prepare(struct device *dev)
 {
        /*
-        * Follow PCI and resume devices suspended at run time before running
-        * their system suspend callbacks.
+        * Devices having power.ignore_children set may still be necessary for
+        * suspending their children in the next phase of device suspend.
         */
-       pm_runtime_resume(dev);
+       if (dev->power.ignore_children)
+               pm_runtime_resume(dev);
+
        return pm_generic_prepare(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
 
+/**
+ * acpi_subsys_suspend - Run the device driver's suspend callback.
+ * @dev: Device to handle.
+ *
+ * Follow PCI and resume devices suspended at run time before running their
+ * system suspend callbacks.
+ */
+int acpi_subsys_suspend(struct device *dev)
+{
+       pm_runtime_resume(dev);
+       return pm_generic_suspend(dev);
+}
+
 /**
  * acpi_subsys_suspend_late - Suspend device using ACPI.
  * @dev: Device to suspend.
@@ -937,6 +952,23 @@ int acpi_subsys_resume_early(struct device *dev)
        return ret ? ret : pm_generic_resume_early(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+
+/**
+ * acpi_subsys_freeze - Run the device driver's freeze callback.
+ * @dev: Device to handle.
+ */
+int acpi_subsys_freeze(struct device *dev)
+{
+       /*
+        * This used to be done in acpi_subsys_prepare() for all devices and
+        * some drivers may depend on it, so do it here.  Ideally, however,
+        * runtime-suspended devices should not be touched during freeze/thaw
+        * transitions.
+        */
+       pm_runtime_resume(dev);
+       return pm_generic_freeze(dev);
+}
+
 #endif /* CONFIG_PM_SLEEP */
 
 static struct dev_pm_domain acpi_general_pm_domain = {
@@ -947,8 +979,11 @@ static struct dev_pm_domain acpi_general_pm_domain = {
 #endif
 #ifdef CONFIG_PM_SLEEP
                .prepare = acpi_subsys_prepare,
+               .suspend = acpi_subsys_suspend,
                .suspend_late = acpi_subsys_suspend_late,
                .resume_early = acpi_subsys_resume_early,
+               .freeze = acpi_subsys_freeze,
+               .poweroff = acpi_subsys_suspend,
                .poweroff_late = acpi_subsys_suspend_late,
                .restore_early = acpi_subsys_resume_early,
 #endif
index 5bfd769fc91fa5bfd0890a146a4eed13ac342ff0..f0fc6260266bfe852a7228b7f64bf8db40ce4bb9 100644 (file)
@@ -1,7 +1,9 @@
 /*
  *  dock.c - ACPI dock station driver
  *
- *  Copyright (C) 2006 Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ *  Copyright (C) 2006, 2014, Intel Corp.
+ *  Author: Kristen Carlson Accardi <kristen.c.accardi@intel.com>
+ *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -35,8 +37,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_DOCK_DRIVER_DESCRIPTION "ACPI Dock Station Driver"
 
 ACPI_MODULE_NAME("dock");
@@ -68,15 +68,10 @@ struct dock_station {
 };
 static LIST_HEAD(dock_stations);
 static int dock_station_count;
-static DEFINE_MUTEX(hotplug_lock);
 
 struct dock_dependent_device {
        struct list_head list;
-       acpi_handle handle;
-       const struct acpi_dock_ops *hp_ops;
-       void *hp_context;
-       unsigned int hp_refcount;
-       void (*hp_release)(void *);
+       struct acpi_device *adev;
 };
 
 #define DOCK_DOCKING   0x00000001
@@ -98,13 +93,13 @@ enum dock_callback_type {
  *****************************************************************************/
 /**
  * add_dock_dependent_device - associate a device with the dock station
- * @ds: The dock station
- * @handle: handle of the dependent device
+ * @ds: Dock station.
+ * @adev: Dependent ACPI device object.
  *
  * Add the dependent device to the dock's dependent device list.
  */
-static int __init
-add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+static int add_dock_dependent_device(struct dock_station *ds,
+                                    struct acpi_device *adev)
 {
        struct dock_dependent_device *dd;
 
@@ -112,180 +107,120 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
        if (!dd)
                return -ENOMEM;
 
-       dd->handle = handle;
+       dd->adev = adev;
        INIT_LIST_HEAD(&dd->list);
        list_add_tail(&dd->list, &ds->dependent_devices);
 
        return 0;
 }
 
-static void remove_dock_dependent_devices(struct dock_station *ds)
+static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
+                              enum dock_callback_type cb_type)
 {
-       struct dock_dependent_device *dd, *aux;
+       struct acpi_device *adev = dd->adev;
 
-       list_for_each_entry_safe(dd, aux, &ds->dependent_devices, list) {
-               list_del(&dd->list);
-               kfree(dd);
-       }
-}
+       acpi_lock_hp_context();
 
-/**
- * dock_init_hotplug - Initialize a hotplug device on a docking station.
- * @dd: Dock-dependent device.
- * @ops: Dock operations to attach to the dependent device.
- * @context: Data to pass to the @ops callbacks and @release.
- * @init: Optional initialization routine to run after setting up context.
- * @release: Optional release routine to run on removal.
- */
-static int dock_init_hotplug(struct dock_dependent_device *dd,
-                            const struct acpi_dock_ops *ops, void *context,
-                            void (*init)(void *), void (*release)(void *))
-{
-       int ret = 0;
+       if (!adev->hp)
+               goto out;
 
-       mutex_lock(&hotplug_lock);
-       if (WARN_ON(dd->hp_context)) {
-               ret = -EEXIST;
-       } else {
-               dd->hp_refcount = 1;
-               dd->hp_ops = ops;
-               dd->hp_context = context;
-               dd->hp_release = release;
-               if (init)
-                       init(context);
-       }
-       mutex_unlock(&hotplug_lock);
-       return ret;
-}
+       if (cb_type == DOCK_CALL_FIXUP) {
+               void (*fixup)(struct acpi_device *);
 
-/**
- * dock_release_hotplug - Decrement hotplug reference counter of dock device.
- * @dd: Dock-dependent device.
- *
- * Decrement the reference counter of @dd and if 0, detach its hotplug
- * operations from it, reset its context pointer and run the optional release
- * routine if present.
- */
-static void dock_release_hotplug(struct dock_dependent_device *dd)
-{
-       mutex_lock(&hotplug_lock);
-       if (dd->hp_context && !--dd->hp_refcount) {
-               void (*release)(void *) = dd->hp_release;
-               void *context = dd->hp_context;
-
-               dd->hp_ops = NULL;
-               dd->hp_context = NULL;
-               dd->hp_release = NULL;
-               if (release)
-                       release(context);
-       }
-       mutex_unlock(&hotplug_lock);
-}
+               fixup = adev->hp->fixup;
+               if (fixup) {
+                       acpi_unlock_hp_context();
+                       fixup(adev);
+                       return;
+               }
+       } else if (cb_type == DOCK_CALL_UEVENT) {
+               void (*uevent)(struct acpi_device *, u32);
+
+               uevent = adev->hp->uevent;
+               if (uevent) {
+                       acpi_unlock_hp_context();
+                       uevent(adev, event);
+                       return;
+               }
+       } else {
+               int (*notify)(struct acpi_device *, u32);
 
-static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
-                              enum dock_callback_type cb_type)
-{
-       acpi_notify_handler cb = NULL;
-       bool run = false;
-
-       mutex_lock(&hotplug_lock);
-
-       if (dd->hp_context) {
-               run = true;
-               dd->hp_refcount++;
-               if (dd->hp_ops) {
-                       switch (cb_type) {
-                       case DOCK_CALL_FIXUP:
-                               cb = dd->hp_ops->fixup;
-                               break;
-                       case DOCK_CALL_UEVENT:
-                               cb = dd->hp_ops->uevent;
-                               break;
-                       default:
-                               cb = dd->hp_ops->handler;
-                       }
+               notify = adev->hp->notify;
+               if (notify) {
+                       acpi_unlock_hp_context();
+                       notify(adev, event);
+                       return;
                }
        }
 
-       mutex_unlock(&hotplug_lock);
+ out:
+       acpi_unlock_hp_context();
+}
 
-       if (!run)
-               return;
+static struct dock_station *find_dock_station(acpi_handle handle)
+{
+       struct dock_station *ds;
 
-       if (cb)
-               cb(dd->handle, event, dd->hp_context);
+       list_for_each_entry(ds, &dock_stations, sibling)
+               if (ds->handle == handle)
+                       return ds;
 
-       dock_release_hotplug(dd);
+       return NULL;
 }
 
 /**
  * find_dock_dependent_device - get a device dependent on this dock
  * @ds: the dock station
- * @handle: the acpi_handle of the device we want
+ * @adev: ACPI device object to find.
  *
  * iterate over the dependent device list for this dock.  If the
  * dependent device matches the handle, return.
  */
 static struct dock_dependent_device *
-find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
+find_dock_dependent_device(struct dock_station *ds, struct acpi_device *adev)
 {
        struct dock_dependent_device *dd;
 
        list_for_each_entry(dd, &ds->dependent_devices, list)
-               if (handle == dd->handle)
+               if (adev == dd->adev)
                        return dd;
 
        return NULL;
 }
 
-/*****************************************************************************
- *                         Dock functions                                    *
- *****************************************************************************/
-static int __init is_battery(acpi_handle handle)
+void register_dock_dependent_device(struct acpi_device *adev,
+                                   acpi_handle dshandle)
 {
-       struct acpi_device_info *info;
-       int ret = 1;
+       struct dock_station *ds = find_dock_station(dshandle);
 
-       if (!ACPI_SUCCESS(acpi_get_object_info(handle, &info)))
-               return 0;
-       if (!(info->valid & ACPI_VALID_HID))
-               ret = 0;
-       else
-               ret = !strcmp("PNP0C0A", info->hardware_id.string);
-
-       kfree(info);
-       return ret;
+       if (ds && !find_dock_dependent_device(ds, adev))
+               add_dock_dependent_device(ds, adev);
 }
 
-/* Check whether ACPI object is an ejectable battery or disk bay */
-static bool __init is_ejectable_bay(acpi_handle handle)
-{
-       if (acpi_has_method(handle, "_EJ0") && is_battery(handle))
-               return true;
-
-       return acpi_bay_match(handle);
-}
+/*****************************************************************************
+ *                         Dock functions                                    *
+ *****************************************************************************/
 
 /**
  * is_dock_device - see if a device is on a dock station
- * @handle: acpi handle of the device
+ * @adev: ACPI device object to check.
  *
  * If this device is either the dock station itself,
  * or is a device dependent on the dock station, then it
  * is a dock device
  */
-int is_dock_device(acpi_handle handle)
+int is_dock_device(struct acpi_device *adev)
 {
        struct dock_station *dock_station;
 
        if (!dock_station_count)
                return 0;
 
-       if (acpi_dock_match(handle))
+       if (acpi_dock_match(adev->handle))
                return 1;
 
        list_for_each_entry(dock_station, &dock_stations, sibling)
-               if (find_dock_dependent_device(dock_station, handle))
+               if (find_dock_dependent_device(dock_station, adev))
                        return 1;
 
        return 0;
@@ -312,43 +247,6 @@ static int dock_present(struct dock_station *ds)
        return 0;
 }
 
-/**
- * dock_create_acpi_device - add new devices to acpi
- * @handle - handle of the device to add
- *
- *  This function will create a new acpi_device for the given
- *  handle if one does not exist already.  This should cause
- *  acpi to scan for drivers for the given devices, and call
- *  matching driver's add routine.
- */
-static void dock_create_acpi_device(acpi_handle handle)
-{
-       struct acpi_device *device = NULL;
-       int ret;
-
-       acpi_bus_get_device(handle, &device);
-       if (!acpi_device_enumerated(device)) {
-               ret = acpi_bus_scan(handle);
-               if (ret)
-                       pr_debug("error adding bus, %x\n", -ret);
-       }
-}
-
-/**
- * dock_remove_acpi_device - remove the acpi_device struct from acpi
- * @handle - the handle of the device to remove
- *
- *  Tell acpi to remove the acpi_device.  This should cause any loaded
- *  driver to have it's remove routine called.
- */
-static void dock_remove_acpi_device(acpi_handle handle)
-{
-       struct acpi_device *device;
-
-       if (!acpi_bus_get_device(handle, &device))
-               acpi_bus_trim(device);
-}
-
 /**
  * hot_remove_dock_devices - Remove dock station devices.
  * @ds: Dock station.
@@ -366,7 +264,7 @@ static void hot_remove_dock_devices(struct dock_station *ds)
                dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
 
        list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
-               dock_remove_acpi_device(dd->handle);
+               acpi_bus_trim(dd->adev);
 }
 
 /**
@@ -392,12 +290,20 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
                dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
 
        /*
-        * Now make sure that an acpi_device is created for each dependent
-        * device.  That will cause scan handlers to be attached to device
-        * objects or acpi_drivers to be stopped/started if they are present.
+        * Check if all devices have been enumerated already.  If not, run
+        * acpi_bus_scan() for them and that will cause scan handlers to be
+        * attached to device objects or acpi_drivers to be stopped/started if
+        * they are present.
         */
-       list_for_each_entry(dd, &ds->dependent_devices, list)
-               dock_create_acpi_device(dd->handle);
+       list_for_each_entry(dd, &ds->dependent_devices, list) {
+               struct acpi_device *adev = dd->adev;
+
+               if (!acpi_device_enumerated(adev)) {
+                       int ret = acpi_bus_scan(adev->handle);
+                       if (ret)
+                               dev_dbg(&adev->dev, "scan error %d\n", -ret);
+               }
+       }
 }
 
 static void dock_event(struct dock_station *ds, u32 event, int num)
@@ -500,71 +406,6 @@ static int dock_in_progress(struct dock_station *ds)
        return 0;
 }
 
-/**
- * register_hotplug_dock_device - register a hotplug function
- * @handle: the handle of the device
- * @ops: handlers to call after docking
- * @context: device specific data
- * @init: Optional initialization routine to run after registration
- * @release: Optional release routine to run on unregistration
- *
- * If a driver would like to perform a hotplug operation after a dock
- * event, they can register an acpi_notifiy_handler to be called by
- * the dock driver after _DCK is executed.
- */
-int register_hotplug_dock_device(acpi_handle handle,
-                                const struct acpi_dock_ops *ops, void *context,
-                                void (*init)(void *), void (*release)(void *))
-{
-       struct dock_dependent_device *dd;
-       struct dock_station *dock_station;
-       int ret = -EINVAL;
-
-       if (WARN_ON(!context))
-               return -EINVAL;
-
-       if (!dock_station_count)
-               return -ENODEV;
-
-       /*
-        * make sure this handle is for a device dependent on the dock,
-        * this would include the dock station itself
-        */
-       list_for_each_entry(dock_station, &dock_stations, sibling) {
-               /*
-                * An ATA bay can be in a dock and itself can be ejected
-                * separately, so there are two 'dock stations' which need the
-                * ops
-                */
-               dd = find_dock_dependent_device(dock_station, handle);
-               if (dd && !dock_init_hotplug(dd, ops, context, init, release))
-                       ret = 0;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(register_hotplug_dock_device);
-
-/**
- * unregister_hotplug_dock_device - remove yourself from the hotplug list
- * @handle: the acpi handle of the device
- */
-void unregister_hotplug_dock_device(acpi_handle handle)
-{
-       struct dock_dependent_device *dd;
-       struct dock_station *dock_station;
-
-       if (!dock_station_count)
-               return;
-
-       list_for_each_entry(dock_station, &dock_stations, sibling) {
-               dd = find_dock_dependent_device(dock_station, handle);
-               if (dd)
-                       dock_release_hotplug(dd);
-       }
-}
-EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
-
 /**
  * handle_eject_request - handle an undock request checking for error conditions
  *
@@ -598,20 +439,23 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
 }
 
 /**
- * dock_notify - act upon an acpi dock notification
- * @ds: dock station
- * @event: the acpi event
+ * dock_notify - Handle ACPI dock notification.
+ * @adev: Dock station's ACPI device object.
+ * @event: Event code.
  *
  * If we are notified to dock, then check to see if the dock is
  * present and then dock.  Notify all drivers of the dock event,
  * and then hotplug and devices that may need hotplugging.
  */
-static void dock_notify(struct dock_station *ds, u32 event)
+int dock_notify(struct acpi_device *adev, u32 event)
 {
-       acpi_handle handle = ds->handle;
-       struct acpi_device *adev = NULL;
+       acpi_handle handle = adev->handle;
+       struct dock_station *ds = find_dock_station(handle);
        int surprise_removal = 0;
 
+       if (!ds)
+               return -ENODEV;
+
        /*
         * According to acpi spec 3.0a, if a DEVICE_CHECK notification
         * is sent and _DCK is present, it is assumed to mean an undock
@@ -632,7 +476,6 @@ static void dock_notify(struct dock_station *ds, u32 event)
        switch (event) {
        case ACPI_NOTIFY_BUS_CHECK:
        case ACPI_NOTIFY_DEVICE_CHECK:
-               acpi_bus_get_device(handle, &adev);
                if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
                        begin_dock(ds);
                        dock(ds);
@@ -662,49 +505,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
                else
                        dock_event(ds, event, UNDOCK_EVENT);
                break;
-       default:
-               acpi_handle_err(handle, "Unknown dock event %d\n", event);
        }
-}
-
-static void acpi_dock_deferred_cb(void *data, u32 event)
-{
-       acpi_scan_lock_acquire();
-       dock_notify(data, event);
-       acpi_scan_lock_release();
-}
-
-static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
-{
-       if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
-          && event != ACPI_NOTIFY_EJECT_REQUEST)
-               return;
-
-       acpi_hotplug_execute(acpi_dock_deferred_cb, data, event);
-}
-
-/**
- * find_dock_devices - find devices on the dock station
- * @handle: the handle of the device we are examining
- * @lvl: unused
- * @context: the dock station private data
- * @rv: unused
- *
- * This function is called by acpi_walk_namespace.  It will
- * check to see if an object has an _EJD method.  If it does, then it
- * will see if it is dependent on the dock station.
- */
-static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
-                                           void *context, void **rv)
-{
-       struct dock_station *ds = context;
-       acpi_handle ejd = NULL;
-
-       acpi_bus_get_ejd(handle, &ejd);
-       if (ejd == ds->handle)
-               add_dock_dependent_device(ds, handle);
-
-       return AE_OK;
+       return 0;
 }
 
 /*
@@ -803,23 +605,28 @@ static struct attribute_group dock_attribute_group = {
 };
 
 /**
- * dock_add - add a new dock station
- * @handle: the dock station handle
+ * acpi_dock_add - Add a new dock station
+ * @adev: Dock station ACPI device object.
  *
- * allocated and initialize a new dock station device.  Find all devices
- * that are on the dock station, and register for dock event notifications.
+ * allocated and initialize a new dock station device.
  */
-static int __init dock_add(acpi_handle handle)
+void acpi_dock_add(struct acpi_device *adev)
 {
        struct dock_station *dock_station, ds = { NULL, };
+       struct platform_device_info pdevinfo;
+       acpi_handle handle = adev->handle;
        struct platform_device *dd;
-       acpi_status status;
        int ret;
 
-       dd = platform_device_register_data(NULL, "dock", dock_station_count,
-                                          &ds, sizeof(ds));
+       memset(&pdevinfo, 0, sizeof(pdevinfo));
+       pdevinfo.name = "dock";
+       pdevinfo.id = dock_station_count;
+       pdevinfo.acpi_node.companion = adev;
+       pdevinfo.data = &ds;
+       pdevinfo.size_data = sizeof(ds);
+       dd = platform_device_register_full(&pdevinfo);
        if (IS_ERR(dd))
-               return PTR_ERR(dd);
+               return;
 
        dock_station = dd->dev.platform_data;
 
@@ -837,72 +644,29 @@ static int __init dock_add(acpi_handle handle)
                dock_station->flags |= DOCK_IS_DOCK;
        if (acpi_ata_match(handle))
                dock_station->flags |= DOCK_IS_ATA;
-       if (is_battery(handle))
+       if (acpi_device_is_battery(adev))
                dock_station->flags |= DOCK_IS_BAT;
 
        ret = sysfs_create_group(&dd->dev.kobj, &dock_attribute_group);
        if (ret)
                goto err_unregister;
 
-       /* Find dependent devices */
-       acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-                           ACPI_UINT32_MAX, find_dock_devices, NULL,
-                           dock_station, NULL);
-
        /* add the dock station as a device dependent on itself */
-       ret = add_dock_dependent_device(dock_station, handle);
+       ret = add_dock_dependent_device(dock_station, adev);
        if (ret)
                goto err_rmgroup;
 
-       status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                                            dock_notify_handler, dock_station);
-       if (ACPI_FAILURE(status)) {
-               ret = -ENODEV;
-               goto err_rmgroup;
-       }
-
        dock_station_count++;
        list_add(&dock_station->sibling, &dock_stations);
-       return 0;
+       adev->flags.is_dock_station = true;
+       dev_info(&adev->dev, "ACPI dock station (docks/bays count: %d)\n",
+                dock_station_count);
+       return;
 
 err_rmgroup:
-       remove_dock_dependent_devices(dock_station);
        sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
+
 err_unregister:
        platform_device_unregister(dd);
        acpi_handle_err(handle, "%s encountered error %d\n", __func__, ret);
-       return ret;
-}
-
-/**
- * find_dock_and_bay - look for dock stations and bays
- * @handle: acpi handle of a device
- * @lvl: unused
- * @context: unused
- * @rv: unused
- *
- * This is called by acpi_walk_namespace to look for dock stations and bays.
- */
-static acpi_status __init
-find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
-       if (acpi_dock_match(handle) || is_ejectable_bay(handle))
-               dock_add(handle);
-
-       return AE_OK;
-}
-
-void __init acpi_dock_init(void)
-{
-       /* look for dock stations and bays */
-       acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-               ACPI_UINT32_MAX, find_dock_and_bay, NULL, NULL, NULL);
-
-       if (!dock_station_count) {
-               pr_info(PREFIX "No dock devices found.\n");
-               return;
-       }
-
-       pr_info(PREFIX "%s: %d docks/bays found\n",
-               ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
 }
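The dock.c rework above removes the register_hotplug_dock_device()/acpi_dock_ops machinery; dock_hotplug_event() now consults the per-device hotplug context in adev->hp instead. Under that model a hotplug-aware driver attaches its callbacks roughly as sketched below - the driver-side names are hypothetical, and acpi_initialize_hp_context() is the helper added by the scan.c hunk later in this diff:

/*
 * Hypothetical driver attaching a hotplug context to its ACPI companion.
 */
#include <linux/acpi.h>

static struct acpi_hotplug_context example_hp;

static int example_hp_notify(struct acpi_device *adev, u32 event)
{
	dev_dbg(&adev->dev, "hotplug event 0x%x\n", event);
	return 0;
}

static void example_hp_uevent(struct acpi_device *adev, u32 event)
{
	/* forward the event to user space here if needed */
}

static void example_attach(struct acpi_device *adev)
{
	acpi_initialize_hp_context(adev, &example_hp,
				   example_hp_notify, example_hp_uevent);
}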
index 09e423f3d8ad30fbd16d2d2b2e2766e69e64de82..8acf53e6296605a013a6359299c3e9cf29bcbf67 100644 (file)
@@ -55,11 +55,16 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
 #ifdef CONFIG_PM_SLEEP
 static int acpi_fan_suspend(struct device *dev);
 static int acpi_fan_resume(struct device *dev);
+static struct dev_pm_ops acpi_fan_pm = {
+       .resume = acpi_fan_resume,
+       .freeze = acpi_fan_suspend,
+       .thaw = acpi_fan_resume,
+       .restore = acpi_fan_resume,
+};
+#define FAN_PM_OPS_PTR (&acpi_fan_pm)
 #else
-#define acpi_fan_suspend NULL
-#define acpi_fan_resume NULL
+#define FAN_PM_OPS_PTR NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
 
 static struct acpi_driver acpi_fan_driver = {
        .name = "fan",
@@ -69,7 +74,7 @@ static struct acpi_driver acpi_fan_driver = {
                .add = acpi_fan_add,
                .remove = acpi_fan_remove,
                },
-       .drv.pm = &acpi_fan_pm,
+       .drv.pm = FAN_PM_OPS_PTR,
 };
 
 /* thermal cooling device callbacks */
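The fan driver now builds its dev_pm_ops only when CONFIG_PM_SLEEP is enabled and otherwise hands the driver core a NULL pointer, so no sleep callbacks are registered on configurations that cannot use them. The same conditional pattern in a hypothetical driver, for reference (all names below are illustrative):

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops example_pm = {
	.suspend = example_suspend,
	.resume  = example_resume,
	.freeze  = example_suspend,
	.thaw    = example_resume,
	.restore = example_resume,
};
#define EXAMPLE_PM_OPS (&example_pm)	/* assigned to the driver's .pm */
#else
/* No callbacks at all when sleep support is compiled out. */
#define EXAMPLE_PM_OPS NULL
#endif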
index 0c789224d40d4da34aa3c7f1a9314bb12ba7b459..f774c65ecb8bb03065ba406e658710aead5d02c0 100644 (file)
@@ -287,6 +287,7 @@ EXPORT_SYMBOL_GPL(acpi_unbind_one);
 static int acpi_platform_notify(struct device *dev)
 {
        struct acpi_bus_type *type = acpi_get_bus_type(dev);
+       struct acpi_device *adev;
        int ret;
 
        ret = acpi_bind_one(dev, NULL);
@@ -303,9 +304,14 @@ static int acpi_platform_notify(struct device *dev)
                if (ret)
                        goto out;
        }
+       adev = ACPI_COMPANION(dev);
+       if (!adev)
+               goto out;
 
        if (type && type->setup)
                type->setup(dev);
+       else if (adev->handler && adev->handler->bind)
+               adev->handler->bind(dev);
 
  out:
 #if ACPI_GLUE_DEBUG
@@ -324,11 +330,17 @@ static int acpi_platform_notify(struct device *dev)
 
 static int acpi_platform_notify_remove(struct device *dev)
 {
+       struct acpi_device *adev = ACPI_COMPANION(dev);
        struct acpi_bus_type *type;
 
+       if (!adev)
+               return 0;
+
        type = acpi_get_bus_type(dev);
        if (type && type->cleanup)
                type->cleanup(dev);
+       else if (adev->handler && adev->handler->unbind)
+               adev->handler->unbind(dev);
 
        acpi_unbind_one(dev);
        return 0;
index dedbb2d802f1a87bd5a3c288067124514cad9824..957391306cbf32953457b1e8a540a66b32bd253b 100644 (file)
@@ -37,9 +37,15 @@ void acpi_container_init(void);
 static inline void acpi_container_init(void) {}
 #endif
 #ifdef CONFIG_ACPI_DOCK
-void acpi_dock_init(void);
+void register_dock_dependent_device(struct acpi_device *adev,
+                                   acpi_handle dshandle);
+int dock_notify(struct acpi_device *adev, u32 event);
+void acpi_dock_add(struct acpi_device *adev);
 #else
-static inline void acpi_dock_init(void) {}
+static inline void register_dock_dependent_device(struct acpi_device *adev,
+                                                 acpi_handle dshandle) {}
+static inline int dock_notify(struct acpi_device *adev, u32 event) { return -ENODEV; }
+static inline void acpi_dock_add(struct acpi_device *adev) {}
 #endif
 #ifdef CONFIG_ACPI_HOTPLUG_MEMORY
 void acpi_memory_hotplug_init(void);
@@ -72,7 +78,9 @@ void acpi_lpss_init(void);
 static inline void acpi_lpss_init(void) {}
 #endif
 
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src);
 bool acpi_queue_hotplug_work(struct work_struct *work);
+void acpi_device_hotplug(struct acpi_device *adev, u32 src);
 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
 
 /* --------------------------------------------------------------------------
@@ -90,6 +98,7 @@ void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
 int acpi_bind_one(struct device *dev, struct acpi_device *adev);
 int acpi_unbind_one(struct device *dev);
 bool acpi_device_is_present(struct acpi_device *adev);
+bool acpi_device_is_battery(struct acpi_device *adev);
 
 /* --------------------------------------------------------------------------
                                   Power Resource
index fc1aa7909690c3ef148930a4a96080d2bcaaa155..27f84af4e337df87540e3c4e1e84f358eef7cb3c 100644 (file)
@@ -52,7 +52,7 @@
 
 #define _COMPONENT             ACPI_OS_SERVICES
 ACPI_MODULE_NAME("osl");
-#define PREFIX         "ACPI: "
+
 struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
@@ -1168,8 +1168,7 @@ void acpi_os_wait_events_complete(void)
 
 struct acpi_hp_work {
        struct work_struct work;
-       acpi_hp_callback func;
-       void *data;
+       struct acpi_device *adev;
        u32 src;
 };
 
@@ -1178,25 +1177,24 @@ static void acpi_hotplug_work_fn(struct work_struct *work)
        struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
 
        acpi_os_wait_events_complete();
-       hpw->func(hpw->data, hpw->src);
+       acpi_device_hotplug(hpw->adev, hpw->src);
        kfree(hpw);
 }
 
-acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src)
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
 {
        struct acpi_hp_work *hpw;
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                 "Scheduling function [%p(%p, %u)] for deferred execution.\n",
-                 func, data, src));
+                 "Scheduling hotplug event (%p, %u) for deferred execution.\n",
+                 adev, src));
 
        hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
        if (!hpw)
                return AE_NO_MEMORY;
 
        INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
-       hpw->func = func;
-       hpw->data = data;
+       hpw->adev = adev;
        hpw->src = src;
        /*
         * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
@@ -1780,6 +1778,17 @@ static int __init acpi_no_auto_ssdt_setup(char *s)
 
 __setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
 
+static int __init acpi_disable_return_repair(char *s)
+{
+       printk(KERN_NOTICE PREFIX
+              "ACPI: Predefined validation mechanism disabled\n");
+       acpi_gbl_disable_auto_repair = TRUE;
+
+       return 1;
+}
+
+__setup("acpica_no_return_repair", acpi_disable_return_repair);
+
 acpi_status __init acpi_os_initialize(void)
 {
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
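acpi_hotplug_schedule() above packs the struct acpi_device pointer and the event code into an acpi_hp_work item and defers the real work to process context. The generic shape of that allocate/INIT_WORK/queue pattern, with purely illustrative names:

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct deferred_event {
	struct work_struct work;
	void *target;
	u32 code;
};

static void deferred_event_fn(struct work_struct *work)
{
	struct deferred_event *ev =
		container_of(work, struct deferred_event, work);

	/* handle ev->target / ev->code here, then free the work item */
	kfree(ev);
}

static int schedule_deferred_event(void *target, u32 code)
{
	struct deferred_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	INIT_WORK(&ev->work, deferred_event_fn);
	ev->target = target;
	ev->code = code;
	schedule_work(&ev->work);
	return 0;
}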
index 361b40c10c3f522e28e2b334dd5f8976109bc772..9c62340c2360b5960bdde09dc5172ca763506473 100644 (file)
@@ -370,6 +370,30 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
        return NULL;
 }
 
+#if IS_ENABLED(CONFIG_ISA) || IS_ENABLED(CONFIG_EISA)
+static int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+       u32 dev_gsi;
+
+       /* Interrupt Line values above 0xF are forbidden */
+       if (dev->irq > 0 && (dev->irq <= 0xF) &&
+           (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
+               dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
+                        pin_name(dev->pin), dev->irq);
+               acpi_register_gsi(&dev->dev, dev_gsi,
+                                 ACPI_LEVEL_SENSITIVE,
+                                 ACPI_ACTIVE_LOW);
+               return 0;
+       }
+       return -EINVAL;
+}
+#else
+static inline int acpi_isa_register_gsi(struct pci_dev *dev)
+{
+       return -ENODEV;
+}
+#endif
+
 int acpi_pci_irq_enable(struct pci_dev *dev)
 {
        struct acpi_prt_entry *entry;
@@ -416,19 +440,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
         * driver reported one, then use it. Exit in any case.
         */
        if (gsi < 0) {
-               u32 dev_gsi;
-               /* Interrupt Line values above 0xF are forbidden */
-               if (dev->irq > 0 && (dev->irq <= 0xF) &&
-                   (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
-                       dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
-                                pin_name(pin), dev->irq);
-                       acpi_register_gsi(&dev->dev, dev_gsi,
-                                         ACPI_LEVEL_SENSITIVE,
-                                         ACPI_ACTIVE_LOW);
-               } else {
+               if (acpi_isa_register_gsi(dev))
                        dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
                                 pin_name(pin));
-               }
 
                kfree(entry);
                return 0;
index 9418c7a1f78665b32c980f815e789f152e973cb5..cfd7581cc19fa24c37d1a3fb7cb370e33a63c760 100644 (file)
@@ -43,8 +43,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT                     ACPI_PCI_COMPONENT
 ACPI_MODULE_NAME("pci_link");
 #define ACPI_PCI_LINK_CLASS            "pci_irq_routing"
index c1c4102e647898717fb8e3c7acc440561d608d8a..d388f13d48b43634acaaddaf526f037051db78c0 100644 (file)
@@ -39,8 +39,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT             ACPI_PCI_COMPONENT
 ACPI_MODULE_NAME("pci_root");
 #define ACPI_PCI_ROOT_CLASS            "pci_bridge"
@@ -51,7 +49,7 @@ static void acpi_pci_root_remove(struct acpi_device *device);
 
 static int acpi_pci_root_scan_dependent(struct acpi_device *adev)
 {
-       acpiphp_check_host_bridge(adev->handle);
+       acpiphp_check_host_bridge(adev);
        return 0;
 }
 
index ad7da686e6e6f8d9745a4edf866e174b99b08538..e0bcfb642b52654f1dfb2f8948c1cac62ff96c68 100644 (file)
@@ -46,8 +46,6 @@
 #include "sleep.h"
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define _COMPONENT                     ACPI_POWER_COMPONENT
 ACPI_MODULE_NAME("power");
 #define ACPI_POWER_CLASS               "power_resource"
index a4eea9a508d3efd977fe97563b967b776a49a084..86d73d5d503f7708f52444e6a10bb4da285e3036 100644 (file)
 
 #include "internal.h"
 
-#define PREFIX                 "ACPI: "
 #define _COMPONENT             ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_core");
 
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
-       printk(KERN_NOTICE PREFIX "%s detected - "
-               "disabling mwait for CPU C-states\n", id->ident);
-       boot_option_idle_override = IDLE_NOMWAIT;
-       return 0;
-}
-
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
-       {
-       set_no_mwait, "Extensa 5220", {
-       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-       DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
-       DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
-       {},
-};
-
 static int map_lapic_id(struct acpi_subtable_header *entry,
                 u32 acpi_id, int *apic_id)
 {
@@ -89,6 +70,28 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
        return 0;
 }
 
+static int map_gic_id(struct acpi_subtable_header *entry,
+               int device_declaration, u32 acpi_id, int *apic_id)
+{
+       struct acpi_madt_generic_interrupt *gic =
+               (struct acpi_madt_generic_interrupt *)entry;
+
+       if (!(gic->flags & ACPI_MADT_ENABLED))
+               return -ENODEV;
+
+       /*
+        * In the GIC interrupt model, logical processors are
+        * required to have a Processor Device object in the DSDT,
+        * so we should check device_declaration here
+        */
+       if (device_declaration && (gic->uid == acpi_id)) {
+               *apic_id = gic->gic_id;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
 static int map_madt_entry(int type, u32 acpi_id)
 {
        unsigned long madt_end, entry;
@@ -124,6 +127,9 @@ static int map_madt_entry(int type, u32 acpi_id)
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (!map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
+               } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
+                       if (!map_gic_id(header, type, acpi_id, &apic_id))
+                               break;
                }
                entry += header->length;
        }
@@ -154,6 +160,8 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
+       } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
+               map_gic_id(header, type, acpi_id, &apic_id);
        }
 
 exit:
@@ -323,7 +331,7 @@ static struct acpi_object_list *acpi_processor_alloc_pdc(void)
  * _PDC is required for a BIOS-OS handshake for most of the newer
  * ACPI processor features.
  */
-static int
+static acpi_status
 acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
 {
        acpi_status status = AE_OK;
@@ -379,16 +387,43 @@ early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
        return AE_OK;
 }
 
-void __init acpi_early_processor_set_pdc(void)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+static int __init set_no_mwait(const struct dmi_system_id *id)
+{
+       pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
+                 id->ident);
+       boot_option_idle_override = IDLE_NOMWAIT;
+       return 0;
+}
+
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+       {
+       set_no_mwait, "Extensa 5220", {
+       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+       DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+       DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+       {},
+};
+
+static void __init processor_dmi_check(void)
 {
        /*
         * Check whether the system is DMI table. If yes, OSPM
         * should not use mwait for CPU-states.
         */
        dmi_check_system(processor_idle_dmi_table);
+}
+#else
+static inline void processor_dmi_check(void) {}
+#endif
+
+void __init acpi_early_processor_set_pdc(void)
+{
+       processor_dmi_check();
 
        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
-       acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL);
+       acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
 }
index c1c35623550fd723e74a63d5abfaea1ef724e3ab..7f70f3182d506ffc41ce61f370e047d514c34db3 100644 (file)
@@ -41,8 +41,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80
 #define ACPI_PROCESSOR_NOTIFY_POWER    0x81
 #define ACPI_PROCESSOR_NOTIFY_THROTTLING       0x82
index ff90054f04fdb88e15e1279b23b43d78884596f9..cfc8aba72f86d02eb0c9ae53576382b6ef76f287 100644 (file)
@@ -156,17 +156,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
  */
 static void acpi_processor_ppc_ost(acpi_handle handle, int status)
 {
-       union acpi_object params[2] = {
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_INTEGER,},
-       };
-       struct acpi_object_list arg_list = {2, params};
-
-       if (acpi_has_method(handle, "_OST")) {
-               params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
-               params[1].integer.value =  status;
-               acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-       }
+       if (acpi_has_method(handle, "_OST"))
+               acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
+                                 status, NULL);
 }
 
 int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
index dbd48498b93863a1fa37ab0b23706f2dae89345d..366ca40a6f703433efc1016a658eaac9e28c5c48 100644 (file)
 #include <linux/power_supply.h>
 
 #include "sbshc.h"
+#include "battery.h"
 
 #define PREFIX "ACPI: "
 
 #define ACPI_SBS_CLASS                 "sbs"
 #define ACPI_AC_CLASS                  "ac_adapter"
-#define ACPI_BATTERY_CLASS             "battery"
 #define ACPI_SBS_DEVICE_NAME           "Smart Battery System"
 #define ACPI_SBS_FILE_INFO             "info"
 #define ACPI_SBS_FILE_STATE            "state"
index 57b053f424d13e23ef2dd4b43a191c0d9dda09da..7efe546a8c42704137f3cbae8a3159485f3473b9 100644 (file)
@@ -41,6 +41,7 @@ static DEFINE_MUTEX(acpi_scan_lock);
 static LIST_HEAD(acpi_scan_handlers_list);
 DEFINE_MUTEX(acpi_device_lock);
 LIST_HEAD(acpi_wakeup_device_list);
+static DEFINE_MUTEX(acpi_hp_context_lock);
 
 struct acpi_device_bus_id{
        char bus_id[15];
@@ -60,6 +61,27 @@ void acpi_scan_lock_release(void)
 }
 EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
 
+void acpi_lock_hp_context(void)
+{
+       mutex_lock(&acpi_hp_context_lock);
+}
+
+void acpi_unlock_hp_context(void)
+{
+       mutex_unlock(&acpi_hp_context_lock);
+}
+
+void acpi_initialize_hp_context(struct acpi_device *adev,
+                               struct acpi_hotplug_context *hp,
+                               int (*notify)(struct acpi_device *, u32),
+                               void (*uevent)(struct acpi_device *, u32))
+{
+       acpi_lock_hp_context();
+       acpi_set_hp_context(adev, hp, notify, uevent, NULL);
+       acpi_unlock_hp_context();
+}
+EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
+
 int acpi_scan_add_handler(struct acpi_scan_handler *handler)
 {
        if (!handler || !handler->attach)
@@ -439,90 +461,75 @@ static int acpi_scan_bus_check(struct acpi_device *adev)
        return 0;
 }
 
-static void acpi_device_hotplug(void *data, u32 src)
+static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
 {
-       u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-       struct acpi_device *adev = data;
-       int error;
-
-       lock_device_hotplug();
-       mutex_lock(&acpi_scan_lock);
-
-       /*
-        * The device object's ACPI handle cannot become invalid as long as we
-        * are holding acpi_scan_lock, but it may have become invalid before
-        * that lock was acquired.
-        */
-       if (adev->handle == INVALID_ACPI_HANDLE)
-               goto out;
-
-       switch (src) {
+       switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
-               error = acpi_scan_bus_check(adev);
-               break;
+               return acpi_scan_bus_check(adev);
        case ACPI_NOTIFY_DEVICE_CHECK:
-               error = acpi_scan_device_check(adev);
-               break;
+               return acpi_scan_device_check(adev);
        case ACPI_NOTIFY_EJECT_REQUEST:
        case ACPI_OST_EC_OSPM_EJECT:
-               error = acpi_scan_hot_remove(adev);
-               break;
-       default:
-               error = -EINVAL;
-               break;
+               if (adev->handler && !adev->handler->hotplug.enabled) {
+                       dev_info(&adev->dev, "Eject disabled\n");
+                       return -EPERM;
+               }
+               acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
+                                 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+               return acpi_scan_hot_remove(adev);
        }
-       if (!error)
-               ost_code = ACPI_OST_SC_SUCCESS;
-
- out:
-       acpi_evaluate_hotplug_ost(adev->handle, src, ost_code, NULL);
-       put_device(&adev->dev);
-       mutex_unlock(&acpi_scan_lock);
-       unlock_device_hotplug();
+       return -EINVAL;
 }
 
-static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
+void acpi_device_hotplug(struct acpi_device *adev, u32 src)
 {
        u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-       struct acpi_device *adev;
-       acpi_status status;
+       int error = -ENODEV;
 
-       if (acpi_bus_get_device(handle, &adev))
-               goto err_out;
+       lock_device_hotplug();
+       mutex_lock(&acpi_scan_lock);
 
-       switch (type) {
-       case ACPI_NOTIFY_BUS_CHECK:
-               acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
-               break;
-       case ACPI_NOTIFY_DEVICE_CHECK:
-               acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
-               break;
-       case ACPI_NOTIFY_EJECT_REQUEST:
-               acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-               if (!adev->handler)
-                       goto err_out;
+       /*
+        * The device object's ACPI handle cannot become invalid as long as we
+        * are holding acpi_scan_lock, but it might have become invalid before
+        * that lock was acquired.
+        */
+       if (adev->handle == INVALID_ACPI_HANDLE)
+               goto err_out;
 
-               if (!adev->handler->hotplug.enabled) {
-                       acpi_handle_err(handle, "Eject disabled\n");
+       if (adev->flags.is_dock_station) {
+               error = dock_notify(adev, src);
+       } else if (adev->flags.hotplug_notify) {
+               error = acpi_generic_hotplug_event(adev, src);
+               if (error == -EPERM) {
                        ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
                        goto err_out;
                }
-               acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
-                                         ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
-               break;
-       default:
-               /* non-hotplug event; possibly handled by other handler */
-               return;
-       }
-       get_device(&adev->dev);
-       status = acpi_hotplug_execute(acpi_device_hotplug, adev, type);
-       if (ACPI_SUCCESS(status))
-               return;
+       } else {
+               int (*notify)(struct acpi_device *, u32);
 
-       put_device(&adev->dev);
+               acpi_lock_hp_context();
+               notify = adev->hp ? adev->hp->notify : NULL;
+               acpi_unlock_hp_context();
+               /*
+                * There may be additional notify handlers for device objects
+                * without the .event() callback, so ignore them here.
+                */
+               if (notify)
+                       error = notify(adev, src);
+               else
+                       goto out;
+       }
+       if (!error)
+               ost_code = ACPI_OST_SC_SUCCESS;
 
  err_out:
-       acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
+       acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
+
+ out:
+       acpi_bus_put_acpi_device(adev);
+       mutex_unlock(&acpi_scan_lock);
+       unlock_device_hotplug();
 }
 
 static ssize_t real_power_state_show(struct device *dev,
@@ -570,17 +577,14 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
        if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
                return -ENODEV;
 
-       acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
-                                 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
        get_device(&acpi_device->dev);
-       status = acpi_hotplug_execute(acpi_device_hotplug, acpi_device,
-                                     ACPI_OST_EC_OSPM_EJECT);
+       status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
        if (ACPI_SUCCESS(status))
                return count;
 
        put_device(&acpi_device->dev);
-       acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
-                                 ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+       acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+                         ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
        return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
 }
 
@@ -1114,14 +1118,16 @@ static void acpi_scan_drop_device(acpi_handle handle, void *context)
        mutex_unlock(&acpi_device_del_lock);
 }
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
+                               void (*callback)(void *))
 {
        acpi_status status;
 
        if (!device)
                return -EINVAL;
 
-       status = acpi_get_data(handle, acpi_scan_drop_device, (void **)device);
+       status = acpi_get_data_full(handle, acpi_scan_drop_device,
+                                   (void **)device, callback);
        if (ACPI_FAILURE(status) || !*device) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
                                  handle));
@@ -1129,8 +1135,32 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
        }
        return 0;
 }
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+{
+       return acpi_get_device_data(handle, device, NULL);
+}
 EXPORT_SYMBOL(acpi_bus_get_device);
 
+static void get_acpi_device(void *dev)
+{
+       if (dev)
+               get_device(&((struct acpi_device *)dev)->dev);
+}
+
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
+{
+       struct acpi_device *adev = NULL;
+
+       acpi_get_device_data(handle, &adev, get_acpi_device);
+       return adev;
+}
+
+void acpi_bus_put_acpi_device(struct acpi_device *adev)
+{
+       put_device(&adev->dev);
+}
+
 int acpi_device_add(struct acpi_device *device,
                    void (*release)(struct device *))
 {
@@ -1641,6 +1671,27 @@ bool acpi_bay_match(acpi_handle handle)
        return acpi_ata_match(phandle);
 }
 
+bool acpi_device_is_battery(struct acpi_device *adev)
+{
+       struct acpi_hardware_id *hwid;
+
+       list_for_each_entry(hwid, &adev->pnp.ids, list)
+               if (!strcmp("PNP0C0A", hwid->id))
+                       return true;
+
+       return false;
+}
+
+static bool is_ejectable_bay(struct acpi_device *adev)
+{
+       acpi_handle handle = adev->handle;
+
+       if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
+               return true;
+
+       return acpi_bay_match(handle);
+}
+
 /*
  * acpi_dock_match - see if an acpi object has a _DCK method
  */
@@ -1706,6 +1757,20 @@ static bool acpi_ibm_smbus_match(acpi_handle handle)
        return false;
 }
 
+static bool acpi_object_is_system_bus(acpi_handle handle)
+{
+       acpi_handle tmp;
+
+       if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
+           tmp == handle)
+               return true;
+       if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
+           tmp == handle)
+               return true;
+
+       return false;
+}
+
 static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                                int device_type)
 {
@@ -1757,8 +1822,10 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                        acpi_add_id(pnp, ACPI_DOCK_HID);
                else if (acpi_ibm_smbus_match(handle))
                        acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
-               else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
-                       acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+               else if (list_empty(&pnp->ids) &&
+                        acpi_object_is_system_bus(handle)) {
+                       /* \_SB, \_TZ, LNXSYBUS */
+                       acpi_add_id(pnp, ACPI_BUS_HID);
                        strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
                        strcpy(pnp->device_class, ACPI_BUS_CLASS);
                }
@@ -1941,33 +2008,23 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
        mutex_unlock(&acpi_scan_lock);
 }
 
-static void acpi_scan_init_hotplug(acpi_handle handle, int type)
+static void acpi_scan_init_hotplug(struct acpi_device *adev)
 {
-       struct acpi_device_pnp pnp = {};
        struct acpi_hardware_id *hwid;
-       struct acpi_scan_handler *handler;
 
-       INIT_LIST_HEAD(&pnp.ids);
-       acpi_set_pnp_ids(handle, &pnp, type);
-
-       if (!pnp.type.hardware_id)
-               goto out;
+       if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
+               acpi_dock_add(adev);
+               return;
+       }
+       list_for_each_entry(hwid, &adev->pnp.ids, list) {
+               struct acpi_scan_handler *handler;
 
-       /*
-        * This relies on the fact that acpi_install_notify_handler() will not
-        * install the same notify handler routine twice for the same handle.
-        */
-       list_for_each_entry(hwid, &pnp.ids, list) {
                handler = acpi_scan_match_handler(hwid->id, NULL);
                if (handler) {
-                       acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                                       acpi_hotplug_notify_cb, handler);
+                       adev->flags.hotplug_notify = true;
                        break;
                }
        }
-
-out:
-       acpi_free_pnp_ids(&pnp);
 }
 
 static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
@@ -1991,12 +2048,12 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
                return AE_OK;
        }
 
-       acpi_scan_init_hotplug(handle, type);
-
        acpi_add_single_object(&device, handle, type, sta);
        if (!device)
                return AE_CTRL_DEPTH;
 
+       acpi_scan_init_hotplug(device);
+
  out:
        if (!*return_value)
                *return_value = device;
@@ -2015,13 +2072,14 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
 
                handler = acpi_scan_match_handler(hwid->id, &devid);
                if (handler) {
+                       device->handler = handler;
                        ret = handler->attach(device, devid);
-                       if (ret > 0) {
-                               device->handler = handler;
+                       if (ret > 0)
                                break;
-                       } else if (ret < 0) {
+
+                       device->handler = NULL;
+                       if (ret < 0)
                                break;
-                       }
                }
        }
        return ret;
@@ -2030,8 +2088,12 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
 static void acpi_bus_attach(struct acpi_device *device)
 {
        struct acpi_device *child;
+       acpi_handle ejd;
        int ret;
 
+       if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
+               register_dock_dependent_device(device, ejd);
+
        acpi_bus_get_status(device);
        /* Skip devices that are not present. */
        if (!acpi_device_is_present(device)) {
@@ -2184,7 +2246,6 @@ int __init acpi_scan_init(void)
        acpi_cmos_rtc_init();
        acpi_container_init();
        acpi_memory_hotplug_init();
-       acpi_dock_init();
 
        mutex_lock(&acpi_scan_lock);
        /*
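
For orientation, a minimal sketch of how the counted-reference helpers added above are meant to fit together (the example_* name is hypothetical; in the commit itself the event is routed through acpi_hotplug_schedule(), and error handling is elided here):

/* Sketch: look up a device with a reference held, then hand it off. */
static void example_handle_event(acpi_handle handle, u32 event)
{
        struct acpi_device *adev = acpi_bus_get_acpi_device(handle);

        if (!adev)
                return;                 /* no struct acpi_device bound yet */

        /* acpi_device_hotplug() drops the reference when it is done */
        acpi_device_hotplug(adev, event);
}
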
index 91a32cefb11f6098fbea62d9744ed61ed864c4a9..38cb9782d4b871f54c646fc360f922bac03a68fd 100644 (file)
@@ -12,8 +12,6 @@
 #define _COMPONENT             ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("sysfs");
 
-#define PREFIX "ACPI: "
-
 #ifdef CONFIG_ACPI_DEBUG
 /*
  * ACPI debug sysfs I/F, including:
index 5837f857ac2e82aff69a5d9bf7e619ace323d946..21782290df41f3383140667edffbdddbb2ec1ae7 100644 (file)
@@ -23,6 +23,8 @@
  *
  */
 
+#define pr_fmt(fmt) "ACPI: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/smp.h>
@@ -33,8 +35,6 @@
 #include <linux/acpi.h>
 #include <linux/bootmem.h>
 
-#define PREFIX                 "ACPI: "
-
 #define ACPI_MAX_TABLES                128
 
 static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
@@ -55,10 +55,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic *p =
                            (struct acpi_madt_local_apic *)header;
-                       printk(KERN_INFO PREFIX
-                              "LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
-                              p->processor_id, p->id,
-                              (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+                       pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+                               p->processor_id, p->id,
+                               (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
                }
                break;
 
@@ -66,11 +65,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_x2apic *p =
                            (struct acpi_madt_local_x2apic *)header;
-                       printk(KERN_INFO PREFIX
-                              "X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
-                              p->local_apic_id, p->uid,
-                              (p->lapic_flags & ACPI_MADT_ENABLED) ?
-                              "enabled" : "disabled");
+                       pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
+                               p->local_apic_id, p->uid,
+                               (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
                }
                break;
 
@@ -78,9 +75,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_io_apic *p =
                            (struct acpi_madt_io_apic *)header;
-                       printk(KERN_INFO PREFIX
-                              "IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
-                              p->id, p->address, p->global_irq_base);
+                       pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+                               p->id, p->address, p->global_irq_base);
                }
                break;
 
@@ -88,18 +84,15 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_interrupt_override *p =
                            (struct acpi_madt_interrupt_override *)header;
-                       printk(KERN_INFO PREFIX
-                              "INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
-                              p->bus, p->source_irq, p->global_irq,
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
+                       pr_info("INT_SRC_OVR (bus %d bus_irq %d global_irq %d %s %s)\n",
+                               p->bus, p->source_irq, p->global_irq,
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2]);
                        if (p->inti_flags  &
                            ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK))
-                               printk(KERN_INFO PREFIX
-                                      "INT_SRC_OVR unexpected reserved flags: 0x%x\n",
-                                      p->inti_flags  &
+                               pr_info("INT_SRC_OVR unexpected reserved flags: 0x%x\n",
+                                       p->inti_flags  &
                                        ~(ACPI_MADT_POLARITY_MASK | ACPI_MADT_TRIGGER_MASK));
-
                }
                break;
 
@@ -107,11 +100,10 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_nmi_source *p =
                            (struct acpi_madt_nmi_source *)header;
-                       printk(KERN_INFO PREFIX
-                              "NMI_SRC (%s %s global_irq %d)\n",
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
-                              p->global_irq);
+                       pr_info("NMI_SRC (%s %s global_irq %d)\n",
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+                               p->global_irq);
                }
                break;
 
@@ -119,12 +111,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic_nmi *p =
                            (struct acpi_madt_local_apic_nmi *)header;
-                       printk(KERN_INFO PREFIX
-                              "LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
-                              p->processor_id,
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK  ],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
-                              p->lint);
+                       pr_info("LAPIC_NMI (acpi_id[0x%02x] %s %s lint[0x%x])\n",
+                               p->processor_id,
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK ],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+                               p->lint);
                }
                break;
 
@@ -137,12 +128,11 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                        polarity = p->inti_flags & ACPI_MADT_POLARITY_MASK;
                        trigger = (p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2;
 
-                       printk(KERN_INFO PREFIX
-                              "X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
-                              p->uid,
-                              mps_inti_flags_polarity[polarity],
-                              mps_inti_flags_trigger[trigger],
-                              p->lint);
+                       pr_info("X2APIC_NMI (uid[0x%02x] %s %s lint[0x%x])\n",
+                               p->uid,
+                               mps_inti_flags_polarity[polarity],
+                               mps_inti_flags_trigger[trigger],
+                               p->lint);
                }
                break;
 
@@ -150,9 +140,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_apic_override *p =
                            (struct acpi_madt_local_apic_override *)header;
-                       printk(KERN_INFO PREFIX
-                              "LAPIC_ADDR_OVR (address[%p])\n",
-                              (void *)(unsigned long)p->address);
+                       pr_info("LAPIC_ADDR_OVR (address[%p])\n",
+                               (void *)(unsigned long)p->address);
                }
                break;
 
@@ -160,10 +149,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_io_sapic *p =
                            (struct acpi_madt_io_sapic *)header;
-                       printk(KERN_INFO PREFIX
-                              "IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
-                              p->id, (void *)(unsigned long)p->address,
-                              p->global_irq_base);
+                       pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+                               p->id, (void *)(unsigned long)p->address,
+                               p->global_irq_base);
                }
                break;
 
@@ -171,10 +159,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_local_sapic *p =
                            (struct acpi_madt_local_sapic *)header;
-                       printk(KERN_INFO PREFIX
-                              "LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
-                              p->processor_id, p->id, p->eid,
-                              (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+                       pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+                               p->processor_id, p->id, p->eid,
+                               (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
                }
                break;
 
@@ -182,19 +169,17 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
                {
                        struct acpi_madt_interrupt_source *p =
                            (struct acpi_madt_interrupt_source *)header;
-                       printk(KERN_INFO PREFIX
-                              "PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
-                              mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
-                              mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
-                              p->type, p->id, p->eid, p->io_sapic_vector,
-                              p->global_irq);
+                       pr_info("PLAT_INT_SRC (%s %s type[0x%x] id[0x%04x] eid[0x%x] iosapic_vector[0x%x] global_irq[0x%x]\n",
+                               mps_inti_flags_polarity[p->inti_flags & ACPI_MADT_POLARITY_MASK],
+                               mps_inti_flags_trigger[(p->inti_flags & ACPI_MADT_TRIGGER_MASK) >> 2],
+                               p->type, p->id, p->eid, p->io_sapic_vector,
+                               p->global_irq);
                }
                break;
 
        default:
-               printk(KERN_WARNING PREFIX
-                      "Found unsupported MADT entry (type = 0x%x)\n",
-                      header->type);
+               pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
+                       header->type);
                break;
        }
 }
@@ -225,7 +210,7 @@ acpi_table_parse_entries(char *id,
                acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
 
        if (!table_header) {
-               printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
+               pr_warn("%4.4s not present\n", id);
                return -ENODEV;
        }
 
@@ -248,7 +233,7 @@ acpi_table_parse_entries(char *id,
                 * infinite loop.
                 */
                if (entry->length == 0) {
-                       pr_err(PREFIX "[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
+                       pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, entry_id);
                        goto err;
                }
 
@@ -256,8 +241,8 @@ acpi_table_parse_entries(char *id,
                    ((unsigned long)entry + entry->length);
        }
        if (max_entries && count > max_entries) {
-               printk(KERN_WARNING PREFIX "[%4.4s:0x%02x] ignored %i entries of "
-                      "%i found\n", id, entry_id, count - max_entries, count);
+               pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+                       id, entry_id, count - max_entries, count);
        }
 
        early_acpi_os_unmap_memory((char *)table_header, tbl_size);
@@ -322,13 +307,11 @@ static void __init check_multiple_madt(void)
 
        acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
        if (table) {
-               printk(KERN_WARNING PREFIX
-                      "BIOS bug: multiple APIC/MADT found,"
-                      " using %d\n", acpi_apic_instance);
-               printk(KERN_WARNING PREFIX
-                      "If \"acpi_apic_instance=%d\" works better, "
-                      "notify linux-acpi@vger.kernel.org\n",
-                      acpi_apic_instance ? 0 : 2);
+               pr_warn("BIOS bug: multiple APIC/MADT found, using %d\n",
+                       acpi_apic_instance);
+               pr_warn("If \"acpi_apic_instance=%d\" works better, "
+                       "notify linux-acpi@vger.kernel.org\n",
+                       acpi_apic_instance ? 0 : 2);
                early_acpi_os_unmap_memory(table, tbl_size);
 
        } else
@@ -365,8 +348,7 @@ static int __init acpi_parse_apic_instance(char *str)
 
        acpi_apic_instance = simple_strtoul(str, NULL, 0);
 
-       printk(KERN_NOTICE PREFIX "Shall use APIC/MADT table %d\n",
-              acpi_apic_instance);
+       pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
 
        return 0;
 }
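
The conversion above relies on the pr_fmt() convention; a minimal standalone sketch of the same pattern (the function name is illustrative only):

/*
 * pr_fmt() must be defined before <linux/kernel.h>/<linux/printk.h> is
 * included; every pr_*() call in the file then picks up the prefix.
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>

static void example(void)
{
        pr_notice("Shall use APIC/MADT table %d\n", 2);
        /* emits: "ACPI: Shall use APIC/MADT table 2" */
}
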
index 08626c851be7eef72c5a639522b708586736adc5..9640685533345340bbff00bc67abeecac4e9c7d1 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/device.h>
 #include <linux/thermal.h>
 #include <linux/acpi.h>
+#include <linux/workqueue.h>
 #include <asm/uaccess.h>
 
 #define PREFIX "ACPI: "
@@ -90,6 +91,8 @@ static int psv;
 module_param(psv, int, 0644);
 MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
 
+static struct workqueue_struct *acpi_thermal_pm_queue;
+
 static int acpi_thermal_add(struct acpi_device *device);
 static int acpi_thermal_remove(struct acpi_device *device);
 static void acpi_thermal_notify(struct acpi_device *device, u32 event);
@@ -101,11 +104,13 @@ static const struct acpi_device_id  thermal_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
 
 #ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev);
 static int acpi_thermal_resume(struct device *dev);
 #else
+#define acpi_thermal_suspend NULL
 #define acpi_thermal_resume NULL
 #endif
-static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
+static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, acpi_thermal_suspend, acpi_thermal_resume);
 
 static struct acpi_driver acpi_thermal_driver = {
        .name = "thermal",
@@ -186,6 +191,7 @@ struct acpi_thermal {
        struct thermal_zone_device *thermal_zone;
        int tz_enabled;
        int kelvin_offset;
+       struct work_struct thermal_check_work;
 };
 
 /* --------------------------------------------------------------------------
@@ -1064,6 +1070,13 @@ static void acpi_thermal_guess_offset(struct acpi_thermal *tz)
                tz->kelvin_offset = 2732;
 }
 
+static void acpi_thermal_check_fn(struct work_struct *work)
+{
+       struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
+                                              thermal_check_work);
+       acpi_thermal_check(tz);
+}
+
 static int acpi_thermal_add(struct acpi_device *device)
 {
        int result = 0;
@@ -1093,6 +1106,8 @@ static int acpi_thermal_add(struct acpi_device *device)
        if (result)
                goto free_memory;
 
+       INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
+
        pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
                acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
        goto end;
@@ -1110,6 +1125,7 @@ static int acpi_thermal_remove(struct acpi_device *device)
        if (!device || !acpi_driver_data(device))
                return -EINVAL;
 
+       flush_workqueue(acpi_thermal_pm_queue);
        tz = acpi_driver_data(device);
 
        acpi_thermal_unregister_thermal_zone(tz);
@@ -1118,6 +1134,13 @@ static int acpi_thermal_remove(struct acpi_device *device)
 }
 
 #ifdef CONFIG_PM_SLEEP
+static int acpi_thermal_suspend(struct device *dev)
+{
+       /* Make sure the previously queued thermal check work has been done */
+       flush_workqueue(acpi_thermal_pm_queue);
+       return 0;
+}
+
 static int acpi_thermal_resume(struct device *dev)
 {
        struct acpi_thermal *tz;
@@ -1148,7 +1171,7 @@ static int acpi_thermal_resume(struct device *dev)
                tz->state.active |= tz->trips.active[i].flags.enabled;
        }
 
-       acpi_thermal_check(tz);
+       queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
 
        return AE_OK;
 }
@@ -1240,16 +1263,22 @@ static int __init acpi_thermal_init(void)
                return -ENODEV;
        }
 
+       acpi_thermal_pm_queue = create_workqueue("acpi_thermal_pm");
+       if (!acpi_thermal_pm_queue)
+               return -ENODEV;
+
        result = acpi_bus_register_driver(&acpi_thermal_driver);
-       if (result < 0)
+       if (result < 0) {
+               destroy_workqueue(acpi_thermal_pm_queue);
                return -ENODEV;
+       }
 
        return 0;
 }
 
 static void __exit acpi_thermal_exit(void)
 {
-
+       destroy_workqueue(acpi_thermal_pm_queue);
        acpi_bus_unregister_driver(&acpi_thermal_driver);
 
        return;
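
The suspend/remove changes above follow the usual private-workqueue pattern; a hedged sketch with hypothetical example_* names (the workqueue is assumed to have been created with create_workqueue() beforehand):

/* Sketch: defer a slow check to a private workqueue, flush it on suspend. */
static struct workqueue_struct *example_wq;

static void example_check_fn(struct work_struct *work)
{
        /* the potentially slow polling runs here, in process context */
}

static DECLARE_WORK(example_work, example_check_fn);

static int example_resume(struct device *dev)
{
        queue_work(example_wq, &example_work);  /* do not block resume */
        return 0;
}

static int example_suspend(struct device *dev)
{
        flush_workqueue(example_wq);    /* wait for any queued check */
        return 0;
}
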
index 85e3b612bdc0d49f7df07d056b0d9c14f8aca89e..0f5f78fa6545cf4d30da8f56b426033ac7f7132e 100644 (file)
@@ -422,7 +422,7 @@ out:
 EXPORT_SYMBOL(acpi_get_physical_device_location);
 
 /**
- * acpi_evaluate_hotplug_ost: Evaluate _OST for hotplug operations
+ * acpi_evaluate_ost: Evaluate _OST for hotplug operations
  * @handle: ACPI device handle
  * @source_event: source event code
  * @status_code: status code
@@ -433,17 +433,15 @@ EXPORT_SYMBOL(acpi_get_physical_device_location);
  * When the platform does not support _OST, this function has no effect.
  */
 acpi_status
-acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
-               u32 status_code, struct acpi_buffer *status_buf)
+acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+                 struct acpi_buffer *status_buf)
 {
-#ifdef ACPI_HOTPLUG_OST
        union acpi_object params[3] = {
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_INTEGER,},
                {.type = ACPI_TYPE_BUFFER,}
        };
        struct acpi_object_list arg_list = {3, params};
-       acpi_status status;
 
        params[0].integer.value = source_event;
        params[1].integer.value = status_code;
@@ -455,13 +453,9 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
                params[2].buffer.length = 0;
        }
 
-       status = acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-       return status;
-#else
-       return AE_OK;
-#endif
+       return acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
 }
-EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
+EXPORT_SYMBOL(acpi_evaluate_ost);
 
 /**
  * acpi_handle_printk: Print message with ACPI prefix and object path
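
A small usage sketch of the renamed helper, using status codes already seen earlier in this diff (the wrapper name is hypothetical):

/* Sketch: acknowledge a successful eject back to the platform via _OST. */
static void example_report_eject_ok(acpi_handle handle)
{
        acpi_evaluate_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
                          ACPI_OST_SC_SUCCESS, NULL);
}
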
index b6ba88ed31aeeb1854e48bb73b95fc59a4d58706..48c7e8af9c96cba545731cb0ab464cb4097dc44b 100644 (file)
@@ -45,8 +45,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 #define ACPI_VIDEO_BUS_NAME            "Video Bus"
 #define ACPI_VIDEO_DEVICE_NAME         "Video Device"
 #define ACPI_VIDEO_NOTIFY_SWITCH       0x80
index 19080c8e2f2a9c6d5df7f5475d581f5348ed9e8e..33e3db548a2918a6ee0fc71d7daf9c678c9a141f 100644 (file)
@@ -40,8 +40,6 @@
 
 #include "internal.h"
 
-#define PREFIX "ACPI: "
-
 ACPI_MODULE_NAME("video");
 #define _COMPONENT             ACPI_VIDEO_COMPONENT
 
index 868429a47be41a2c50ff146389df25dfcb8f5b30..20e03a7eb8b431f692e534f6a3d895a2c2cd9476 100644 (file)
@@ -11,13 +11,13 @@ config HAVE_PATA_PLATFORM
          to update the PATA_PLATFORM entry.
 
 menuconfig ATA
-       tristate "Serial ATA and Parallel ATA drivers"
+       tristate "Serial ATA and Parallel ATA drivers (libata)"
        depends on HAS_IOMEM
        depends on BLOCK
        depends on !(M32R || M68K || S390) || BROKEN
        select SCSI
        ---help---
-         If you want to use a ATA hard disk, ATA tape drive, ATA CD-ROM or
+         If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
          any other ATA device under Linux, say Y and make sure that you know
          the name of your ATA host adapter (the card inside your computer
          that "speaks" the ATA protocol, also called ATA controller),
@@ -60,7 +60,7 @@ config ATA_ACPI
 
 config SATA_ZPODD
        bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
-       depends on ATA_ACPI
+       depends on ATA_ACPI && PM_RUNTIME
        default n
        help
          This option adds support for SATA Zero Power Optical Disc
@@ -97,15 +97,48 @@ config SATA_AHCI_PLATFORM
 
          If unsure, say N.
 
+config AHCI_DA850
+       tristate "DaVinci DA850 AHCI SATA support"
+       depends on ARCH_DAVINCI_DA850
+       help
+         This option enables support for the DaVinci DA850 SoC's
+         onboard AHCI SATA.
+
+         If unsure, say N.
+
+config AHCI_ST
+       tristate "ST AHCI SATA support"
+       depends on ARCH_STI
+       help
+         This option enables support for the ST AHCI SATA controller.
+
+         If unsure, say N.
+
 config AHCI_IMX
        tristate "Freescale i.MX AHCI SATA support"
-       depends on SATA_AHCI_PLATFORM && MFD_SYSCON
+       depends on MFD_SYSCON
        help
          This option enables support for the Freescale i.MX SoC's
          onboard AHCI SATA.
 
          If unsure, say N.
 
+config AHCI_SUNXI
+       tristate "Allwinner sunxi AHCI SATA support"
+       depends on ARCH_SUNXI
+       help
+         This option enables support for the Allwinner sunxi SoC's
+         onboard AHCI SATA.
+
+         If unsure, say N.
+
+config AHCI_XGENE
+       tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
+       depends on ARM64 || COMPILE_TEST
+       select PHY_XGENE
+       help
+         This option enables support for the APM X-Gene SoC SATA host controller.
+
 config SATA_FSL
        tristate "Freescale 3.0Gbps SATA support"
        depends on FSL_SOC
@@ -239,6 +272,7 @@ config SATA_DWC_VDEBUG
 
 config SATA_HIGHBANK
        tristate "Calxeda Highbank SATA support"
+       depends on ARCH_HIGHBANK || COMPILE_TEST
        help
          This option enables support for the Calxeda Highbank SoC's
          onboard SATA.
@@ -247,6 +281,8 @@ config SATA_HIGHBANK
 
 config SATA_MV
        tristate "Marvell SATA support"
+       depends on PCI || ARCH_DOVE || ARCH_KIRKWOOD || ARCH_MV78XX0 || \
+                  ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
        select GENERIC_PHY
        help
          This option enables support for the Marvell Serial ATA family.
@@ -273,6 +309,7 @@ config SATA_PROMISE
 
 config SATA_RCAR
        tristate "Renesas R-Car SATA support"
+       depends on ARCH_SHMOBILE || COMPILE_TEST
        help
          This option enables support for Renesas R-Car Serial ATA.
 
@@ -352,6 +389,7 @@ config PATA_AMD
 
 config PATA_ARASAN_CF
        tristate "ARASAN CompactFlash PATA Controller Support"
+       depends on ARCH_SPEAR13XX || COMPILE_TEST
        depends on DMADEVICES
        select DMA_ENGINE
        help
@@ -403,7 +441,7 @@ config PATA_CMD64X
 
 config PATA_CS5520
        tristate "CS5510/5520 PATA support"
-       depends on PCI
+       depends on PCI && (X86_32 || COMPILE_TEST)
        help
          This option enables support for the Cyrix 5510/5520
          companion chip used with the MediaGX/Geode processor family.
@@ -412,7 +450,7 @@ config PATA_CS5520
 
 config PATA_CS5530
        tristate "CS5530 PATA support"
-       depends on PCI
+       depends on PCI && (X86_32 || COMPILE_TEST)
        help
          This option enables support for the Cyrix/NatSemi/AMD CS5530
          companion chip used with the MediaGX/Geode processor family.
@@ -421,7 +459,7 @@ config PATA_CS5530
 
 config PATA_CS5535
        tristate "CS5535 PATA support (Experimental)"
-       depends on PCI && X86 && !X86_64
+       depends on PCI && X86_32
        help
          This option enables support for the NatSemi/AMD CS5535
          companion chip used with the Geode processor family.
@@ -430,7 +468,7 @@ config PATA_CS5535
 
 config PATA_CS5536
        tristate "CS5536 PATA support"
-       depends on PCI
+       depends on PCI && (X86_32 || MIPS || COMPILE_TEST)
        help
          This option enables support for the AMD CS5536
          companion chip used with the Geode LX processor family.
@@ -666,7 +704,7 @@ config PATA_RDC
 
 config PATA_SC1200
        tristate "SC1200 PATA support"
-       depends on PCI
+       depends on PCI && (X86_32 || COMPILE_TEST)
        help
          This option enables support for the NatSemi/AMD SC1200 SoC
          companion chip used with the Geode processor family.
index 46518c622460942cdf8e2971dd95f3b069961ada..44c8016e565c9b8771924fbc7f7b5a00d9c078c7 100644 (file)
@@ -4,13 +4,17 @@ obj-$(CONFIG_ATA)             += libata.o
 # non-SFF interface
 obj-$(CONFIG_SATA_AHCI)                += ahci.o libahci.o
 obj-$(CONFIG_SATA_ACARD_AHCI)  += acard-ahci.o libahci.o
-obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_FSL)         += sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)    += sata_inic162x.o
 obj-$(CONFIG_SATA_SIL24)       += sata_sil24.o
 obj-$(CONFIG_SATA_DWC)         += sata_dwc_460ex.o
 obj-$(CONFIG_SATA_HIGHBANK)    += sata_highbank.o libahci.o
-obj-$(CONFIG_AHCI_IMX)         += ahci_imx.o
+obj-$(CONFIG_AHCI_DA850)       += ahci_da850.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_IMX)         += ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_SUNXI)       += ahci_sunxi.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_ST)          += ahci_st.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_XGENE)       += ahci_xgene.o libahci.o libahci_platform.o
 
 # SFF w/ custom DMA
 obj-$(CONFIG_PDC_ADMA)         += pdc_adma.o
index fd665d919df2e59a4b44c334bd9c96d14fd946b5..b51605ac597418f7aa352789dc9aeeb1b8ca015b 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index c81d809c111b238e72f65d185add59f5bef4fade..a52a5b662f35ecceb992fd41943ef5f636834838 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -578,6 +577,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline)
 {
        struct ata_port *ap = link->ap;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        bool online;
        int rc;
 
@@ -588,7 +588,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
        rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
                                 deadline, &online, NULL);
 
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
 
        DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
 
@@ -603,6 +603,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
 {
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        struct ata_taskfile tf;
        bool online;
@@ -618,7 +619,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
        rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
                                 deadline, &online, NULL);
 
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
 
        /* The pseudo configuration device on SIMG4726 attached to
         * ASUS P5W-DH Deluxe doesn't send signature FIS after
index 2289efdf82030e388ce977957eeb5b26f26f19d2..51af275b3388541baad3f7bf021a098de9da9bf0 100644 (file)
@@ -37,6 +37,8 @@
 
 #include <linux/clk.h>
 #include <linux/libata.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
 
 /* Enclosure Management Control */
 #define EM_CTRL_MSG_TYPE              0x000f0000
@@ -51,6 +53,7 @@
 
 enum {
        AHCI_MAX_PORTS          = 32,
+       AHCI_MAX_CLKS           = 3,
        AHCI_MAX_SG             = 168, /* hardware max is 64K */
        AHCI_DMA_BOUNDARY       = 0xffffffff,
        AHCI_MAX_CMDS           = 32,
@@ -321,8 +324,17 @@ struct ahci_host_priv {
        u32                     em_loc; /* enclosure management location */
        u32                     em_buf_sz;      /* EM buffer size in byte */
        u32                     em_msg_type;    /* EM message type */
-       struct clk              *clk;           /* Only for platforms supporting clk */
+       bool                    got_runtime_pm; /* Did we do pm_runtime_get? */
+       struct clk              *clks[AHCI_MAX_CLKS]; /* Optional */
+       struct regulator        *target_pwr;    /* Optional */
+       struct phy              *phy;           /* If platform uses phy */
        void                    *plat_data;     /* Other platform data */
+       /*
+        * Optional ahci_start_engine override. If not set, this is set to the
+        * default ahci_start_engine during ahci_save_initial_config(); it can
+        * be overridden at any time before the host is activated.
+        */
+       void                    (*start_engine)(struct ata_port *ap);
 };
 
 extern int ahci_ignore_sss;
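
A hedged sketch of how platform glue might use the new hook (example_* names are hypothetical; ahci_start_engine() is the existing generic routine that remains the default):

/* Sketch: wrap the generic engine start with platform-specific setup. */
static void example_start_engine(struct ata_port *ap)
{
        /* platform-specific PHY/clock tweaks would go here */

        ahci_start_engine(ap);          /* then do the generic start */
}

static void example_install_hook(struct ahci_host_priv *hpriv)
{
        /*
         * ahci_save_initial_config() installs the default; override it
         * any time before the host is activated.
         */
        hpriv->start_engine = example_start_engine;
}
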
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
new file mode 100644 (file)
index 0000000..2c83613
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * DaVinci DA850 AHCI SATA platform driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include "ahci.h"
+
+/* SATA PHY Control Register offset from AHCI base */
+#define SATA_P0PHYCR_REG       0x178
+
+#define SATA_PHY_MPY(x)                ((x) << 0)
+#define SATA_PHY_LOS(x)                ((x) << 6)
+#define SATA_PHY_RXCDR(x)      ((x) << 10)
+#define SATA_PHY_RXEQ(x)       ((x) << 13)
+#define SATA_PHY_TXSWING(x)    ((x) << 19)
+#define SATA_PHY_ENPLL(x)      ((x) << 31)
+
+/*
+ * The multiplier needed for 1.5GHz PLL output.
+ *
+ * NOTE: This is currently hardcoded for a 100MHz crystal frequency (as
+ * used on the DA850 EVM board) and may need to be changed for other boards.
+ */
+#define DA850_SATA_CLK_MULTIPLIER      7
+
+static void da850_sata_init(struct device *dev, void __iomem *pwrdn_reg,
+                           void __iomem *ahci_base)
+{
+       unsigned int val;
+
+       /* Enable SATA clock receiver */
+       val = readl(pwrdn_reg);
+       val &= ~BIT(0);
+       writel(val, pwrdn_reg);
+
+       val = SATA_PHY_MPY(DA850_SATA_CLK_MULTIPLIER + 1) | SATA_PHY_LOS(1) |
+             SATA_PHY_RXCDR(4) | SATA_PHY_RXEQ(1) | SATA_PHY_TXSWING(3) |
+             SATA_PHY_ENPLL(1);
+
+       writel(val, ahci_base + SATA_P0PHYCR_REG);
+}
+
+static const struct ata_port_info ahci_da850_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_platform_ops,
+};
+
+static int ahci_da850_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ahci_host_priv *hpriv;
+       struct resource *res;
+       void __iomem *pwrdn_reg;
+       int rc;
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res) {
+               rc = -ENODEV;
+               goto disable_resources;
+       }
+
+       pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
+       if (!pwrdn_reg) {
+               rc = -ENOMEM;
+               goto disable_resources;
+       }
+
+       da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
+
+       rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info, 0, 0);
+       if (rc)
+               goto disable_resources;
+
+       return 0;
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+       return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_da850_pm_ops, ahci_platform_suspend,
+                        ahci_platform_resume);
+
+static struct platform_driver ahci_da850_driver = {
+       .probe = ahci_da850_probe,
+       .remove = ata_platform_remove_one,
+       .driver = {
+               .name = "ahci_da850",
+               .owner = THIS_MODULE,
+               .pm = &ahci_da850_pm_ops,
+       },
+};
+module_platform_driver(ahci_da850_driver);
+
+MODULE_DESCRIPTION("DaVinci DA850 AHCI SATA platform driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_LICENSE("GPL");
index dd4d6f74d7bd5067a62840019a53c016b74efd6e..497c7abe1c7df5ef79ccd246828b68a1251c5201 100644 (file)
@@ -42,13 +42,7 @@ enum ahci_imx_type {
 struct imx_ahci_priv {
        struct platform_device *ahci_pdev;
        enum ahci_imx_type type;
-
-       /* i.MX53 clock */
-       struct clk *sata_gate_clk;
-       /* Common clock */
-       struct clk *sata_ref_clk;
        struct clk *ahb_clk;
-
        struct regmap *gpr;
        bool no_device;
        bool first_time;
@@ -58,28 +52,52 @@ static int ahci_imx_hotplug;
 module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
 MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
 
-static int imx_sata_clock_enable(struct device *dev)
+static void ahci_imx_host_stop(struct ata_host *host);
+
+static int imx_sata_enable(struct ahci_host_priv *hpriv)
 {
-       struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+       struct imx_ahci_priv *imxpriv = hpriv->plat_data;
        int ret;
 
-       if (imxpriv->type == AHCI_IMX53) {
-               ret = clk_prepare_enable(imxpriv->sata_gate_clk);
-               if (ret < 0) {
-                       dev_err(dev, "prepare-enable sata_gate clock err:%d\n",
-                               ret);
+       if (imxpriv->no_device)
+               return 0;
+
+       if (hpriv->target_pwr) {
+               ret = regulator_enable(hpriv->target_pwr);
+               if (ret)
                        return ret;
-               }
        }
 
-       ret = clk_prepare_enable(imxpriv->sata_ref_clk);
-       if (ret < 0) {
-               dev_err(dev, "prepare-enable sata_ref clock err:%d\n",
-                       ret);
-               goto clk_err;
-       }
+       ret = ahci_platform_enable_clks(hpriv);
+       if (ret < 0)
+               goto disable_regulator;
 
        if (imxpriv->type == AHCI_IMX6Q) {
+               /*
+                * Set the PHY parameters. GPR13 is configured in two steps:
+                * one write (mask 0x07ffffff) for everything except
+                * mpll_clk_en, and a second write to set mpll_clk_en.
+                */
+               regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+                                  IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
+                                  IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
+                                  IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
+                                  IMX6Q_GPR13_SATA_SPD_MODE_MASK |
+                                  IMX6Q_GPR13_SATA_MPLL_SS_EN |
+                                  IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
+                                  IMX6Q_GPR13_SATA_TX_BOOST_MASK |
+                                  IMX6Q_GPR13_SATA_TX_LVL_MASK |
+                                  IMX6Q_GPR13_SATA_MPLL_CLK_EN |
+                                  IMX6Q_GPR13_SATA_TX_EDGE_RATE,
+                                  IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
+                                  IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+                                  IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+                                  IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+                                  IMX6Q_GPR13_SATA_MPLL_SS_EN |
+                                  IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
+                                  IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
+                                  IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
                regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
@@ -89,15 +107,19 @@ static int imx_sata_clock_enable(struct device *dev)
 
        return 0;
 
-clk_err:
-       if (imxpriv->type == AHCI_IMX53)
-               clk_disable_unprepare(imxpriv->sata_gate_clk);
+disable_regulator:
+       if (hpriv->target_pwr)
+               regulator_disable(hpriv->target_pwr);
+
        return ret;
 }
 
-static void imx_sata_clock_disable(struct device *dev)
+static void imx_sata_disable(struct ahci_host_priv *hpriv)
 {
-       struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+       struct imx_ahci_priv *imxpriv = hpriv->plat_data;
+
+       if (imxpriv->no_device)
+               return;
 
        if (imxpriv->type == AHCI_IMX6Q) {
                regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
@@ -105,10 +127,10 @@ static void imx_sata_clock_disable(struct device *dev)
                                   !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
        }
 
-       clk_disable_unprepare(imxpriv->sata_ref_clk);
+       ahci_platform_disable_clks(hpriv);
 
-       if (imxpriv->type == AHCI_IMX53)
-               clk_disable_unprepare(imxpriv->sata_gate_clk);
+       if (hpriv->target_pwr)
+               regulator_disable(hpriv->target_pwr);
 }
 
 static void ahci_imx_error_handler(struct ata_port *ap)
@@ -118,7 +140,7 @@ static void ahci_imx_error_handler(struct ata_port *ap)
        struct ata_host *host = dev_get_drvdata(ap->dev);
        struct ahci_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->mmio;
-       struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+       struct imx_ahci_priv *imxpriv = hpriv->plat_data;
 
        ahci_error_handler(ap);
 
@@ -136,7 +158,7 @@ static void ahci_imx_error_handler(struct ata_port *ap)
         */
        reg_val = readl(mmio + PORT_PHY_CTL);
        writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
-       imx_sata_clock_disable(ap->dev);
+       imx_sata_disable(hpriv);
        imxpriv->no_device = true;
 }
 
@@ -144,7 +166,9 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
                       unsigned long deadline)
 {
        struct ata_port *ap = link->ap;
-       struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+       struct ata_host *host = dev_get_drvdata(ap->dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       struct imx_ahci_priv *imxpriv = hpriv->plat_data;
        int ret = -EIO;
 
        if (imxpriv->type == AHCI_IMX53)
@@ -156,7 +180,8 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
 }
 
 static struct ata_port_operations ahci_imx_ops = {
-       .inherits       = &ahci_platform_ops,
+       .inherits       = &ahci_ops,
+       .host_stop      = ahci_imx_host_stop,
        .error_handler  = ahci_imx_error_handler,
        .softreset      = ahci_imx_softreset,
 };
@@ -168,79 +193,6 @@ static const struct ata_port_info ahci_imx_port_info = {
        .port_ops       = &ahci_imx_ops,
 };
 
-static int imx_sata_init(struct device *dev, void __iomem *mmio)
-{
-       int ret = 0;
-       unsigned int reg_val;
-       struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
-
-       ret = imx_sata_clock_enable(dev);
-       if (ret < 0)
-               return ret;
-
-       /*
-        * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
-        * and IP vendor specific register HOST_TIMER1MS.
-        * Configure CAP_SSS (support stagered spin up).
-        * Implement the port0.
-        * Get the ahb clock rate, and configure the TIMER1MS register.
-        */
-       reg_val = readl(mmio + HOST_CAP);
-       if (!(reg_val & HOST_CAP_SSS)) {
-               reg_val |= HOST_CAP_SSS;
-               writel(reg_val, mmio + HOST_CAP);
-       }
-       reg_val = readl(mmio + HOST_PORTS_IMPL);
-       if (!(reg_val & 0x1)) {
-               reg_val |= 0x1;
-               writel(reg_val, mmio + HOST_PORTS_IMPL);
-       }
-
-       reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
-       writel(reg_val, mmio + HOST_TIMER1MS);
-
-       return 0;
-}
-
-static void imx_sata_exit(struct device *dev)
-{
-       imx_sata_clock_disable(dev);
-}
-
-static int imx_ahci_suspend(struct device *dev)
-{
-       struct imx_ahci_priv *imxpriv =  dev_get_drvdata(dev->parent);
-
-       /*
-        * If no_device is set, The CLKs had been gated off in the
-        * initialization so don't do it again here.
-        */
-       if (!imxpriv->no_device)
-               imx_sata_clock_disable(dev);
-
-       return 0;
-}
-
-static int imx_ahci_resume(struct device *dev)
-{
-       struct imx_ahci_priv *imxpriv =  dev_get_drvdata(dev->parent);
-       int ret = 0;
-
-       if (!imxpriv->no_device)
-               ret = imx_sata_clock_enable(dev);
-
-       return ret;
-}
-
-static struct ahci_platform_data imx_sata_pdata = {
-       .init           = imx_sata_init,
-       .exit           = imx_sata_exit,
-       .ata_port_info  = &ahci_imx_port_info,
-       .suspend        = imx_ahci_suspend,
-       .resume         = imx_ahci_resume,
-
-};
-
 static const struct of_device_id imx_ahci_of_match[] = {
        { .compatible = "fsl,imx53-ahci", .data = (void *)AHCI_IMX53 },
        { .compatible = "fsl,imx6q-ahci", .data = (void *)AHCI_IMX6Q },
@@ -251,151 +203,124 @@ MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
 static int imx_ahci_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct resource *mem, *irq, res[2];
        const struct of_device_id *of_id;
-       enum ahci_imx_type type;
-       const struct ahci_platform_data *pdata = NULL;
+       struct ahci_host_priv *hpriv;
        struct imx_ahci_priv *imxpriv;
-       struct device *ahci_dev;
-       struct platform_device *ahci_pdev;
+       unsigned int reg_val;
        int ret;
 
        of_id = of_match_device(imx_ahci_of_match, dev);
        if (!of_id)
                return -EINVAL;
 
-       type = (enum ahci_imx_type)of_id->data;
-       pdata = &imx_sata_pdata;
-
        imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
-       if (!imxpriv) {
-               dev_err(dev, "can't alloc ahci_host_priv\n");
+       if (!imxpriv)
                return -ENOMEM;
-       }
-
-       ahci_pdev = platform_device_alloc("ahci", -1);
-       if (!ahci_pdev)
-               return -ENODEV;
-
-       ahci_dev = &ahci_pdev->dev;
-       ahci_dev->parent = dev;
 
        imxpriv->no_device = false;
        imxpriv->first_time = true;
-       imxpriv->type = type;
-
+       imxpriv->type = (enum ahci_imx_type)of_id->data;
        imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(imxpriv->ahb_clk)) {
                dev_err(dev, "can't get ahb clock.\n");
-               ret = PTR_ERR(imxpriv->ahb_clk);
-               goto err_out;
+               return PTR_ERR(imxpriv->ahb_clk);
        }
 
-       if (type == AHCI_IMX53) {
-               imxpriv->sata_gate_clk = devm_clk_get(dev, "sata_gate");
-               if (IS_ERR(imxpriv->sata_gate_clk)) {
-                       dev_err(dev, "can't get sata_gate clock.\n");
-                       ret = PTR_ERR(imxpriv->sata_gate_clk);
-                       goto err_out;
+       if (imxpriv->type == AHCI_IMX6Q) {
+               imxpriv->gpr = syscon_regmap_lookup_by_compatible(
+                                                       "fsl,imx6q-iomuxc-gpr");
+               if (IS_ERR(imxpriv->gpr)) {
+                       dev_err(dev,
+                               "failed to find fsl,imx6q-iomuxc-gpr regmap\n");
+                       return PTR_ERR(imxpriv->gpr);
                }
        }
 
-       imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
-       if (IS_ERR(imxpriv->sata_ref_clk)) {
-               dev_err(dev, "can't get sata_ref clock.\n");
-               ret = PTR_ERR(imxpriv->sata_ref_clk);
-               goto err_out;
-       }
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       hpriv->plat_data = imxpriv;
 
-       imxpriv->ahci_pdev = ahci_pdev;
-       platform_set_drvdata(pdev, imxpriv);
+       ret = imx_sata_enable(hpriv);
+       if (ret)
+               return ret;
 
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!mem || !irq) {
-               dev_err(dev, "no mmio/irq resource\n");
-               ret = -ENOMEM;
-               goto err_out;
+       /*
+        * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+        * and IP vendor specific register HOST_TIMER1MS.
+        * Configure CAP_SSS (support staggered spin-up).
+        * Implement port 0.
+        * Get the ahb clock rate, and configure the TIMER1MS register.
+        */
+       reg_val = readl(hpriv->mmio + HOST_CAP);
+       if (!(reg_val & HOST_CAP_SSS)) {
+               reg_val |= HOST_CAP_SSS;
+               writel(reg_val, hpriv->mmio + HOST_CAP);
+       }
+       reg_val = readl(hpriv->mmio + HOST_PORTS_IMPL);
+       if (!(reg_val & 0x1)) {
+               reg_val |= 0x1;
+               writel(reg_val, hpriv->mmio + HOST_PORTS_IMPL);
        }
 
-       res[0] = *mem;
-       res[1] = *irq;
+       reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+       writel(reg_val, hpriv->mmio + HOST_TIMER1MS);
 
-       ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
-       ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
-       ahci_dev->of_node = dev->of_node;
+       ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0);
+       if (ret)
+               imx_sata_disable(hpriv);
 
-       if (type == AHCI_IMX6Q) {
-               imxpriv->gpr = syscon_regmap_lookup_by_compatible(
-                                                       "fsl,imx6q-iomuxc-gpr");
-               if (IS_ERR(imxpriv->gpr)) {
-                       dev_err(dev,
-                               "failed to find fsl,imx6q-iomux-gpr regmap\n");
-                       ret = PTR_ERR(imxpriv->gpr);
-                       goto err_out;
-               }
+       return ret;
+}
 
-               /*
-                * Set PHY Paremeters, two steps to configure the GPR13,
-                * one write for rest of parameters, mask of first write
-                * is 0x07fffffe, and the other one write for setting
-                * the mpll_clk_en happens in imx_sata_clock_enable().
-                */
-               regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
-                                  IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK |
-                                  IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK |
-                                  IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK |
-                                  IMX6Q_GPR13_SATA_SPD_MODE_MASK |
-                                  IMX6Q_GPR13_SATA_MPLL_SS_EN |
-                                  IMX6Q_GPR13_SATA_TX_ATTEN_MASK |
-                                  IMX6Q_GPR13_SATA_TX_BOOST_MASK |
-                                  IMX6Q_GPR13_SATA_TX_LVL_MASK |
-                                  IMX6Q_GPR13_SATA_MPLL_CLK_EN |
-                                  IMX6Q_GPR13_SATA_TX_EDGE_RATE,
-                                  IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
-                                  IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
-                                  IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
-                                  IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
-                                  IMX6Q_GPR13_SATA_MPLL_SS_EN |
-                                  IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
-                                  IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
-                                  IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
-       }
+static void ahci_imx_host_stop(struct ata_host *host)
+{
+       struct ahci_host_priv *hpriv = host->private_data;
 
-       ret = platform_device_add_resources(ahci_pdev, res, 2);
-       if (ret)
-               goto err_out;
+       imx_sata_disable(hpriv);
+}
 
-       ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
-       if (ret)
-               goto err_out;
+#ifdef CONFIG_PM_SLEEP
+static int imx_ahci_suspend(struct device *dev)
+{
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       int ret;
 
-       ret = platform_device_add(ahci_pdev);
-       if (ret) {
-err_out:
-               platform_device_put(ahci_pdev);
+       ret = ahci_platform_suspend_host(dev);
+       if (ret)
                return ret;
-       }
+
+       imx_sata_disable(hpriv);
 
        return 0;
 }
 
-static int imx_ahci_remove(struct platform_device *pdev)
+static int imx_ahci_resume(struct device *dev)
 {
-       struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
-       struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       int ret;
 
-       platform_device_unregister(ahci_pdev);
-       return 0;
+       ret = imx_sata_enable(hpriv);
+       if (ret)
+               return ret;
+
+       return ahci_platform_resume_host(dev);
 }
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_imx_pm_ops, imx_ahci_suspend, imx_ahci_resume);
 
 static struct platform_driver imx_ahci_driver = {
        .probe = imx_ahci_probe,
-       .remove = imx_ahci_remove,
+       .remove = ata_platform_remove_one,
        .driver = {
                .name = "ahci-imx",
                .owner = THIS_MODULE,
                .of_match_table = imx_ahci_of_match,
+               .pm = &ahci_imx_pm_ops,
        },
 };
 module_platform_driver(imx_ahci_driver);
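For orientation, the conversion above reduces an AHCI platform glue driver to the shape sketched below. This is a minimal sketch only, assuming the ahci_platform_get_resources()/ahci_platform_enable_resources()/ahci_platform_init_host() helpers that this series factors out into libahci_platform (further down in this diff); the foo_ahci_* names are hypothetical placeholders, not code from the patch.

/* Hypothetical skeleton of a converted AHCI platform glue driver */
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include <linux/libata.h>
#include "ahci.h"

static const struct ata_port_info foo_ahci_port_info = {
	.flags		= AHCI_FLAG_COMMON,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ahci_platform_ops,
};

static int foo_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv;
	int rc;

	/* Map the AHCI MMIO region and look up clocks/regulator */
	hpriv = ahci_platform_get_resources(pdev);
	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	/* Power up the controller before touching its registers */
	rc = ahci_platform_enable_resources(hpriv);
	if (rc)
		return rc;

	/* SoC specific setup via hpriv->mmio would go here */

	rc = ahci_platform_init_host(pdev, hpriv, &foo_ahci_port_info, 0, 0);
	if (rc)
		ahci_platform_disable_resources(hpriv);

	return rc;
}

Suspend/resume helpers and .remove = ata_platform_remove_one then come largely for free, as the imx conversion above shows.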
index 4b231baceb0995557c2cae40e501f6d3449dc365..ef67e79944f9962e3e32382dd43794404d866b01 100644 (file)
  * any later version.
  */
 
-#include <linux/clk.h>
 #include <linux/kernel.h>
-#include <linux/gfp.h>
 #include <linux/module.h>
 #include <linux/pm.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/libata.h>
 #include <linux/ahci_platform.h>
 #include "ahci.h"
 
-static void ahci_host_stop(struct ata_host *host);
-
-enum ahci_type {
-       AHCI,           /* standard platform ahci */
-       IMX53_AHCI,     /* ahci on i.mx53 */
-       STRICT_AHCI,    /* delayed DMA engine start */
-};
-
-static struct platform_device_id ahci_devtype[] = {
-       {
-               .name = "ahci",
-               .driver_data = AHCI,
-       }, {
-               .name = "imx53-ahci",
-               .driver_data = IMX53_AHCI,
-       }, {
-               .name = "strict-ahci",
-               .driver_data = STRICT_AHCI,
-       }, {
-               /* sentinel */
-       }
-};
-MODULE_DEVICE_TABLE(platform, ahci_devtype);
-
-struct ata_port_operations ahci_platform_ops = {
-       .inherits       = &ahci_ops,
-       .host_stop      = ahci_host_stop,
-};
-EXPORT_SYMBOL_GPL(ahci_platform_ops);
-
-static struct ata_port_operations ahci_platform_retry_srst_ops = {
-       .inherits       = &ahci_pmp_retry_srst_ops,
-       .host_stop      = ahci_host_stop,
-};
-
-static const struct ata_port_info ahci_port_info[] = {
-       /* by features */
-       [AHCI] = {
-               .flags          = AHCI_FLAG_COMMON,
-               .pio_mask       = ATA_PIO4,
-               .udma_mask      = ATA_UDMA6,
-               .port_ops       = &ahci_platform_ops,
-       },
-       [IMX53_AHCI] = {
-               .flags          = AHCI_FLAG_COMMON,
-               .pio_mask       = ATA_PIO4,
-               .udma_mask      = ATA_UDMA6,
-               .port_ops       = &ahci_platform_retry_srst_ops,
-       },
-       [STRICT_AHCI] = {
-               AHCI_HFLAGS     (AHCI_HFLAG_DELAY_ENGINE),
-               .flags          = AHCI_FLAG_COMMON,
-               .pio_mask       = ATA_PIO4,
-               .udma_mask      = ATA_UDMA6,
-               .port_ops       = &ahci_platform_ops,
-       },
-};
-
-static struct scsi_host_template ahci_platform_sht = {
-       AHCI_SHT("ahci_platform"),
+static const struct ata_port_info ahci_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_platform_ops,
 };
 
 static int ahci_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct ahci_platform_data *pdata = dev_get_platdata(dev);
-       const struct platform_device_id *id = platform_get_device_id(pdev);
-       struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
-       const struct ata_port_info *ppi[] = { &pi, NULL };
        struct ahci_host_priv *hpriv;
-       struct ata_host *host;
-       struct resource *mem;
-       int irq;
-       int n_ports;
-       int i;
        int rc;
 
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(dev, "no mmio space\n");
-               return -EINVAL;
-       }
-
-       irq = platform_get_irq(pdev, 0);
-       if (irq <= 0) {
-               dev_err(dev, "no irq\n");
-               return -EINVAL;
-       }
-
-       if (pdata && pdata->ata_port_info)
-               pi = *pdata->ata_port_info;
-
-       hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
-       if (!hpriv) {
-               dev_err(dev, "can't alloc ahci_host_priv\n");
-               return -ENOMEM;
-       }
-
-       hpriv->flags |= (unsigned long)pi.private_data;
-
-       hpriv->mmio = devm_ioremap(dev, mem->start, resource_size(mem));
-       if (!hpriv->mmio) {
-               dev_err(dev, "can't map %pR\n", mem);
-               return -ENOMEM;
-       }
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
 
-       hpriv->clk = clk_get(dev, NULL);
-       if (IS_ERR(hpriv->clk)) {
-               dev_err(dev, "can't get clock\n");
-       } else {
-               rc = clk_prepare_enable(hpriv->clk);
-               if (rc) {
-                       dev_err(dev, "clock prepare enable failed");
-                       goto free_clk;
-               }
-       }
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
 
        /*
         * Some platforms might need to prepare for mmio region access,
@@ -151,69 +52,10 @@ static int ahci_probe(struct platform_device *pdev)
        if (pdata && pdata->init) {
                rc = pdata->init(dev, hpriv->mmio);
                if (rc)
-                       goto disable_unprepare_clk;
-       }
-
-       ahci_save_initial_config(dev, hpriv,
-               pdata ? pdata->force_port_map : 0,
-               pdata ? pdata->mask_port_map  : 0);
-
-       /* prepare host */
-       if (hpriv->cap & HOST_CAP_NCQ)
-               pi.flags |= ATA_FLAG_NCQ;
-
-       if (hpriv->cap & HOST_CAP_PMP)
-               pi.flags |= ATA_FLAG_PMP;
-
-       ahci_set_em_messages(hpriv, &pi);
-
-       /* CAP.NP sometimes indicate the index of the last enabled
-        * port, at other times, that of the last possible port, so
-        * determining the maximum port number requires looking at
-        * both CAP.NP and port_map.
-        */
-       n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
-
-       host = ata_host_alloc_pinfo(dev, ppi, n_ports);
-       if (!host) {
-               rc = -ENOMEM;
-               goto pdata_exit;
-       }
-
-       host->private_data = hpriv;
-
-       if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
-               host->flags |= ATA_HOST_PARALLEL_SCAN;
-       else
-               dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
-
-       if (pi.flags & ATA_FLAG_EM)
-               ahci_reset_em(host);
-
-       for (i = 0; i < host->n_ports; i++) {
-               struct ata_port *ap = host->ports[i];
-
-               ata_port_desc(ap, "mmio %pR", mem);
-               ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
-
-               /* set enclosure management message type */
-               if (ap->flags & ATA_FLAG_EM)
-                       ap->em_message_type = hpriv->em_msg_type;
-
-               /* disabled/not-implemented port */
-               if (!(hpriv->port_map & (1 << i)))
-                       ap->ops = &ata_dummy_port_ops;
+                       goto disable_resources;
        }
 
-       rc = ahci_reset_controller(host);
-       if (rc)
-               goto pdata_exit;
-
-       ahci_init_controller(host);
-       ahci_print_info(host, "platform");
-
-       rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
-                              &ahci_platform_sht);
+       rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 0, 0);
        if (rc)
                goto pdata_exit;
 
@@ -221,115 +63,19 @@ static int ahci_probe(struct platform_device *pdev)
 pdata_exit:
        if (pdata && pdata->exit)
                pdata->exit(dev);
-disable_unprepare_clk:
-       if (!IS_ERR(hpriv->clk))
-               clk_disable_unprepare(hpriv->clk);
-free_clk:
-       if (!IS_ERR(hpriv->clk))
-               clk_put(hpriv->clk);
-       return rc;
-}
-
-static void ahci_host_stop(struct ata_host *host)
-{
-       struct device *dev = host->dev;
-       struct ahci_platform_data *pdata = dev_get_platdata(dev);
-       struct ahci_host_priv *hpriv = host->private_data;
-
-       if (pdata && pdata->exit)
-               pdata->exit(dev);
-
-       if (!IS_ERR(hpriv->clk)) {
-               clk_disable_unprepare(hpriv->clk);
-               clk_put(hpriv->clk);
-       }
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ahci_suspend(struct device *dev)
-{
-       struct ahci_platform_data *pdata = dev_get_platdata(dev);
-       struct ata_host *host = dev_get_drvdata(dev);
-       struct ahci_host_priv *hpriv = host->private_data;
-       void __iomem *mmio = hpriv->mmio;
-       u32 ctl;
-       int rc;
-
-       if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
-               dev_err(dev, "firmware update required for suspend/resume\n");
-               return -EIO;
-       }
-
-       /*
-        * AHCI spec rev1.1 section 8.3.3:
-        * Software must disable interrupts prior to requesting a
-        * transition of the HBA to D3 state.
-        */
-       ctl = readl(mmio + HOST_CTL);
-       ctl &= ~HOST_IRQ_EN;
-       writel(ctl, mmio + HOST_CTL);
-       readl(mmio + HOST_CTL); /* flush */
-
-       rc = ata_host_suspend(host, PMSG_SUSPEND);
-       if (rc)
-               return rc;
-
-       if (pdata && pdata->suspend)
-               return pdata->suspend(dev);
-
-       if (!IS_ERR(hpriv->clk))
-               clk_disable_unprepare(hpriv->clk);
-
-       return 0;
-}
-
-static int ahci_resume(struct device *dev)
-{
-       struct ahci_platform_data *pdata = dev_get_platdata(dev);
-       struct ata_host *host = dev_get_drvdata(dev);
-       struct ahci_host_priv *hpriv = host->private_data;
-       int rc;
-
-       if (!IS_ERR(hpriv->clk)) {
-               rc = clk_prepare_enable(hpriv->clk);
-               if (rc) {
-                       dev_err(dev, "clock prepare enable failed");
-                       return rc;
-               }
-       }
-
-       if (pdata && pdata->resume) {
-               rc = pdata->resume(dev);
-               if (rc)
-                       goto disable_unprepare_clk;
-       }
-
-       if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
-               rc = ahci_reset_controller(host);
-               if (rc)
-                       goto disable_unprepare_clk;
-
-               ahci_init_controller(host);
-       }
-
-       ata_host_resume(host);
-
-       return 0;
-
-disable_unprepare_clk:
-       if (!IS_ERR(hpriv->clk))
-               clk_disable_unprepare(hpriv->clk);
-
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
        return rc;
 }
-#endif
 
-static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_suspend, ahci_resume);
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+                        ahci_platform_resume);
 
 static const struct of_device_id ahci_of_match[] = {
        { .compatible = "snps,spear-ahci", },
        { .compatible = "snps,exynos5440-ahci", },
        { .compatible = "ibm,476gtr-ahci", },
+       { .compatible = "snps,dwc-ahci", },
        {},
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -343,7 +89,6 @@ static struct platform_driver ahci_driver = {
                .of_match_table = ahci_of_match,
                .pm = &ahci_pm_ops,
        },
-       .id_table       = ahci_devtype,
 };
 module_platform_driver(ahci_driver);
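The generic driver still honors the legacy struct ahci_platform_data init/exit hooks for boards that register an "ahci" platform device with platform data, as the pdata->init()/pdata->exit() calls above show. A hedged sketch of what such a hook pair looks like on the board side (the board_ahci_* names are illustrative, not taken from the tree):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ahci_platform.h>

/* Illustrative board-side callbacks invoked around host registration */
static int board_ahci_init(struct device *dev, void __iomem *mmio)
{
	/* board specific controller setup, e.g. PHY or mux configuration */
	return 0;
}

static void board_ahci_exit(struct device *dev)
{
	/* undo whatever board_ahci_init() set up */
}

static struct ahci_platform_data board_ahci_pdata = {
	.init	= board_ahci_init,
	.exit	= board_ahci_exit,
};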
 
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
new file mode 100644 (file)
index 0000000..6332222
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2012 STMicroelectronics Limited
+ *
+ * Authors: Francesco Virlinzi <francesco.virlinzi@st.com>
+ *         Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/ahci_platform.h>
+#include <linux/libata.h>
+#include <linux/reset.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+
+#include "ahci.h"
+
+#define ST_AHCI_OOBR                   0xbc
+#define ST_AHCI_OOBR_WE                        BIT(31)
+#define ST_AHCI_OOBR_CWMIN_SHIFT       24
+#define ST_AHCI_OOBR_CWMAX_SHIFT       16
+#define ST_AHCI_OOBR_CIMIN_SHIFT       8
+#define ST_AHCI_OOBR_CIMAX_SHIFT       0
+
+struct st_ahci_drv_data {
+       struct platform_device *ahci;
+       struct reset_control *pwr;
+       struct reset_control *sw_rst;
+       struct reset_control *pwr_rst;
+       struct ahci_host_priv *hpriv;
+};
+
+static void st_ahci_configure_oob(void __iomem *mmio)
+{
+       unsigned long old_val, new_val;
+
+       new_val = (0x02 << ST_AHCI_OOBR_CWMIN_SHIFT) |
+                 (0x04 << ST_AHCI_OOBR_CWMAX_SHIFT) |
+                 (0x08 << ST_AHCI_OOBR_CIMIN_SHIFT) |
+                 (0x0C << ST_AHCI_OOBR_CIMAX_SHIFT);
+
+       old_val = readl(mmio + ST_AHCI_OOBR);
+       writel(old_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+       writel(new_val | ST_AHCI_OOBR_WE, mmio + ST_AHCI_OOBR);
+       writel(new_val, mmio + ST_AHCI_OOBR);
+}
+
+static int st_ahci_deassert_resets(struct device *dev)
+{
+       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+       int err;
+
+       if (drv_data->pwr) {
+               err = reset_control_deassert(drv_data->pwr);
+               if (err) {
+                       dev_err(dev, "unable to bring out of pwrdwn\n");
+                       return err;
+               }
+       }
+
+       st_ahci_configure_oob(drv_data->hpriv->mmio);
+
+       if (drv_data->sw_rst) {
+               err = reset_control_deassert(drv_data->sw_rst);
+               if (err) {
+                       dev_err(dev, "unable to bring out of sw-rst\n");
+                       return err;
+               }
+       }
+
+       if (drv_data->pwr_rst) {
+               err = reset_control_deassert(drv_data->pwr_rst);
+               if (err) {
+                       dev_err(dev, "unable to bring out of pwr-rst\n");
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+static void st_ahci_host_stop(struct ata_host *host)
+{
+       struct ahci_host_priv *hpriv = host->private_data;
+       struct device *dev = host->dev;
+       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+       int err;
+
+       if (drv_data->pwr) {
+               err = reset_control_assert(drv_data->pwr);
+               if (err)
+                       dev_err(dev, "unable to pwrdwn\n");
+       }
+
+       ahci_platform_disable_resources(hpriv);
+}
+
+static int st_ahci_probe_resets(struct platform_device *pdev)
+{
+       struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev);
+
+       drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn");
+       if (IS_ERR(drv_data->pwr)) {
+               dev_info(&pdev->dev, "power reset control not defined\n");
+               drv_data->pwr = NULL;
+       }
+
+       drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst");
+       if (IS_ERR(drv_data->sw_rst)) {
+               dev_info(&pdev->dev, "soft reset control not defined\n");
+               drv_data->sw_rst = NULL;
+       }
+
+       drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst");
+       if (IS_ERR(drv_data->pwr_rst)) {
+               dev_dbg(&pdev->dev, "power soft reset control not defined\n");
+               drv_data->pwr_rst = NULL;
+       }
+
+       return st_ahci_deassert_resets(&pdev->dev);
+}
+
+static struct ata_port_operations st_ahci_port_ops = {
+       .inherits       = &ahci_platform_ops,
+       .host_stop      = st_ahci_host_stop,
+};
+
+static const struct ata_port_info st_ahci_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &st_ahci_port_ops,
+};
+
+static int st_ahci_probe(struct platform_device *pdev)
+{
+       struct st_ahci_drv_data *drv_data;
+       struct ahci_host_priv *hpriv;
+       int err;
+
+       drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
+       if (!drv_data)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, drv_data);
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       drv_data->hpriv = hpriv;
+
+       err = st_ahci_probe_resets(pdev);
+       if (err)
+               return err;
+
+       err = ahci_platform_enable_resources(hpriv);
+       if (err)
+               return err;
+
+       err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0);
+       if (err) {
+               ahci_platform_disable_resources(hpriv);
+               return err;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int st_ahci_suspend(struct device *dev)
+{
+       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = drv_data->hpriv;
+       int err;
+
+       err = ahci_platform_suspend_host(dev);
+       if (err)
+               return err;
+
+       if (drv_data->pwr) {
+               err = reset_control_assert(drv_data->pwr);
+               if (err) {
+                       dev_err(dev, "unable to pwrdwn");
+                       return err;
+               }
+       }
+
+       ahci_platform_disable_resources(hpriv);
+
+       return 0;
+}
+
+static int st_ahci_resume(struct device *dev)
+{
+       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = drv_data->hpriv;
+       int err;
+
+       err = ahci_platform_enable_resources(hpriv);
+       if (err)
+               return err;
+
+       err = st_ahci_deassert_resets(dev);
+       if (err) {
+               ahci_platform_disable_resources(hpriv);
+               return err;
+       }
+
+       return ahci_platform_resume_host(dev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
+
+static struct of_device_id st_ahci_match[] = {
+       { .compatible = "st,ahci", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, st_ahci_match);
+
+static struct platform_driver st_ahci_driver = {
+       .driver = {
+               .name = "st_ahci",
+               .owner = THIS_MODULE,
+               .pm = &st_ahci_pm_ops,
+               .of_match_table = of_match_ptr(st_ahci_match),
+       },
+       .probe = st_ahci_probe,
+       .remove = ata_platform_remove_one,
+};
+module_platform_driver(st_ahci_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@st.com>");
+MODULE_AUTHOR("Francesco Virlinzi <francesco.virlinzi@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
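The st driver treats each reset line as optional: when devm_reset_control_get() fails, the pointer is simply cleared and the later assert/deassert calls are skipped. A minimal sketch of that pattern, with a hypothetical foo_ prefix:

#include <linux/platform_device.h>
#include <linux/reset.h>

/* Look up an optional reset line; NULL means "not wired up on this board" */
static struct reset_control *foo_get_optional_reset(struct platform_device *pdev,
						    const char *id)
{
	struct reset_control *rst = devm_reset_control_get(&pdev->dev, id);

	if (IS_ERR(rst)) {
		dev_info(&pdev->dev, "%s reset control not defined\n", id);
		return NULL;
	}

	return rst;
}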
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
new file mode 100644 (file)
index 0000000..42d3f64
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Allwinner sunxi AHCI SATA platform driver
+ * Copyright 2013 Olliver Schinagl <oliver@schinagl.nl>
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ * Based on code from Allwinner Technology Co., Ltd. <www.allwinnertech.com>,
+ * Daniel Wang <danielwang@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include "ahci.h"
+
+#define AHCI_BISTAFR   0x00a0
+#define AHCI_BISTCR    0x00a4
+#define AHCI_BISTFCTR  0x00a8
+#define AHCI_BISTSR    0x00ac
+#define AHCI_BISTDECR  0x00b0
+#define AHCI_DIAGNR0   0x00b4
+#define AHCI_DIAGNR1   0x00b8
+#define AHCI_OOBR      0x00bc
+#define AHCI_PHYCS0R   0x00c0
+#define AHCI_PHYCS1R   0x00c4
+#define AHCI_PHYCS2R   0x00c8
+#define AHCI_TIMER1MS  0x00e0
+#define AHCI_GPARAM1R  0x00e8
+#define AHCI_GPARAM2R  0x00ec
+#define AHCI_PPARAMR   0x00f0
+#define AHCI_TESTR     0x00f4
+#define AHCI_VERSIONR  0x00f8
+#define AHCI_IDR       0x00fc
+#define AHCI_RWCR      0x00fc
+#define AHCI_P0DMACR   0x0170
+#define AHCI_P0PHYCR   0x0178
+#define AHCI_P0PHYSR   0x017c
+
+static void sunxi_clrbits(void __iomem *reg, u32 clr_val)
+{
+       u32 reg_val;
+
+       reg_val = readl(reg);
+       reg_val &= ~(clr_val);
+       writel(reg_val, reg);
+}
+
+static void sunxi_setbits(void __iomem *reg, u32 set_val)
+{
+       u32 reg_val;
+
+       reg_val = readl(reg);
+       reg_val |= set_val;
+       writel(reg_val, reg);
+}
+
+static void sunxi_clrsetbits(void __iomem *reg, u32 clr_val, u32 set_val)
+{
+       u32 reg_val;
+
+       reg_val = readl(reg);
+       reg_val &= ~(clr_val);
+       reg_val |= set_val;
+       writel(reg_val, reg);
+}
+
+static u32 sunxi_getbits(void __iomem *reg, u8 mask, u8 shift)
+{
+       return (readl(reg) >> shift) & mask;
+}
+
+static int ahci_sunxi_phy_init(struct device *dev, void __iomem *reg_base)
+{
+       u32 reg_val;
+       int timeout;
+
+       /* This magic is from the original code */
+       writel(0, reg_base + AHCI_RWCR);
+       msleep(5);
+
+       sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(19));
+       sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+                        (0x7 << 24),
+                        (0x5 << 24) | BIT(23) | BIT(18));
+       sunxi_clrsetbits(reg_base + AHCI_PHYCS1R,
+                        (0x3 << 16) | (0x1f << 8) | (0x3 << 6),
+                        (0x2 << 16) | (0x6 << 8) | (0x2 << 6));
+       sunxi_setbits(reg_base + AHCI_PHYCS1R, BIT(28) | BIT(15));
+       sunxi_clrbits(reg_base + AHCI_PHYCS1R, BIT(19));
+       sunxi_clrsetbits(reg_base + AHCI_PHYCS0R,
+                        (0x7 << 20), (0x3 << 20));
+       sunxi_clrsetbits(reg_base + AHCI_PHYCS2R,
+                        (0x1f << 5), (0x19 << 5));
+       msleep(5);
+
+       sunxi_setbits(reg_base + AHCI_PHYCS0R, (0x1 << 19));
+
+       timeout = 250; /* Power up takes approx 50 us */
+       do {
+               reg_val = sunxi_getbits(reg_base + AHCI_PHYCS0R, 0x7, 28);
+               if (reg_val == 0x02)
+                       break;
+
+               if (--timeout == 0) {
+                       dev_err(dev, "PHY power up failed.\n");
+                       return -EIO;
+               }
+               udelay(1);
+       } while (1);
+
+       sunxi_setbits(reg_base + AHCI_PHYCS2R, (0x1 << 24));
+
+       timeout = 100; /* Calibration takes approx 10 us */
+       do {
+               reg_val = sunxi_getbits(reg_base + AHCI_PHYCS2R, 0x1, 24);
+               if (reg_val == 0x00)
+                       break;
+
+               if (--timeout == 0) {
+                       dev_err(dev, "PHY calibration failed.\n");
+                       return -EIO;
+               }
+               udelay(1);
+       } while (1);
+
+       msleep(15);
+
+       writel(0x7, reg_base + AHCI_RWCR);
+
+       return 0;
+}
+
+static void ahci_sunxi_start_engine(struct ata_port *ap)
+{
+       void __iomem *port_mmio = ahci_port_base(ap);
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+
+       /* Setup DMA before DMA start */
+       sunxi_clrsetbits(hpriv->mmio + AHCI_P0DMACR, 0x0000ff00, 0x00004400);
+
+       /* Start DMA */
+       sunxi_setbits(port_mmio + PORT_CMD, PORT_CMD_START);
+}
+
+static const struct ata_port_info ahci_sunxi_port_info = {
+       AHCI_HFLAGS(AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+                         AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
+       .flags          = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_platform_ops,
+};
+
+static int ahci_sunxi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ahci_host_priv *hpriv;
+       int rc;
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       hpriv->start_engine = ahci_sunxi_start_engine;
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
+
+       rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+       if (rc)
+               goto disable_resources;
+
+       rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info, 0, 0);
+       if (rc)
+               goto disable_resources;
+
+       return 0;
+
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+       return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_sunxi_resume(struct device *dev)
+{
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       int rc;
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
+
+       rc = ahci_sunxi_phy_init(dev, hpriv->mmio);
+       if (rc)
+               goto disable_resources;
+
+       rc = ahci_platform_resume_host(dev);
+       if (rc)
+               goto disable_resources;
+
+       return 0;
+
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+       return rc;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
+                        ahci_sunxi_resume);
+
+static const struct of_device_id ahci_sunxi_of_match[] = {
+       { .compatible = "allwinner,sun4i-a10-ahci", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
+
+static struct platform_driver ahci_sunxi_driver = {
+       .probe = ahci_sunxi_probe,
+       .remove = ata_platform_remove_one,
+       .driver = {
+               .name = "ahci-sunxi",
+               .owner = THIS_MODULE,
+               .of_match_table = ahci_sunxi_of_match,
+               .pm = &ahci_sunxi_pm_ops,
+       },
+};
+module_platform_driver(ahci_sunxi_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi AHCI SATA driver");
+MODULE_AUTHOR("Olliver Schinagl <oliver@schinagl.nl>");
+MODULE_LICENSE("GPL");
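ahci_sunxi relies on the new hpriv->start_engine hook (see the libahci changes further down): the override has to be installed after ahci_platform_get_resources() and before the host is initialized, otherwise libahci installs the stock ahci_start_engine() as the default. A sketch of the pattern, with hypothetical soc_ahci_* names:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/ahci_platform.h>
#include "ahci.h"

/* Custom engine start: apply controller quirks, then set PORT_CMD_START */
static void soc_ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* SoC specific DMA tweaks would go here */
	writel(readl(port_mmio + PORT_CMD) | PORT_CMD_START,
	       port_mmio + PORT_CMD);
}

static int soc_ahci_probe(struct platform_device *pdev)
{
	struct ahci_host_priv *hpriv = ahci_platform_get_resources(pdev);

	if (IS_ERR(hpriv))
		return PTR_ERR(hpriv);

	/* If left NULL, libahci defaults this to ahci_start_engine() */
	hpriv->start_engine = soc_ahci_start_engine;

	/* ... enable resources and call ahci_platform_init_host() ... */
	return 0;
}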
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
new file mode 100644 (file)
index 0000000..77c89bf
--- /dev/null
@@ -0,0 +1,486 @@
+/*
+ * AppliedMicro X-Gene SoC SATA Host Controller Driver
+ *
+ * Copyright (c) 2014, Applied Micro Circuits Corporation
+ * Author: Loc Ho <lho@apm.com>
+ *         Tuan Phan <tphan@apm.com>
+ *         Suman Tripathi <stripathi@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * NOTE: PM support is not currently available.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/phy/phy.h>
+#include "ahci.h"
+
+/* Max # of disks per controller */
+#define MAX_AHCI_CHN_PERCTR            2
+
+/* MUX CSR */
+#define SATA_ENET_CONFIG_REG           0x00000000
+#define  CFG_SATA_ENET_SELECT_MASK     0x00000001
+
+/* SATA core host controller CSR */
+#define SLVRDERRATTRIBUTES             0x00000000
+#define SLVWRERRATTRIBUTES             0x00000004
+#define MSTRDERRATTRIBUTES             0x00000008
+#define MSTWRERRATTRIBUTES             0x0000000c
+#define BUSCTLREG                      0x00000014
+#define IOFMSTRWAUX                    0x00000018
+#define INTSTATUSMASK                  0x0000002c
+#define ERRINTSTATUS                   0x00000030
+#define ERRINTSTATUSMASK               0x00000034
+
+/* SATA host AHCI CSR */
+#define PORTCFG                                0x000000a4
+#define  PORTADDR_SET(dst, src) \
+               (((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))
+#define PORTPHY1CFG            0x000000a8
+#define PORTPHY1CFG_FRCPHYRDY_SET(dst, src) \
+               (((dst) & ~0x00100000) | (((u32)(src) << 0x14) & 0x00100000))
+#define PORTPHY2CFG                    0x000000ac
+#define PORTPHY3CFG                    0x000000b0
+#define PORTPHY4CFG                    0x000000b4
+#define PORTPHY5CFG                    0x000000b8
+#define SCTL0                          0x0000012C
+#define PORTPHY5CFG_RTCHG_SET(dst, src) \
+               (((dst) & ~0xfff00000) | (((u32)(src) << 0x14) & 0xfff00000))
+#define PORTAXICFG_EN_CONTEXT_SET(dst, src) \
+               (((dst) & ~0x01000000) | (((u32)(src) << 0x18) & 0x01000000))
+#define PORTAXICFG                     0x000000bc
+#define PORTAXICFG_OUTTRANS_SET(dst, src) \
+               (((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
+
+/* SATA host controller AXI CSR */
+#define INT_SLV_TMOMASK                        0x00000010
+
+/* SATA diagnostic CSR */
+#define CFG_MEM_RAM_SHUTDOWN           0x00000070
+#define BLOCK_MEM_RDY                  0x00000074
+
+struct xgene_ahci_context {
+       struct ahci_host_priv *hpriv;
+       struct device *dev;
+       void __iomem *csr_core;         /* Core CSR address of IP */
+       void __iomem *csr_diag;         /* Diag CSR address of IP */
+       void __iomem *csr_axi;          /* AXI CSR address of IP */
+       void __iomem *csr_mux;          /* MUX CSR address of IP */
+};
+
+static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
+{
+       dev_dbg(ctx->dev, "Release memory from shutdown\n");
+       writel(0x0, ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN);
+       readl(ctx->csr_diag + CFG_MEM_RAM_SHUTDOWN); /* Force a barrier */
+       msleep(1);      /* reset may take up to 1ms */
+       if (readl(ctx->csr_diag + BLOCK_MEM_RDY) != 0xFFFFFFFF) {
+               dev_err(ctx->dev, "failed to release memory from shutdown\n");
+               return -ENODEV;
+       }
+       return 0;
+}
+
+/**
+ * xgene_ahci_read_id - Read ID data from the specified device
+ * @dev: device
+ * @tf: proposed taskfile
+ * @id: data buffer
+ *
+ * This custom read ID function is required due to the fact that the HW
+ * does not support DEVSLP and the controller state machine may get stuck
+ * after processing the ID query command.
+ */
+static unsigned int xgene_ahci_read_id(struct ata_device *dev,
+                                      struct ata_taskfile *tf, u16 *id)
+{
+       u32 err_mask;
+       void __iomem *port_mmio = ahci_port_base(dev->link->ap);
+
+       err_mask = ata_do_dev_read_id(dev, tf, id);
+       if (err_mask)
+               return err_mask;
+
+       /*
+        * Mask the reserved area. Word 78 (Serial ATA features supported):
+        * bit15-8: reserved
+        * bit7: NCQ autosense
+        * bit6: Software settings preservation supported
+        * bit5: reserved
+        * bit4: In-order SATA delivery supported
+        * bit3: DIPM requests supported
+        * bit2: DMA Setup FIS Auto-Activate optimization supported
+        * bit1: DMA Setup FIS non-zero buffer offsets supported
+        * bit0: reserved
+        *
+        * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
+        */
+       id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+
+       /*
+        * Due to HW errata, restart the port if no other command active.
+        * Otherwise the controller may get stuck.
+        */
+       if (!readl(port_mmio + PORT_CMD_ISSUE)) {
+               writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
+               readl(port_mmio + PORT_CMD);    /* Force a barrier */
+               writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
+               readl(port_mmio + PORT_CMD);    /* Force a barrier */
+       }
+       return 0;
+}
+
+static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
+{
+       void __iomem *mmio = ctx->hpriv->mmio;
+       u32 val;
+
+       dev_dbg(ctx->dev, "port configure mmio 0x%p channel %d\n",
+               mmio, channel);
+       val = readl(mmio + PORTCFG);
+       val = PORTADDR_SET(val, channel == 0 ? 2 : 3);
+       writel(val, mmio + PORTCFG);
+       readl(mmio + PORTCFG);  /* Force a barrier */
+       /* Disable fix rate */
+       writel(0x0001fffe, mmio + PORTPHY1CFG);
+       readl(mmio + PORTPHY1CFG); /* Force a barrier */
+       writel(0x5018461c, mmio + PORTPHY2CFG);
+       readl(mmio + PORTPHY2CFG); /* Force a barrier */
+       writel(0x1c081907, mmio + PORTPHY3CFG);
+       readl(mmio + PORTPHY3CFG); /* Force a barrier */
+       writel(0x1c080815, mmio + PORTPHY4CFG);
+       readl(mmio + PORTPHY4CFG); /* Force a barrier */
+       /* Set window negotiation */
+       val = readl(mmio + PORTPHY5CFG);
+       val = PORTPHY5CFG_RTCHG_SET(val, 0x300);
+       writel(val, mmio + PORTPHY5CFG);
+       readl(mmio + PORTPHY5CFG); /* Force a barrier */
+       val = readl(mmio + PORTAXICFG);
+       val = PORTAXICFG_EN_CONTEXT_SET(val, 0x1); /* Enable context mgmt */
+       val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
+       writel(val, mmio + PORTAXICFG);
+       readl(mmio + PORTAXICFG); /* Force a barrier */
+}
+
+/**
+ * xgene_ahci_do_hardreset - Issue the actual COMRESET
+ * @link: link to reset
+ * @deadline: deadline jiffies for the operation
+ * @online: Return value to indicate if device online
+ *
+ * Due to the limitation of the hardware PHY, a different set of settings is
+ * required for each supported disk speed - Gen3 (6.0Gbps), Gen2 (3.0Gbps),
+ * and Gen1 (1.5Gbps). Otherwise, during long I/O stress tests, the PHY will
+ * report disparity errors and the like. In addition, during COMRESET, errors
+ * can be reported in the PORT_SCR_ERR register. For SERR_DISPARITY and
+ * SERR_10B_8B_ERR, the PHY receiver line must be reset. The following
+ * algorithm is used to properly configure the hardware PHY during COMRESET:
+ *
+ * Alg Part 1:
+ * 1. Start the PHY at Gen3 speed (default setting)
+ * 2. Issue the COMRESET
+ * 3. If no link, go to Alg Part 3
+ * 4. If the link is up, determine if the negotiated speed matches the
+ *    PHY configured speed
+ * 5. If they match, go to Alg Part 2
+ * 6. If they do not match and this is the first attempt, configure the PHY
+ *    for the link-up disk speed and repeat step 2
+ * 7. Go to Alg Part 2
+ *
+ * Alg Part 2:
+ * 1. On link up, if any SERR_DISPARITY or SERR_10B_8B_ERR errors are
+ *    reported in the PORT_SCR_ERR register, reset the PHY receiver line
+ * 2. Go to Alg Part 3
+ *
+ * Alg Part 3:
+ * 1. Clear any pending errors from the PORT_SCR_ERR register.
+ *
+ * NOTE: For the initial version, Gen1/Gen2 are NOT supported. In addition,
+ *       until the underlying PHY supports a method to reset the receiver
+ *       line, only a warning message is printed on detection of
+ *       SERR_DISPARITY or SERR_10B_8B_ERR errors.
+ */
+static int xgene_ahci_do_hardreset(struct ata_link *link,
+                                  unsigned long deadline, bool *online)
+{
+       const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+       struct ata_port *ap = link->ap;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       struct xgene_ahci_context *ctx = hpriv->plat_data;
+       struct ahci_port_priv *pp = ap->private_data;
+       u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+       void __iomem *port_mmio = ahci_port_base(ap);
+       struct ata_taskfile tf;
+       int rc;
+       u32 val;
+
+       /* clear D2H reception area to properly wait for D2H FIS */
+       ata_tf_init(link->device, &tf);
+       tf.command = ATA_BUSY;
+       ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+       rc = sata_link_hardreset(link, timing, deadline, online,
+                                ahci_check_ready);
+
+       val = readl(port_mmio + PORT_SCR_ERR);
+       if (val & (SERR_DISPARITY | SERR_10B_8B_ERR))
+               dev_warn(ctx->dev, "link has error\n");
+
+       /* clear all errors if any pending */
+       val = readl(port_mmio + PORT_SCR_ERR);
+       writel(val, port_mmio + PORT_SCR_ERR);
+
+       return rc;
+}
+
+static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
+                               unsigned long deadline)
+{
+       struct ata_port *ap = link->ap;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       void __iomem *port_mmio = ahci_port_base(ap);
+       bool online;
+       int rc;
+       u32 portcmd_saved;
+       u32 portclb_saved;
+       u32 portclbhi_saved;
+       u32 portrxfis_saved;
+       u32 portrxfishi_saved;
+
+       /* As hardreset resets these CSRs, save them to restore later */
+       portcmd_saved = readl(port_mmio + PORT_CMD);
+       portclb_saved = readl(port_mmio + PORT_LST_ADDR);
+       portclbhi_saved = readl(port_mmio + PORT_LST_ADDR_HI);
+       portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
+       portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
+
+       ahci_stop_engine(ap);
+
+       rc = xgene_ahci_do_hardreset(link, deadline, &online);
+
+       /* As controller hardreset clears them, restore them */
+       writel(portcmd_saved, port_mmio + PORT_CMD);
+       writel(portclb_saved, port_mmio + PORT_LST_ADDR);
+       writel(portclbhi_saved, port_mmio + PORT_LST_ADDR_HI);
+       writel(portrxfis_saved, port_mmio + PORT_FIS_ADDR);
+       writel(portrxfishi_saved, port_mmio + PORT_FIS_ADDR_HI);
+
+       hpriv->start_engine(ap);
+
+       if (online)
+               *class = ahci_dev_classify(ap);
+
+       return rc;
+}
+
+static void xgene_ahci_host_stop(struct ata_host *host)
+{
+       struct ahci_host_priv *hpriv = host->private_data;
+
+       ahci_platform_disable_resources(hpriv);
+}
+
+static struct ata_port_operations xgene_ahci_ops = {
+       .inherits = &ahci_ops,
+       .host_stop = xgene_ahci_host_stop,
+       .hardreset = xgene_ahci_hardreset,
+       .read_id = xgene_ahci_read_id,
+};
+
+static const struct ata_port_info xgene_ahci_port_info = {
+       AHCI_HFLAGS(AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ),
+       .flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
+       .pio_mask = ATA_PIO4,
+       .udma_mask = ATA_UDMA6,
+       .port_ops = &xgene_ahci_ops,
+};
+
+static int xgene_ahci_hw_init(struct ahci_host_priv *hpriv)
+{
+       struct xgene_ahci_context *ctx = hpriv->plat_data;
+       int i;
+       int rc;
+       u32 val;
+
+       /* Take the IP RAM out of shutdown */
+       rc = xgene_ahci_init_memram(ctx);
+       if (rc)
+               return rc;
+
+       for (i = 0; i < MAX_AHCI_CHN_PERCTR; i++)
+               xgene_ahci_set_phy_cfg(ctx, i);
+
+       /* AXI disable Mask */
+       writel(0xffffffff, hpriv->mmio + HOST_IRQ_STAT);
+       readl(hpriv->mmio + HOST_IRQ_STAT); /* Force a barrier */
+       writel(0, ctx->csr_core + INTSTATUSMASK);
+       val = readl(ctx->csr_core + INTSTATUSMASK); /* Force a barrier */
+       dev_dbg(ctx->dev, "top level interrupt mask 0x%X value 0x%08X\n",
+               INTSTATUSMASK, val);
+
+       writel(0x0, ctx->csr_core + ERRINTSTATUSMASK);
+       readl(ctx->csr_core + ERRINTSTATUSMASK); /* Force a barrier */
+       writel(0x0, ctx->csr_axi + INT_SLV_TMOMASK);
+       readl(ctx->csr_axi + INT_SLV_TMOMASK);
+
+       /* Enable AXI Interrupt */
+       writel(0xffffffff, ctx->csr_core + SLVRDERRATTRIBUTES);
+       writel(0xffffffff, ctx->csr_core + SLVWRERRATTRIBUTES);
+       writel(0xffffffff, ctx->csr_core + MSTRDERRATTRIBUTES);
+       writel(0xffffffff, ctx->csr_core + MSTWRERRATTRIBUTES);
+
+       /* Enable coherency */
+       val = readl(ctx->csr_core + BUSCTLREG);
+       val &= ~0x00000002;     /* Enable write coherency */
+       val &= ~0x00000001;     /* Enable read coherency */
+       writel(val, ctx->csr_core + BUSCTLREG);
+
+       val = readl(ctx->csr_core + IOFMSTRWAUX);
+       val |= (1 << 3);        /* Enable read coherency */
+       val |= (1 << 9);        /* Enable write coherency */
+       writel(val, ctx->csr_core + IOFMSTRWAUX);
+       val = readl(ctx->csr_core + IOFMSTRWAUX);
+       dev_dbg(ctx->dev, "coherency 0x%X value 0x%08X\n",
+               IOFMSTRWAUX, val);
+
+       return rc;
+}
+
+static int xgene_ahci_mux_select(struct xgene_ahci_context *ctx)
+{
+       u32 val;
+
+       /* Check for optional MUX resource */
+       if (IS_ERR(ctx->csr_mux))
+               return 0;
+
+       val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+       val &= ~CFG_SATA_ENET_SELECT_MASK;
+       writel(val, ctx->csr_mux + SATA_ENET_CONFIG_REG);
+       val = readl(ctx->csr_mux + SATA_ENET_CONFIG_REG);
+       return val & CFG_SATA_ENET_SELECT_MASK ? -1 : 0;
+}
+
+static int xgene_ahci_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ahci_host_priv *hpriv;
+       struct xgene_ahci_context *ctx;
+       struct resource *res;
+       int rc;
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       hpriv->plat_data = ctx;
+       ctx->hpriv = hpriv;
+       ctx->dev = dev;
+
+       /* Retrieve the IP core resource */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       ctx->csr_core = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ctx->csr_core))
+               return PTR_ERR(ctx->csr_core);
+
+       /* Retrieve the IP diagnostic resource */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+       ctx->csr_diag = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ctx->csr_diag))
+               return PTR_ERR(ctx->csr_diag);
+
+       /* Retrieve the IP AXI resource */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+       ctx->csr_axi = devm_ioremap_resource(dev, res);
+       if (IS_ERR(ctx->csr_axi))
+               return PTR_ERR(ctx->csr_axi);
+
+       /* Retrieve the optional IP mux resource */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+       ctx->csr_mux = devm_ioremap_resource(dev, res);
+
+       dev_dbg(dev, "VAddr 0x%p Mmio VAddr 0x%p\n", ctx->csr_core,
+               hpriv->mmio);
+
+       /* Select ATA */
+       rc = xgene_ahci_mux_select(ctx);
+       if (rc) {
+               dev_err(dev, "SATA mux selection failed, error %d\n", rc);
+               return -ENODEV;
+       }
+
+       /* Due to errata, HW requires full toggle transition */
+       rc = ahci_platform_enable_clks(hpriv);
+       if (rc)
+               goto disable_resources;
+       ahci_platform_disable_clks(hpriv);
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               goto disable_resources;
+
+       /* Configure the host controller */
+       xgene_ahci_hw_init(hpriv);
+
+       /*
+        * Setup DMA mask. This is preliminary until the DMA range is sorted
+        * out.
+        */
+       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+       if (rc) {
+               dev_err(dev, "Unable to set dma mask\n");
+               goto disable_resources;
+       }
+
+       rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info, 0, 0);
+       if (rc)
+               goto disable_resources;
+
+       dev_dbg(dev, "X-Gene SATA host controller initialized\n");
+       return 0;
+
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+       return rc;
+}
+
+static const struct of_device_id xgene_ahci_of_match[] = {
+       {.compatible = "apm,xgene-ahci"},
+       {},
+};
+MODULE_DEVICE_TABLE(of, xgene_ahci_of_match);
+
+static struct platform_driver xgene_ahci_driver = {
+       .probe = xgene_ahci_probe,
+       .remove = ata_platform_remove_one,
+       .driver = {
+               .name = "xgene-ahci",
+               .owner = THIS_MODULE,
+               .of_match_table = xgene_ahci_of_match,
+       },
+};
+
+module_platform_driver(xgene_ahci_driver);
+
+MODULE_DESCRIPTION("APM X-Gene AHCI SATA driver");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.4");
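The PHY and AXI setup above leans on read-modify-write field macros such as PORTADDR_SET() and PORTPHY5CFG_RTCHG_SET(), each write followed by a dummy read so it is posted before the next step. A small illustrative sketch of that idiom (FOO_FIELD_SET and foo_set_field are made-up names, not from the driver):

#include <linux/io.h>
#include <linux/types.h>

/* Made-up 6-bit field at bits [5:0], mirroring the PORTADDR_SET() style */
#define FOO_FIELD_SET(dst, src) \
		(((dst) & ~0x0000003f) | (((u32)(src)) & 0x0000003f))

static void foo_set_field(void __iomem *csr, u32 value)
{
	u32 val = readl(csr);			/* current register contents */

	val = FOO_FIELD_SET(val, value);	/* touch only the field bits */
	writel(val, csr);
	readl(csr);				/* read back to force a barrier */
}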
index 7d196656adb5581533517a6ed0ec49d8c2966b58..9498a7d3846fef6e2c88bdd8596d3f2080e0349e 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 36605abe5a6786dd8282bceb798c10663e3b16f6..6bd4f660b4e15966ca2c351b4501c0521491de32 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -394,6 +393,9 @@ static ssize_t ahci_show_em_supported(struct device *dev,
  *
  *     If inconsistent, config values are fixed up by this function.
  *
+ *     If it is not already set, this function sets hpriv->start_engine to
+ *     ahci_start_engine.
+ *
  *     LOCKING:
  *     None.
  */
@@ -500,6 +502,9 @@ void ahci_save_initial_config(struct device *dev,
        hpriv->cap = cap;
        hpriv->cap2 = cap2;
        hpriv->port_map = port_map;
+
+       if (!hpriv->start_engine)
+               hpriv->start_engine = ahci_start_engine;
 }
 EXPORT_SYMBOL_GPL(ahci_save_initial_config);
 
@@ -766,7 +771,7 @@ static void ahci_start_port(struct ata_port *ap)
 
        /* enable DMA */
        if (!(hpriv->flags & AHCI_HFLAG_DELAY_ENGINE))
-               ahci_start_engine(ap);
+               hpriv->start_engine(ap);
 
        /* turn on LEDs */
        if (ap->flags & ATA_FLAG_EM) {
@@ -1032,12 +1037,13 @@ static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
                                size_t size)
 {
-       int state;
+       unsigned int state;
        int pmp;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_em_priv *emp;
 
-       state = simple_strtoul(buf, NULL, 0);
+       if (kstrtouint(buf, 0, &state) < 0)
+               return -EINVAL;
 
        /* get the slot number from the message */
        pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
@@ -1234,7 +1240,7 @@ int ahci_kick_engine(struct ata_port *ap)
 
        /* restart engine */
  out_restart:
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_kick_engine);
@@ -1387,8 +1393,8 @@ static int ahci_bad_pmp_check_ready(struct ata_link *link)
        return ata_check_ready(status);
 }
 
-int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
-                               unsigned long deadline)
+static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+                                   unsigned long deadline)
 {
        struct ata_port *ap = link->ap;
        void __iomem *port_mmio = ahci_port_base(ap);
@@ -1426,6 +1432,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
        const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        struct ata_taskfile tf;
        bool online;
@@ -1443,7 +1450,7 @@ static int ahci_hardreset(struct ata_link *link, unsigned int *class,
        rc = sata_link_hardreset(link, timing, deadline, &online,
                                 ahci_check_ready);
 
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
 
        if (online)
                *class = ahci_dev_classify(ap);
@@ -1629,7 +1636,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
        }
 
        if (irq_stat & PORT_IRQ_UNK_FIS) {
-               u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
+               u32 *unk = pp->rx_fis + RX_FIS_UNK;
 
                active_ehi->err_mask |= AC_ERR_HSM;
                active_ehi->action |= ATA_EH_RESET;
@@ -2007,10 +2014,12 @@ static void ahci_thaw(struct ata_port *ap)
 
 void ahci_error_handler(struct ata_port *ap)
 {
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+
        if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
                /* restart engine */
                ahci_stop_engine(ap);
-               ahci_start_engine(ap);
+               hpriv->start_engine(ap);
        }
 
        sata_pmp_error_handler(ap);
@@ -2031,6 +2040,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 
 static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
 {
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ata_device *dev = ap->link.device;
        u32 devslp, dm, dito, mdat, deto;
@@ -2094,7 +2104,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
                   PORT_DEVSLP_ADSE);
        writel(devslp, port_mmio + PORT_DEVSLP);
 
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
 
        /* enable device sleep feature for the drive */
        err_mask = ata_dev_set_feature(dev,
@@ -2106,6 +2116,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
 
 static void ahci_enable_fbs(struct ata_port *ap)
 {
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ahci_port_priv *pp = ap->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 fbs;
@@ -2134,11 +2145,12 @@ static void ahci_enable_fbs(struct ata_port *ap)
        } else
                dev_err(ap->host->dev, "Failed to enable FBS\n");
 
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
 }
 
 static void ahci_disable_fbs(struct ata_port *ap)
 {
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ahci_port_priv *pp = ap->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 fbs;
@@ -2166,7 +2178,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
                pp->fbs_enabled = false;
        }
 
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
 }
 
 static void ahci_pmp_attach(struct ata_port *ap)
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
new file mode 100644 (file)
index 0000000..7cb3a85
--- /dev/null
@@ -0,0 +1,541 @@
+/*
+ * AHCI SATA platform library
+ *
+ * Copyright 2004-2005  Red Hat, Inc.
+ *   Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010  MontaVista Software, LLC.
+ *   Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pm_runtime.h>
+#include "ahci.h"
+
+static void ahci_host_stop(struct ata_host *host);
+
+struct ata_port_operations ahci_platform_ops = {
+       .inherits       = &ahci_ops,
+       .host_stop      = ahci_host_stop,
+};
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
+
+static struct scsi_host_template ahci_platform_sht = {
+       AHCI_SHT("ahci_platform"),
+};
+
+/**
+ * ahci_platform_enable_clks - Enable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the clks found in hpriv->clks, starting at
+ * index 0. If any clk fails to enable, it disables all the clks already
+ * enabled in reverse order, and then returns an error.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_clks(struct ahci_host_priv *hpriv)
+{
+       int c, rc;
+
+       for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) {
+               rc = clk_prepare_enable(hpriv->clks[c]);
+               if (rc)
+                       goto disable_unprepare_clk;
+       }
+       return 0;
+
+disable_unprepare_clk:
+       while (--c >= 0)
+               clk_disable_unprepare(hpriv->clks[c]);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_clks);
+
+/**
+ * ahci_platform_disable_clks - Disable platform clocks
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all the clks found in hpriv->clks, in reverse
+ * order of ahci_platform_enable_clks (starting at the end of the array).
+ */
+void ahci_platform_disable_clks(struct ahci_host_priv *hpriv)
+{
+       int c;
+
+       for (c = AHCI_MAX_CLKS - 1; c >= 0; c--)
+               if (hpriv->clks[c])
+                       clk_disable_unprepare(hpriv->clks[c]);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
+
+/**
+ * ahci_platform_enable_resources - Enable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all ahci_platform managed resources in the
+ * following order:
+ * 1) Regulator
+ * 2) Clocks (through ahci_platform_enable_clks)
+ * 3) Phy
+ *
+ * If resource enabling fails at any point, the previously enabled resources
+ * are disabled in reverse order.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
+{
+       int rc;
+
+       if (hpriv->target_pwr) {
+               rc = regulator_enable(hpriv->target_pwr);
+               if (rc)
+                       return rc;
+       }
+
+       rc = ahci_platform_enable_clks(hpriv);
+       if (rc)
+               goto disable_regulator;
+
+       if (hpriv->phy) {
+               rc = phy_init(hpriv->phy);
+               if (rc)
+                       goto disable_clks;
+
+               rc = phy_power_on(hpriv->phy);
+               if (rc) {
+                       phy_exit(hpriv->phy);
+                       goto disable_clks;
+               }
+       }
+
+       return 0;
+
+disable_clks:
+       ahci_platform_disable_clks(hpriv);
+
+disable_regulator:
+       if (hpriv->target_pwr)
+               regulator_disable(hpriv->target_pwr);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
+
+/**
+ * ahci_platform_disable_resources - Disable platform resources
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all ahci_platform managed resources in the
+ * following order:
+ * 1) Phy
+ * 2) Clocks (through ahci_platform_disable_clks)
+ * 3) Regulator
+ */
+void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
+{
+       if (hpriv->phy) {
+               phy_power_off(hpriv->phy);
+               phy_exit(hpriv->phy);
+       }
+
+       ahci_platform_disable_clks(hpriv);
+
+       if (hpriv->target_pwr)
+               regulator_disable(hpriv->target_pwr);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_resources);
+
+static void ahci_platform_put_resources(struct device *dev, void *res)
+{
+       struct ahci_host_priv *hpriv = res;
+       int c;
+
+       if (hpriv->got_runtime_pm) {
+               pm_runtime_put_sync(dev);
+               pm_runtime_disable(dev);
+       }
+
+       for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++)
+               clk_put(hpriv->clks[c]);
+}
+
+/**
+ * ahci_platform_get_resources - Get platform resources
+ * @pdev: platform device to get resources for
+ *
+ * This function allocates an ahci_host_priv struct, and gets the following
+ * resources, storing a reference to them inside the returned struct:
+ *
+ * 1) mmio registers (IORESOURCE_MEM 0, mandatory)
+ * 2) regulator for controlling the target's power (optional)
+ * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the dev's devicetree node,
+ *    or, for non-devicetree enabled platforms, a single clock
+ * 4) phy (optional)
+ *
+ * RETURNS:
+ * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
+ */
+struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ahci_host_priv *hpriv;
+       struct clk *clk;
+       int i, rc = -ENOMEM;
+
+       if (!devres_open_group(dev, NULL, GFP_KERNEL))
+               return ERR_PTR(-ENOMEM);
+
+       hpriv = devres_alloc(ahci_platform_put_resources, sizeof(*hpriv),
+                            GFP_KERNEL);
+       if (!hpriv)
+               goto err_out;
+
+       devres_add(dev, hpriv);
+
+       hpriv->mmio = devm_ioremap_resource(dev,
+                             platform_get_resource(pdev, IORESOURCE_MEM, 0));
+       if (IS_ERR(hpriv->mmio)) {
+               dev_err(dev, "no mmio space\n");
+               rc = PTR_ERR(hpriv->mmio);
+               goto err_out;
+       }
+
+       hpriv->target_pwr = devm_regulator_get_optional(dev, "target");
+       if (IS_ERR(hpriv->target_pwr)) {
+               rc = PTR_ERR(hpriv->target_pwr);
+               if (rc == -EPROBE_DEFER)
+                       goto err_out;
+               hpriv->target_pwr = NULL;
+       }
+
+       for (i = 0; i < AHCI_MAX_CLKS; i++) {
+               /*
+                * For now we must use clk_get(dev, NULL) for the first clock,
+                * because some platforms (da850, spear13xx) are not yet
+                * converted to use devicetree for clocks.  For new platforms
+                * this is equivalent to of_clk_get(dev->of_node, 0).
+                */
+               if (i == 0)
+                       clk = clk_get(dev, NULL);
+               else
+                       clk = of_clk_get(dev->of_node, i);
+
+               if (IS_ERR(clk)) {
+                       rc = PTR_ERR(clk);
+                       if (rc == -EPROBE_DEFER)
+                               goto err_out;
+                       break;
+               }
+               hpriv->clks[i] = clk;
+       }
+
+       hpriv->phy = devm_phy_get(dev, "sata-phy");
+       if (IS_ERR(hpriv->phy)) {
+               rc = PTR_ERR(hpriv->phy);
+               switch (rc) {
+               case -ENODEV:
+               case -ENOSYS:
+                       /* continue normally */
+                       hpriv->phy = NULL;
+                       break;
+
+               case -EPROBE_DEFER:
+                       goto err_out;
+
+               default:
+                       dev_err(dev, "couldn't get sata-phy\n");
+                       goto err_out;
+               }
+       }
+
+       pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
+       hpriv->got_runtime_pm = true;
+
+       devres_remove_group(dev, NULL);
+       return hpriv;
+
+err_out:
+       devres_release_group(dev, NULL);
+       return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
+
+/**
+ * ahci_platform_init_host - Bring up an ahci-platform host
+ * @pdev: platform device pointer for the host
+ * @hpriv: ahci-host private data for the host
+ * @pi_template: template for the ata_port_info to use
+ * @force_port_map: param passed to ahci_save_initial_config
+ * @mask_port_map: param passed to ahci_save_initial_config
+ *
+ * This function does all the usual steps needed to bring up an
+ * ahci-platform host. Note that any necessary resources (i.e. clks, phy, etc.)
+ * must be initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_init_host(struct platform_device *pdev,
+                           struct ahci_host_priv *hpriv,
+                           const struct ata_port_info *pi_template,
+                           unsigned int force_port_map,
+                           unsigned int mask_port_map)
+{
+       struct device *dev = &pdev->dev;
+       struct ata_port_info pi = *pi_template;
+       const struct ata_port_info *ppi[] = { &pi, NULL };
+       struct ata_host *host;
+       int i, irq, n_ports, rc;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(dev, "no irq\n");
+               return -EINVAL;
+       }
+
+       /* prepare host */
+       hpriv->flags |= (unsigned long)pi.private_data;
+
+       ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
+
+       if (hpriv->cap & HOST_CAP_NCQ)
+               pi.flags |= ATA_FLAG_NCQ;
+
+       if (hpriv->cap & HOST_CAP_PMP)
+               pi.flags |= ATA_FLAG_PMP;
+
+       ahci_set_em_messages(hpriv, &pi);
+
+       /* CAP.NP sometimes indicates the index of the last enabled
+        * port, at other times, that of the last possible port, so
+        * determining the maximum port number requires looking at
+        * both CAP.NP and port_map.
+        */
+       n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+       host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+       if (!host)
+               return -ENOMEM;
+
+       host->private_data = hpriv;
+
+       if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+               host->flags |= ATA_HOST_PARALLEL_SCAN;
+       else
+               dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
+
+       if (pi.flags & ATA_FLAG_EM)
+               ahci_reset_em(host);
+
+       for (i = 0; i < host->n_ports; i++) {
+               struct ata_port *ap = host->ports[i];
+
+               ata_port_desc(ap, "mmio %pR",
+                             platform_get_resource(pdev, IORESOURCE_MEM, 0));
+               ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+               /* set enclosure management message type */
+               if (ap->flags & ATA_FLAG_EM)
+                       ap->em_message_type = hpriv->em_msg_type;
+
+               /* disabled/not-implemented port */
+               if (!(hpriv->port_map & (1 << i)))
+                       ap->ops = &ata_dummy_port_ops;
+       }
+
+       rc = ahci_reset_controller(host);
+       if (rc)
+               return rc;
+
+       ahci_init_controller(host);
+       ahci_print_info(host, "platform");
+
+       return ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+                                &ahci_platform_sht);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_init_host);
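Taken together, ahci_platform_get_resources(), ahci_platform_enable_resources() and ahci_platform_init_host() reduce a platform glue driver's probe() to a handful of calls. A minimal sketch, assuming a generic port_info; the example_* names are hypothetical and only illustrate the intended call order:

	static const struct ata_port_info example_port_info = {
		.flags		= AHCI_FLAG_COMMON,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &ahci_platform_ops,
	};

	static int example_ahci_probe(struct platform_device *pdev)
	{
		struct ahci_host_priv *hpriv;
		int rc;

		hpriv = ahci_platform_get_resources(pdev);	/* mmio, regulator, clks, phy */
		if (IS_ERR(hpriv))
			return PTR_ERR(hpriv);

		rc = ahci_platform_enable_resources(hpriv);	/* regulator -> clks -> phy */
		if (rc)
			return rc;

		rc = ahci_platform_init_host(pdev, hpriv, &example_port_info, 0, 0);
		if (rc)
			ahci_platform_disable_resources(hpriv);
		return rc;
	}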
+
+static void ahci_host_stop(struct ata_host *host)
+{
+       struct device *dev = host->dev;
+       struct ahci_platform_data *pdata = dev_get_platdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+
+       if (pdata && pdata->exit)
+               pdata->exit(dev);
+
+       ahci_platform_disable_resources(hpriv);
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * ahci_platform_suspend_host - Suspend an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to suspend an
+ * ahci-platform host. Note that any necessary resources (i.e. clks, phy, etc.)
+ * must be disabled after calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend_host(struct device *dev)
+{
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *mmio = hpriv->mmio;
+       u32 ctl;
+
+       if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+               dev_err(dev, "firmware update required for suspend/resume\n");
+               return -EIO;
+       }
+
+       /*
+        * AHCI spec rev1.1 section 8.3.3:
+        * Software must disable interrupts prior to requesting a
+        * transition of the HBA to D3 state.
+        */
+       ctl = readl(mmio + HOST_CTL);
+       ctl &= ~HOST_IRQ_EN;
+       writel(ctl, mmio + HOST_CTL);
+       readl(mmio + HOST_CTL); /* flush */
+
+       return ata_host_suspend(host, PMSG_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
+
+/**
+ * ahci_platform_resume_host - Resume an ahci-platform host
+ * @dev: device pointer for the host
+ *
+ * This function does all the usual steps needed to resume an ahci-platform
+ * host. Note that any necessary resources (i.e. clks, phy, etc.) must be
+ * initialized / enabled before calling this.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume_host(struct device *dev)
+{
+       struct ata_host *host = dev_get_drvdata(dev);
+       int rc;
+
+       if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+               rc = ahci_reset_controller(host);
+               if (rc)
+                       return rc;
+
+               ahci_init_controller(host);
+       }
+
+       ata_host_resume(host);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume_host);
+
+/**
+ * ahci_platform_suspend - Suspend an ahci-platform device
+ * @dev: the platform device to suspend
+ *
+ * This function suspends the host associated with the device, followed by
+ * disabling all the resources of the device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_suspend(struct device *dev)
+{
+       struct ahci_platform_data *pdata = dev_get_platdata(dev);
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       int rc;
+
+       rc = ahci_platform_suspend_host(dev);
+       if (rc)
+               return rc;
+
+       if (pdata && pdata->suspend) {
+               rc = pdata->suspend(dev);
+               if (rc)
+                       goto resume_host;
+       }
+
+       ahci_platform_disable_resources(hpriv);
+
+       return 0;
+
+resume_host:
+       ahci_platform_resume_host(dev);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_suspend);
+
+/**
+ * ahci_platform_resume - Resume an ahci-platform device
+ * @dev: the platform device to resume
+ *
+ * This function enables all the resources of the device followed by
+ * resuming the host associated with the device.
+ *
+ * RETURNS:
+ * 0 on success otherwise a negative error code
+ */
+int ahci_platform_resume(struct device *dev)
+{
+       struct ahci_platform_data *pdata = dev_get_platdata(dev);
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       int rc;
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
+
+       if (pdata && pdata->resume) {
+               rc = pdata->resume(dev);
+               if (rc)
+                       goto disable_resources;
+       }
+
+       rc = ahci_platform_resume_host(dev);
+       if (rc)
+               goto disable_resources;
+
+       /* We resumed so update PM runtime state */
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       return 0;
+
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_resume);
+#endif
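A glue driver built on this library would normally expose ahci_platform_suspend()/ahci_platform_resume() through the standard dev_pm_ops machinery. A short sketch, continuing the hypothetical example_ahci driver from the probe sketch above (example_ahci_probe and example_ahci_of_match are likewise assumed, not part of this patch):

	static SIMPLE_DEV_PM_OPS(example_ahci_pm_ops, ahci_platform_suspend,
				 ahci_platform_resume);

	static struct platform_driver example_ahci_driver = {
		.probe	= example_ahci_probe,
		.remove	= ata_platform_remove_one,
		.driver	= {
			.name		= "example-ahci",
			.owner		= THIS_MODULE,
			.of_match_table	= example_ahci_of_match,
			.pm		= &example_ahci_pm_ops,
		},
	};
	module_platform_driver(example_ahci_driver);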
+
+MODULE_DESCRIPTION("AHCI SATA platform library");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL");
index 9e69a5308693de59abd5c596244033af43f7b3a8..97a14fe47de1b357f34081379f96454a3b6e49dc 100644 (file)
@@ -38,6 +38,16 @@ static void ata_acpi_clear_gtf(struct ata_device *dev)
        dev->gtf_cache = NULL;
 }
 
+struct ata_acpi_hotplug_context {
+       struct acpi_hotplug_context hp;
+       union {
+               struct ata_port *ap;
+               struct ata_device *dev;
+       } data;
+};
+
+#define ata_hotplug_data(context) (container_of((context), struct ata_acpi_hotplug_context, hp)->data)
+
 /**
  * ata_dev_acpi_handle - provide the acpi_handle for an ata_device
  * @dev: the acpi_handle returned will correspond to this device
@@ -121,18 +131,17 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
                ata_port_wait_eh(ap);
 }
 
-static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_dev_notify_dock(struct acpi_device *adev, u32 event)
 {
-       struct ata_device *dev = data;
-
+       struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
        ata_acpi_handle_hotplug(dev->link->ap, dev, event);
+       return 0;
 }
 
-static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
+static int ata_acpi_ap_notify_dock(struct acpi_device *adev, u32 event)
 {
-       struct ata_port *ap = data;
-
-       ata_acpi_handle_hotplug(ap, NULL, event);
+       ata_acpi_handle_hotplug(ata_hotplug_data(adev->hp).ap, NULL, event);
+       return 0;
 }
 
 static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
@@ -154,31 +163,23 @@ static void ata_acpi_uevent(struct ata_port *ap, struct ata_device *dev,
        }
 }
 
-static void ata_acpi_ap_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_ap_uevent(struct acpi_device *adev, u32 event)
 {
-       ata_acpi_uevent(data, NULL, event);
+       ata_acpi_uevent(ata_hotplug_data(adev->hp).ap, NULL, event);
 }
 
-static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
+static void ata_acpi_dev_uevent(struct acpi_device *adev, u32 event)
 {
-       struct ata_device *dev = data;
+       struct ata_device *dev = ata_hotplug_data(adev->hp).dev;
        ata_acpi_uevent(dev->link->ap, dev, event);
 }
 
-static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
-       .handler = ata_acpi_dev_notify_dock,
-       .uevent = ata_acpi_dev_uevent,
-};
-
-static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
-       .handler = ata_acpi_ap_notify_dock,
-       .uevent = ata_acpi_ap_uevent,
-};
-
 /* bind acpi handle to pata port */
 void ata_acpi_bind_port(struct ata_port *ap)
 {
        struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
+       struct acpi_device *adev;
+       struct ata_acpi_hotplug_context *context;
 
        if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_companion)
                return;
@@ -188,9 +189,17 @@ void ata_acpi_bind_port(struct ata_port *ap)
        if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
                ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
 
-       /* we might be on a docking station */
-       register_hotplug_dock_device(ACPI_HANDLE(&ap->tdev),
-                                    &ata_acpi_ap_dock_ops, ap, NULL, NULL);
+       adev = ACPI_COMPANION(&ap->tdev);
+       if (!adev || adev->hp)
+               return;
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
+               return;
+
+       context->data.ap = ap;
+       acpi_initialize_hp_context(adev, &context->hp, ata_acpi_ap_notify_dock,
+                                  ata_acpi_ap_uevent);
 }
 
 void ata_acpi_bind_dev(struct ata_device *dev)
@@ -198,7 +207,8 @@ void ata_acpi_bind_dev(struct ata_device *dev)
        struct ata_port *ap = dev->link->ap;
        struct acpi_device *port_companion = ACPI_COMPANION(&ap->tdev);
        struct acpi_device *host_companion = ACPI_COMPANION(ap->host->dev);
-       struct acpi_device *parent;
+       struct acpi_device *parent, *adev;
+       struct ata_acpi_hotplug_context *context;
        u64 adr;
 
        /*
@@ -221,9 +231,17 @@ void ata_acpi_bind_dev(struct ata_device *dev)
        }
 
        acpi_preset_companion(&dev->tdev, parent, adr);
+       adev = ACPI_COMPANION(&dev->tdev);
+       if (!adev || adev->hp)
+               return;
+
+       context = kzalloc(sizeof(*context), GFP_KERNEL);
+       if (!context)
+               return;
 
-       register_hotplug_dock_device(ata_dev_acpi_handle(dev),
-                                    &ata_acpi_dev_dock_ops, dev, NULL, NULL);
+       context->data.dev = dev;
+       acpi_initialize_hp_context(adev, &context->hp, ata_acpi_dev_notify_dock,
+                                  ata_acpi_dev_uevent);
 }
 
 /**
@@ -835,6 +853,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
                ata_for_each_dev(dev, &ap->link, ALL) {
                        ata_acpi_clear_gtf(dev);
                        if (ata_dev_enabled(dev) &&
+                           ata_dev_acpi_handle(dev) &&
                            ata_dev_get_GTF(dev, NULL) >= 0)
                                dev->flags |= ATA_DFLAG_ACPI_PENDING;
                }
index 8cb2522d592ac87f7b144f1f41d244cd5df777dc..34406f7fdd7a1274da79601489a30e6df774290c 100644 (file)
@@ -5352,22 +5352,17 @@ bool ata_link_offline(struct ata_link *link)
 }
 
 #ifdef CONFIG_PM
-static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
-                              unsigned int action, unsigned int ehi_flags,
-                              int *async)
+static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
+                               unsigned int action, unsigned int ehi_flags,
+                               bool async)
 {
        struct ata_link *link;
        unsigned long flags;
-       int rc = 0;
 
        /* Previous resume operation might still be in
         * progress.  Wait for PM_PENDING to clear.
         */
        if (ap->pflags & ATA_PFLAG_PM_PENDING) {
-               if (async) {
-                       *async = -EAGAIN;
-                       return 0;
-               }
                ata_port_wait_eh(ap);
                WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
        }
@@ -5376,11 +5371,6 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
        spin_lock_irqsave(ap->lock, flags);
 
        ap->pm_mesg = mesg;
-       if (async)
-               ap->pm_result = async;
-       else
-               ap->pm_result = &rc;
-
        ap->pflags |= ATA_PFLAG_PM_PENDING;
        ata_for_each_link(link, ap, HOST_FIRST) {
                link->eh_info.action |= action;
@@ -5391,87 +5381,81 @@ static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
 
        spin_unlock_irqrestore(ap->lock, flags);
 
-       /* wait and check result */
        if (!async) {
                ata_port_wait_eh(ap);
                WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
        }
-
-       return rc;
 }
 
-static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async)
+/*
+ * On some hardware, a device fails to respond after being spun down for suspend.  As
+ * the device won't be used before being resumed, we don't need to touch the
+ * device.  Ask EH to skip the usual stuff and proceed directly to suspend.
+ *
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ */
+static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
+                                                | ATA_EHI_NO_AUTOPSY
+                                                | ATA_EHI_NO_RECOVERY;
+
+static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
 {
-       /*
-        * On some hardware, device fails to respond after spun down
-        * for suspend.  As the device won't be used before being
-        * resumed, we don't need to touch the device.  Ask EH to skip
-        * the usual stuff and proceed directly to suspend.
-        *
-        * http://thread.gmane.org/gmane.linux.ide/46764
-        */
-       unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
-                                ATA_EHI_NO_RECOVERY;
-       return ata_port_request_pm(ap, mesg, 0, ehi_flags, async);
+       ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
 }
 
-static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
+static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
 {
-       struct ata_port *ap = to_ata_port(dev);
-
-       return __ata_port_suspend_common(ap, mesg, NULL);
+       ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
 }
 
-static int ata_port_suspend(struct device *dev)
+static int ata_port_pm_suspend(struct device *dev)
 {
+       struct ata_port *ap = to_ata_port(dev);
+
        if (pm_runtime_suspended(dev))
                return 0;
 
-       return ata_port_suspend_common(dev, PMSG_SUSPEND);
+       ata_port_suspend(ap, PMSG_SUSPEND);
+       return 0;
 }
 
-static int ata_port_do_freeze(struct device *dev)
+static int ata_port_pm_freeze(struct device *dev)
 {
+       struct ata_port *ap = to_ata_port(dev);
+
        if (pm_runtime_suspended(dev))
                return 0;
 
-       return ata_port_suspend_common(dev, PMSG_FREEZE);
+       ata_port_suspend(ap, PMSG_FREEZE);
+       return 0;
 }
 
-static int ata_port_poweroff(struct device *dev)
+static int ata_port_pm_poweroff(struct device *dev)
 {
-       return ata_port_suspend_common(dev, PMSG_HIBERNATE);
+       ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
+       return 0;
 }
 
-static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg,
-                                   int *async)
-{
-       int rc;
+static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
+                                               | ATA_EHI_QUIET;
 
-       rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET,
-               ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async);
-       return rc;
+static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
+{
+       ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
 }
 
-static int ata_port_resume_common(struct device *dev, pm_message_t mesg)
+static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
 {
-       struct ata_port *ap = to_ata_port(dev);
-
-       return __ata_port_resume_common(ap, mesg, NULL);
+       ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
 }
 
-static int ata_port_resume(struct device *dev)
+static int ata_port_pm_resume(struct device *dev)
 {
-       int rc;
-
-       rc = ata_port_resume_common(dev, PMSG_RESUME);
-       if (!rc) {
-               pm_runtime_disable(dev);
-               pm_runtime_set_active(dev);
-               pm_runtime_enable(dev);
-       }
-
-       return rc;
+       ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       return 0;
 }
 
 /*
@@ -5500,21 +5484,23 @@ static int ata_port_runtime_idle(struct device *dev)
 
 static int ata_port_runtime_suspend(struct device *dev)
 {
-       return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
+       ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
+       return 0;
 }
 
 static int ata_port_runtime_resume(struct device *dev)
 {
-       return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
+       ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
+       return 0;
 }
 
 static const struct dev_pm_ops ata_port_pm_ops = {
-       .suspend = ata_port_suspend,
-       .resume = ata_port_resume,
-       .freeze = ata_port_do_freeze,
-       .thaw = ata_port_resume,
-       .poweroff = ata_port_poweroff,
-       .restore = ata_port_resume,
+       .suspend = ata_port_pm_suspend,
+       .resume = ata_port_pm_resume,
+       .freeze = ata_port_pm_freeze,
+       .thaw = ata_port_pm_resume,
+       .poweroff = ata_port_pm_poweroff,
+       .restore = ata_port_pm_resume,
 
        .runtime_suspend = ata_port_runtime_suspend,
        .runtime_resume = ata_port_runtime_resume,
@@ -5526,18 +5512,17 @@ static const struct dev_pm_ops ata_port_pm_ops = {
  * level. sas suspend/resume is async to allow parallel port recovery
  * since sas has multiple ata_port instances per Scsi_Host.
  */
-int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+void ata_sas_port_suspend(struct ata_port *ap)
 {
-       return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
+       ata_port_suspend_async(ap, PMSG_SUSPEND);
 }
-EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
+EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
 
-int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+void ata_sas_port_resume(struct ata_port *ap)
 {
-       return __ata_port_resume_common(ap, PMSG_RESUME, async);
+       ata_port_resume_async(ap, PMSG_RESUME);
 }
-EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
-
+EXPORT_SYMBOL_GPL(ata_sas_port_resume);
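With the return value and the async result pointer gone, a SAS LLD's host suspend path simply kicks every port and, if it needs to synchronize, waits for error handling afterwards. A hedged sketch of the calling pattern only; the loop bounds and the sas_ports array are illustrative, not lifted from libsas:

	/* illustrative only: request suspend on each port asynchronously ... */
	for (i = 0; i < n_ports; i++)
		ata_sas_port_suspend(sas_ports[i]);

	/* ... then, where synchronous completion is required, wait for EH */
	for (i = 0; i < n_ports; i++)
		ata_port_wait_eh(sas_ports[i]);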
 
 /**
  *     ata_host_suspend - suspend host
index 6d87570083187bbbe749fe566195f854707a641c..6760fc4e85b8c809e39655fd92adc83871e75fce 100644 (file)
@@ -95,12 +95,13 @@ enum {
  * represents timeout for that try.  The first try can be soft or
  * hardreset.  All others are hardreset if available.  In most cases
  * the first reset w/ 10sec timeout should succeed.  Following entries
- * are mostly for error handling, hotplug and retarded devices.
+ * are mostly for error handling, hotplug and those outlier devices that
+ * take an exceptionally long time to recover from reset.
  */
 static const unsigned long ata_eh_reset_timeouts[] = {
        10000,  /* most drives spin up by 10sec */
        10000,  /* > 99% working drives spin up before 20sec */
-       35000,  /* give > 30 secs of idleness for retarded devices */
+       35000,  /* give > 30 secs of idleness for outlier devices */
         5000,  /* and sweet one last chance */
        ULONG_MAX, /* > 1 min has elapsed, give up */
 };
@@ -4069,7 +4070,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
 
        ata_acpi_set_state(ap, ap->pm_mesg);
  out:
-       /* report result */
+       /* update the flags */
        spin_lock_irqsave(ap->lock, flags);
 
        ap->pflags &= ~ATA_PFLAG_PM_PENDING;
@@ -4078,11 +4079,6 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
        else if (ap->pflags & ATA_PFLAG_FROZEN)
                ata_port_schedule_eh(ap);
 
-       if (ap->pm_result) {
-               *ap->pm_result = rc;
-               ap->pm_result = NULL;
-       }
-
        spin_unlock_irqrestore(ap->lock, flags);
 
        return;
@@ -4134,13 +4130,9 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
        /* tell ACPI that we're resuming */
        ata_acpi_on_resume(ap);
 
-       /* report result */
+       /* update the flags */
        spin_lock_irqsave(ap->lock, flags);
        ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
-       if (ap->pm_result) {
-               *ap->pm_result = rc;
-               ap->pm_result = NULL;
-       }
        spin_unlock_irqrestore(ap->lock, flags);
 }
 #endif /* CONFIG_PM */
index 88949c6d55ddd43b32b07accdb892256028d5e25..f3a65a3140d3c7e51bef6dce19f676de768abfdf 100644 (file)
@@ -85,21 +85,6 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
                return ODD_MECH_TYPE_UNSUPPORTED;
 }
 
-static bool odd_can_poweroff(struct ata_device *ata_dev)
-{
-       acpi_handle handle;
-       struct acpi_device *acpi_dev;
-
-       handle = ata_dev_acpi_handle(ata_dev);
-       if (!handle)
-               return false;
-
-       if (acpi_bus_get_device(handle, &acpi_dev))
-               return false;
-
-       return acpi_device_can_poweroff(acpi_dev);
-}
-
 /* Test if ODD is zero power ready by sense code */
 static bool zpready(struct ata_device *dev)
 {
@@ -267,13 +252,11 @@ static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
 
 void zpodd_init(struct ata_device *dev)
 {
+       struct acpi_device *adev = ACPI_COMPANION(&dev->tdev);
        enum odd_mech_type mech_type;
        struct zpodd *zpodd;
 
-       if (dev->zpodd)
-               return;
-
-       if (!odd_can_poweroff(dev))
+       if (dev->zpodd || !adev || !acpi_device_can_poweroff(adev))
                return;
 
        mech_type = zpodd_get_mech_type(dev);
index 62c9ac80c6e96ed1f0a4421fb22b7ee3629ae2e9..5108b8744dce4c70d37d913fbf5b916984b36f6c 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index d23e2b3ca0b68a6c7109eef8e53b2e3e68048281..1206fa6b62cae199a00302729565915ed931509a 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 73492dd4a4bce8aade1f79a7dbdfd3926c7f6f6b..6fac524c2f500381ac8d76b2a94bf4d88314f1e7 100644 (file)
@@ -356,7 +356,7 @@ static void cf_exit(struct arasan_cf_dev *acdev)
 
 static void dma_callback(void *dev)
 {
-       struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;
+       struct arasan_cf_dev *acdev = dev;
 
        complete(&acdev->dma_completion);
 }
index 1581dee2967a80368976303fc9e7865b1e69cef2..3aa4e655e3c64cb5a81f3759f5638e2eb003c5df 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index d63ee8f41a4f29e9c0bc9209236c97ca47259ab5..e9c87274a781551d4496ac81b213855dd766ae41 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/gfp.h>
 #include <scsi/scsi_host.h>
index 24e51056ac26a857c1db028b9eb71450a0608d45..30fa4ca4cef65607493a1e8fd1e5fe6bf13b5bbb 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 2ca5026f2c150f9e83b37e48e8227e944a18253f..7e73a0f1e3237698b2b7afcd70207655e53dc2d3 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 8fb69e5ca1b7f08be8ef7a635acdf8122765f577..57f1be64dbf239e8c631687ea5b5c28f1c1b5577 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
index 1275a8d4dedc06940cbce5660d24136d27aea0ef..6bca3505b9e9d5431d537e2fdf2fbcae917e76c6 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index f10baabbf5db5ae2f266aed4f7559d258962f267..bcde4b786807079126aed18744edbbf972a86d3c 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index f07f2296acdcf5c986a0c33bd423736268f42c33..8afe854a5a50c24abeb2c9f96966264d09c0f8bc 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 997e16a3a63f6154373d33854fed1c1532ee657f..2c0986fa4bb2ccd8efc1d2ead606c05e6d734ae1 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 0448860a2077526cea30354fc60b6c6c3cacada9..32ddcae5a360a387b010db88cf951287ab219bd6 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/libata.h>
index 810bc9964ddec6a12a72ca50206b7b67039c1183..3435bd6a5cc918984ba7aee85cfc8e4435e7106c 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 3c12fd7acd4126c256a3c87822b744c77af4a43b..f440892225f4d220603759f196cd2b5ca155a87f 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 980b88e109fcf5109e164d155dfca86de735c2c2..cad9d45749c419e51dbdf852fe67ee998796938b 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
 #include <linux/ata.h>
index 35b521348d311fbb7b2921618e348661a57edfb6..8e76f79689d371bd96fbd2f276d53ab265edc026 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index a9d74eff5fc4b159367b1f8f03d7aa842e331dba..3ba843f5cdc0f91a789e536b876a0b4bb292e645 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 4be0398c153d36b448cc5f23199514a48c8cc35b..b93c0f0729e7676e9e4b78d9611d3547ed8350b9 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 85cf2861e0b7d217eaabd6ffa446a6446de84c72..255c5aaff3a81e8c3ddce32dca7ba49a6dd0cf1d 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index b0b18ec5465ffe32bae62d11f97d1b0cba8c313c..e0872db913d65d1f120a753ac2f313162add41dd 100644 (file)
@@ -15,7 +15,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
 #include <linux/ata.h>
@@ -100,13 +99,9 @@ static int pata_imx_probe(struct platform_device *pdev)
        struct resource *io_res;
        int ret;
 
-       io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (io_res == NULL)
-               return -EINVAL;
-
        irq = platform_get_irq(pdev, 0);
-       if (irq <= 0)
-               return -EINVAL;
+       if (irq < 0)
+               return irq;
 
        priv = devm_kzalloc(&pdev->dev,
                                sizeof(struct pata_imx_priv), GFP_KERNEL);
@@ -136,11 +131,10 @@ static int pata_imx_probe(struct platform_device *pdev)
        ap->pio_mask = ATA_PIO0;
        ap->flags |= ATA_FLAG_SLAVE_POSS;
 
-       priv->host_regs = devm_ioremap(&pdev->dev, io_res->start,
-               resource_size(io_res));
-       if (!priv->host_regs) {
-               dev_err(&pdev->dev, "failed to map IO/CTL base\n");
-               ret = -EBUSY;
+       io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res);
+       if (IS_ERR(priv->host_regs)) {
+               ret = PTR_ERR(priv->host_regs);
                goto err;
        }
 
index 2a8dd9527eccd4d435b65f7532091520cd5de40b..81369d187a5cb8f9e8e7d40158741533486d19a5 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 581e04d80367a9690c22513aaba376bf7dff643f..dc3d7877f29d7898a11780595e27cabb01d53a7e 100644 (file)
@@ -72,7 +72,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
index 76e739b031b6a3a15c036b082bcb8ba8d4f1ad06..b1cfa0258fd38017337e345b5095677a50567133 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index be816428b430fbb73135ef1591bbde2219097221..bce2a8ca4678ace5375b6a0f90699481b8ff9659 100644 (file)
@@ -916,7 +916,6 @@ static __init int probe_chip_type(struct legacy_probe *probe)
                        local_irq_restore(flags);
                        return BIOS;
                }
-               local_irq_restore(flags);
        }
 
        if (ht6560a & mask)
index a4f5e781c8c237dd794588b0bc0c050216763ce0..6bad3df3a13c7741095ac7e88c80d7d7e75d98c4 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 1f5f28bb0bb8082c5cb66dcb714af3fec6bed5e2..f39a5379e8165a0adf8e59f3fa34412822724289 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index ad1a0febd6207dc08521c245a8e650f30e149dff..e3b97093ef9a3cc60aa5a904bc39e7c31f2546b7 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 9513e071040da8f148dc63a6b2aca8f7d7018a83..56201a69af127322e0070b3f4b18c8a0d0355cec 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 0c424dae56e7e13a686bdf062baadcbb422d8fe6..6154c3ee11a5b32d94fd065e0b5abbf8c0109c79 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 16dc3a63a23d5f763b28c92d7fcc9170d0e2320e..d44df7ccfe43930fa51354ce4dab57545bb381f9 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index d77b2e1054efc25cf8207ec6b963ce853427255f..319b64491b7b504fd1fb67350d16e8374fd6cecf 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 4ea70cd22aee8fc70a75f9bfb5696122fee2bd32..fb042e0519d00f780d4a642e4b8a3e27bdec0d11 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 78ede3fd187559360d246cb8f52439fdbd2364ad..bb71ea214b9902ee63258acefaefc795bf18bbc8 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 40254f4df584f781100476dbd391941148042fc8..bcc4b968c0497cb74ec005c433a993054fe42273 100644 (file)
@@ -26,7 +26,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
index 9d874c85d64d7f5eb7f5d4c44fe785e8aa1c555a..1151f23177bb803df56a20fbc30be6032e382598 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index c34fc50070a6382fc48f9c428557c9f9bca5ee9b..defa050e17843d26db5ef685e8c4263fc64eb23b 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 2beb6b5045f8fd224e1f535c20254caa99aa9540..0b46be11705169b0cb8150d2a3792be72caae3c2 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 02794885de10be8329003300d19c2f8084ff9032..a5579b55e3329ee395cfbddd30eb034711e0aab1 100644 (file)
@@ -13,7 +13,6 @@
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
 #include <linux/ata.h>
index a6f05acad61ed01d68b0ec201698a8abc406cf44..73259bfda1e364e5e99b89aec1a6ae99400f6297 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/ata.h>
 #include <linux/libata.h>
index f582ba180a7d6e3c799a788f86c477ed43f36660..be3f10240dca4961f80d34437acaac78298dbea1 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 79a970f05a2e2bb54d60fcfa92211596f41791ec..521b2137ea3e39ccccb35cb78810316bebc26b72 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 040b093617a4ada5ffa76c61c2d20615644d5b35..caedc90855b2fdb931f39018b31f3d9f7b0effff 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index ce2f828c17b30f4ff5fa831c933d3174c6136d5d..96a232fffae6089ddfa77b8981e04115700e872a 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index f35f15f4d83e30288abf6506e9802c66f969ace7..f1f5b5ae33826356230f2689996c34065afb3f7a 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index d3830c45a369cd611ba99ff78bac235c5fc4e258..5a1cde0ea3601818456b44b51c0cc059ece726e0 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 96c6a79ef6066af0171b74864134f34a05ebb611..e27f31fe1b67b93a3c6c326be0d434a26d8d056a 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index c4b0b073ba8e8c7d13512a2c6930b987ba1a4745..73fe362d97161dffcb75a514c41b65fb9a5baf0b 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 1e8363640bf5e7bd16a065ce0222c161b745e459..78d913aa93c812f8eae1ccd572ad1c7ea1dfd22a 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 6816911ac4229ba8e7dc46829cd2da9461015ef5..900f0e4a1faf61462b20f37b8b29176d194a17e0 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index 94473da68c02d9664906f8c5c82a0f395d881d63..7bc78e264f9eda5cd4cd7eabc4a2acf6dae9bb7e 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <scsi/scsi_host.h>
index c3ab9a6c3965b1c8df7045da6f6a1a10f414ca9b..f6c9632bdff6a11c6fbdcc18d7ff703d1c4b20cc 100644 (file)
@@ -55,7 +55,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/gfp.h>
index 8ea6e6afd041db7736e6a69c88641253fa4b4078..f10631beffa87cd258950bb4c85778ce166205e0 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index 523524b68022d3710f342d96f54926014de551ae..0bb2cabd2197bba01c0b4dd175f662a353eba076 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -462,8 +461,7 @@ static irqreturn_t dma_dwc_interrupt(int irq, void *hsdev_instance)
        int chan;
        u32 tfr_reg, err_reg;
        unsigned long flags;
-       struct sata_dwc_device *hsdev =
-               (struct sata_dwc_device *)hsdev_instance;
+       struct sata_dwc_device *hsdev = hsdev_instance;
        struct ata_host *host = (struct ata_host *)hsdev->host;
        struct ata_port *ap;
        struct sata_dwc_device_port *hsdevp;
index 870b11eadc6d793d3abcebdcc6bb72c4cc326f45..65965cf5af06ba94c8c77ebfcf01ca584f81d0be 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/types.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -142,7 +141,7 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
                                        ssize_t size)
 {
        struct ahci_host_priv *hpriv =  ap->host->private_data;
-       struct ecx_plat_data *pdata = (struct ecx_plat_data *) hpriv->plat_data;
+       struct ecx_plat_data *pdata = hpriv->plat_data;
        struct ahci_port_priv *pp = ap->private_data;
        unsigned long flags;
        int pmp, i;
@@ -403,6 +402,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
        static const unsigned long timing[] = { 5, 100, 500};
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
        struct ata_taskfile tf;
        bool online;
@@ -431,7 +431,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
                        break;
        } while (!online && retry--);
 
-       ahci_start_engine(ap);
+       hpriv->start_engine(ap);
 
        if (online)
                *class = ahci_dev_classify(ap);
index d74def823d3ed865be0841d771fb7141ac1c1a07..ba5f27120332fe5b2e15d553cb7a7bc25aaa7a44 100644 (file)
@@ -40,7 +40,6 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index 97f4acb54ad626795972ffb8e35572101d05d08d..3638887476f610a933d57b29cae7f84c4d6f229f 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index 3b0dd57984e18370f5a659907c1c3326aed6fb49..9a6bd4cd29a0661cefd71f686e7c4bbc778911ed 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index b7695e804635b87c0d7c787be1d900facaa68963..3062f8605b2955956bff191d6034530ce7fbd636 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index 1ad2f62d34b98fd0b41be9a239827e1634327c54..b513428171b3592a4033e597ec593a7e4189ab21 100644 (file)
@@ -33,7 +33,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index dc4f70179e7d37470dc358b02c96b328d8bbeeae..c630fa81262439939c3ad770f3538e48c0862522 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index 9947010afc0f7973e2af77664f6ef39e53840358..39b5de60a1f96a93a80193b5a7ce8ec6e0ecea43 100644 (file)
@@ -82,7 +82,6 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1021,8 +1020,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
        idx++;
        dist = ((long) (window_size - (offset + size))) >= 0 ? size :
                (long) (window_size - offset);
-       memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
-                     dist);
+       memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
 
        psource += dist;
        size -= dist;
@@ -1031,8 +1029,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
                readl(mmio + PDC_GENERAL_CTLR);
                writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
                readl(mmio + PDC_DIMM_WINDOW_CTLR);
-               memcpy_fromio((char *) psource, (char *) (dimm_mmio),
-                             window_size / 4);
+               memcpy_fromio(psource, dimm_mmio, window_size / 4);
                psource += window_size;
                size -= window_size;
                idx++;
@@ -1043,8 +1040,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
                readl(mmio + PDC_GENERAL_CTLR);
                writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
                readl(mmio + PDC_DIMM_WINDOW_CTLR);
-               memcpy_fromio((char *) psource, (char *) (dimm_mmio),
-                             size / 4);
+               memcpy_fromio(psource, dimm_mmio, size / 4);
        }
 }
 #endif
index 6d6489118873fe50bd4f04a0cc3320e5eae5747c..08f98c3ed5c8e28f89d45ec467f3dec69f28d204 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index 87f056e54a9d7566504925ca812347baf6b3e62f..f72e84228c5c10e05db80755df5a64efd5606b38 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/device.h>
index 44f304b3de63c6ee45fa87f88793e900359ac1be..29e847aac34be5ea88871ff03c99856b20c4492e 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
index 545c4de412c3e0ba5a2680d9143e059a9c475dbd..db4e264eecb60c88fb089b77878d644da4801a8f 100644 (file)
@@ -790,6 +790,32 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(devm_kmalloc);
 
+/**
+ * devm_kstrdup - Allocate resource-managed space and
+ *                copy an existing string into it.
+ * @dev: Device to allocate memory for
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the devm_kmalloc() call when
+ *       allocating memory
+ * RETURNS:
+ * Pointer to allocated string on success, NULL on failure.
+ */
+char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
+{
+       size_t size;
+       char *buf;
+
+       if (!s)
+               return NULL;
+
+       size = strlen(s) + 1;
+       buf = devm_kmalloc(dev, size, gfp);
+       if (buf)
+               memcpy(buf, s, size);
+       return buf;
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup);
+
 /**
  * devm_kfree - Resource-managed kfree
  * @dev: Device this memory belongs to
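For illustration only (not part of the patch): a minimal sketch of how a driver might use the new devm_kstrdup() helper from probe(), assuming a hypothetical foo_priv structure with a label field. The duplicated string is released automatically by devres when the device is unbound.

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
        const char *label;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Resource-managed copy: no explicit kfree() needed on detach. */
        priv->label = devm_kstrdup(&pdev->dev, "default-label", GFP_KERNEL);
        if (!priv->label)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        return 0;
}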
index bfb8955c406c5ef02d819d4d5fa02d709f0a6ec0..dc127e5dec4b5371e65048f9923e80d24d7a85b0 100644 (file)
@@ -42,7 +42,7 @@
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
-               dev_warn(dev, name " latency exceeded, new value %lld ns\n",    \
+               dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
index 1b41fca3d65a54545c6c124e0df696998c29a1af..86d5e4fb5b98314d10504f0c91ea5c2765038ddd 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/async.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
 
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
 {
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
+       dev->power.is_noirq_suspended = false;
+       dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
@@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
@@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
        if (dev->power.syscore)
                goto Out;
 
+       if (!dev->power.is_noirq_suspended)
+               goto Out;
+
+       dpm_wait(dev->parent, async);
+
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
        }
 
        error = dpm_run_callback(callback, dev, state, info);
+       dev->power.is_noirq_suspended = false;
 
  Out:
+       complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
 }
 
+static bool is_async(struct device *dev)
+{
+       return dev->power.async_suspend && pm_async_enabled
+               && !pm_trace_is_enabled();
+}
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = device_resume_noirq(dev, pm_transition, true);
+       if (error)
+               pm_dev_err(dev, pm_transition, " async", error);
+
+       put_device(dev);
+}
+
 /**
  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
  */
 static void dpm_resume_noirq(pm_message_t state)
 {
+       struct device *dev;
        ktime_t starttime = ktime_get();
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_noirq_list)) {
-               struct device *dev = to_device(dpm_noirq_list.next);
-               int error;
+       pm_transition = state;
 
+       /*
+        * Advance the async threads upfront,
+        * in case the starting of async threads is
+        * delayed by non-async resuming devices.
+        */
+       list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+               reinit_completion(&dev->power.completion);
+               if (is_async(dev)) {
+                       get_device(dev);
+                       async_schedule(async_resume_noirq, dev);
+               }
+       }
+
+       while (!list_empty(&dpm_noirq_list)) {
+               dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_resume_noirq(dev, state);
-               if (error) {
-                       suspend_stats.failed_resume_noirq++;
-                       dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-                       dpm_save_failed_dev(dev_name(dev));
-                       pm_dev_err(dev, state, " noirq", error);
+               if (!is_async(dev)) {
+                       int error;
+
+                       error = device_resume_noirq(dev, state, false);
+                       if (error) {
+                               suspend_stats.failed_resume_noirq++;
+                               dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+                               dpm_save_failed_dev(dev_name(dev));
+                               pm_dev_err(dev, state, " noirq", error);
+                       }
                }
 
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
+       async_synchronize_full();
        dpm_show_time(starttime, state, "noirq");
        resume_device_irqs();
        cpuidle_resume();
@@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
@@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
        if (dev->power.syscore)
                goto Out;
 
+       if (!dev->power.is_late_suspended)
+               goto Out;
+
+       dpm_wait(dev->parent, async);
+
        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
        }
 
        error = dpm_run_callback(callback, dev, state, info);
+       dev->power.is_late_suspended = false;
 
  Out:
        TRACE_RESUME(error);
 
        pm_runtime_enable(dev);
+       complete_all(&dev->power.completion);
        return error;
 }
 
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = device_resume_early(dev, pm_transition, true);
+       if (error)
+               pm_dev_err(dev, pm_transition, " async", error);
+
+       put_device(dev);
+}
+
 /**
  * dpm_resume_early - Execute "early resume" callbacks for all devices.
  * @state: PM transition of the system being carried out.
  */
 static void dpm_resume_early(pm_message_t state)
 {
+       struct device *dev;
        ktime_t starttime = ktime_get();
 
        mutex_lock(&dpm_list_mtx);
-       while (!list_empty(&dpm_late_early_list)) {
-               struct device *dev = to_device(dpm_late_early_list.next);
-               int error;
+       pm_transition = state;
 
+       /*
+        * Advance the async threads upfront,
+        * in case the starting of async threads is
+        * delayed by non-async resuming devices.
+        */
+       list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+               reinit_completion(&dev->power.completion);
+               if (is_async(dev)) {
+                       get_device(dev);
+                       async_schedule(async_resume_early, dev);
+               }
+       }
+
+       while (!list_empty(&dpm_late_early_list)) {
+               dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_resume_early(dev, state);
-               if (error) {
-                       suspend_stats.failed_resume_early++;
-                       dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-                       dpm_save_failed_dev(dev_name(dev));
-                       pm_dev_err(dev, state, " early", error);
-               }
+               if (!is_async(dev)) {
+                       int error;
 
+                       error = device_resume_early(dev, state, false);
+                       if (error) {
+                               suspend_stats.failed_resume_early++;
+                               dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+                               dpm_save_failed_dev(dev_name(dev));
+                               pm_dev_err(dev, state, " early", error);
+                       }
+               }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
+       async_synchronize_full();
        dpm_show_time(starttime, state, "early");
 }
 
@@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie)
        put_device(dev);
 }
 
-static bool is_async(struct device *dev)
-{
-       return dev->power.async_suspend && pm_async_enabled
-               && !pm_trace_is_enabled();
-}
-
 /**
  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
  * @state: PM transition of the system being carried out.
@@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state)
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
+
+       cpufreq_resume();
 }
 
 /**
@@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
  * The driver of @dev will not receive interrupts while this function is being
  * executed.
  */
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
+       int error = 0;
+
+       if (async_error)
+               goto Complete;
+
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
 
        if (dev->power.syscore)
-               return 0;
+               goto Complete;
+
+       dpm_wait_for_children(dev, async);
 
        if (dev->pm_domain) {
                info = "noirq power domain ";
@@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
                callback = pm_noirq_op(dev->driver->pm, state);
        }
 
-       return dpm_run_callback(callback, dev, state, info);
+       error = dpm_run_callback(callback, dev, state, info);
+       if (!error)
+               dev->power.is_noirq_suspended = true;
+       else
+               async_error = error;
+
+Complete:
+       complete_all(&dev->power.completion);
+       return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = __device_suspend_noirq(dev, pm_transition, true);
+       if (error) {
+               dpm_save_failed_dev(dev_name(dev));
+               pm_dev_err(dev, pm_transition, " async", error);
+       }
+
+       put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+       reinit_completion(&dev->power.completion);
+
+       if (pm_async_enabled && dev->power.async_suspend) {
+               get_device(dev);
+               async_schedule(async_suspend_noirq, dev);
+               return 0;
+       }
+       return __device_suspend_noirq(dev, pm_transition, false);
 }
 
 /**
@@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state)
        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
+       pm_transition = state;
+       async_error = 0;
+
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);
 
                get_device(dev);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_suspend_noirq(dev, state);
+               error = device_suspend_noirq(dev);
 
                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
-                       suspend_stats.failed_suspend_noirq++;
-                       dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
@@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state)
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);
 
-               if (pm_wakeup_pending()) {
-                       error = -EBUSY;
+               if (async_error)
                        break;
-               }
        }
        mutex_unlock(&dpm_list_mtx);
-       if (error)
+       async_synchronize_full();
+       if (!error)
+               error = async_error;
+
+       if (error) {
+               suspend_stats.failed_suspend_noirq++;
+               dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                dpm_resume_noirq(resume_event(state));
-       else
+       } else {
                dpm_show_time(starttime, state, "noirq");
+       }
        return error;
 }
 
@@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state)
  *
  * Runtime PM is disabled for @dev while this function is being executed.
  */
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
 {
        pm_callback_t callback = NULL;
        char *info = NULL;
+       int error = 0;
 
        __pm_runtime_disable(dev, false);
 
+       if (async_error)
+               goto Complete;
+
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
+
        if (dev->power.syscore)
-               return 0;
+               goto Complete;
+
+       dpm_wait_for_children(dev, async);
 
        if (dev->pm_domain) {
                info = "late power domain ";
@@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
                callback = pm_late_early_op(dev->driver->pm, state);
        }
 
-       return dpm_run_callback(callback, dev, state, info);
+       error = dpm_run_callback(callback, dev, state, info);
+       if (!error)
+               dev->power.is_late_suspended = true;
+       else
+               async_error = error;
+
+Complete:
+       complete_all(&dev->power.completion);
+       return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+       struct device *dev = (struct device *)data;
+       int error;
+
+       error = __device_suspend_late(dev, pm_transition, true);
+       if (error) {
+               dpm_save_failed_dev(dev_name(dev));
+               pm_dev_err(dev, pm_transition, " async", error);
+       }
+       put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+       reinit_completion(&dev->power.completion);
+
+       if (pm_async_enabled && dev->power.async_suspend) {
+               get_device(dev);
+               async_schedule(async_suspend_late, dev);
+               return 0;
+       }
+
+       return __device_suspend_late(dev, pm_transition, false);
 }
 
 /**
@@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state)
        int error = 0;
 
        mutex_lock(&dpm_list_mtx);
+       pm_transition = state;
+       async_error = 0;
+
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);
 
                get_device(dev);
                mutex_unlock(&dpm_list_mtx);
 
-               error = device_suspend_late(dev, state);
+               error = device_suspend_late(dev);
 
                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
-                       suspend_stats.failed_suspend_late++;
-                       dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
@@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state)
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);
 
-               if (pm_wakeup_pending()) {
-                       error = -EBUSY;
+               if (async_error)
                        break;
-               }
        }
        mutex_unlock(&dpm_list_mtx);
-       if (error)
+       async_synchronize_full();
+       if (error) {
+               suspend_stats.failed_suspend_late++;
+               dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
-       else
+       } else {
                dpm_show_time(starttime, state, "late");
-
+       }
        return error;
 }
 
@@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state)
 
        might_sleep();
 
+       cpufreq_suspend();
+
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
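As a side note (illustrative, not from this patch): a device only takes the new asynchronous noirq/late paths above if its async_suspend flag is set, which drivers normally do through the existing device_enable_async_suspend() helper; a minimal, hypothetical sketch:

#include <linux/pm.h>

/* Hypothetical helper: opt a device into asynchronous suspend/resume so the
 * late/noirq phases above may run its callbacks in parallel with other
 * async-capable devices (subject to the pm_async_enabled knob). */
static void foo_enable_async_pm(struct device *dev)
{
        device_enable_async_suspend(dev);
}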
index cfc3226ec4928dee99ff95540eed87af1058d5af..a21223d95926fe2d5207ae445f61fe6c79d86e78 100644 (file)
@@ -89,8 +89,8 @@ extern void dpm_sysfs_remove(struct device *dev);
 extern void rpm_sysfs_remove(struct device *dev);
 extern int wakeup_sysfs_add(struct device *dev);
 extern void wakeup_sysfs_remove(struct device *dev);
-extern int pm_qos_sysfs_add_latency(struct device *dev);
-extern void pm_qos_sysfs_remove_latency(struct device *dev);
+extern int pm_qos_sysfs_add_resume_latency(struct device *dev);
+extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
 extern int pm_qos_sysfs_add_flags(struct device *dev);
 extern void pm_qos_sysfs_remove_flags(struct device *dev);
 
index 5c1361a9e5dd58049c3835d414f42607cdb46411..36b9eb4862cb96e2561321500982730e4ff1587f 100644 (file)
@@ -105,7 +105,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
        return IS_ERR_OR_NULL(dev->power.qos) ?
-               0 : pm_qos_read_value(&dev->power.qos->latency);
+               0 : pm_qos_read_value(&dev->power.qos->resume_latency);
 }
 
 /**
@@ -141,16 +141,24 @@ static int apply_constraint(struct dev_pm_qos_request *req,
        int ret;
 
        switch(req->type) {
-       case DEV_PM_QOS_LATENCY:
-               ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
-                                          action, value);
+       case DEV_PM_QOS_RESUME_LATENCY:
+               ret = pm_qos_update_target(&qos->resume_latency,
+                                          &req->data.pnode, action, value);
                if (ret) {
-                       value = pm_qos_read_value(&qos->latency);
+                       value = pm_qos_read_value(&qos->resume_latency);
                        blocking_notifier_call_chain(&dev_pm_notifiers,
                                                     (unsigned long)value,
                                                     req);
                }
                break;
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
+               ret = pm_qos_update_target(&qos->latency_tolerance,
+                                          &req->data.pnode, action, value);
+               if (ret) {
+                       value = pm_qos_read_value(&qos->latency_tolerance);
+                       req->dev->power.set_latency_tolerance(req->dev, value);
+               }
+               break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
@@ -186,13 +194,21 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        }
        BLOCKING_INIT_NOTIFIER_HEAD(n);
 
-       c = &qos->latency;
+       c = &qos->resume_latency;
        plist_head_init(&c->list);
-       c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
-       c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
+       c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+       c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+       c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
        c->type = PM_QOS_MIN;
        c->notifiers = n;
 
+       c = &qos->latency_tolerance;
+       plist_head_init(&c->list);
+       c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+       c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+       c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+       c->type = PM_QOS_MIN;
+
        INIT_LIST_HEAD(&qos->flags.list);
 
        spin_lock_irq(&dev->power.lock);
@@ -224,7 +240,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
         * If the device's PM QoS resume latency limit or PM QoS flags have been
         * exposed to user space, they have to be hidden at this point.
         */
-       pm_qos_sysfs_remove_latency(dev);
+       pm_qos_sysfs_remove_resume_latency(dev);
        pm_qos_sysfs_remove_flags(dev);
 
        mutex_lock(&dev_pm_qos_mtx);
@@ -237,7 +253,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
                goto out;
 
        /* Flush the constraints lists for the device. */
-       c = &qos->latency;
+       c = &qos->resume_latency;
        plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
                /*
                 * Update constraints list and call the notification
@@ -246,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
                memset(req, 0, sizeof(*req));
        }
+       c = &qos->latency_tolerance;
+       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+               memset(req, 0, sizeof(*req));
+       }
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -265,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
+static bool dev_pm_qos_invalid_request(struct device *dev,
+                                      struct dev_pm_qos_request *req)
+{
+       return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+                       && !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+                                   struct dev_pm_qos_request *req,
+                                   enum dev_pm_qos_req_type type, s32 value)
+{
+       int ret = 0;
+
+       if (!dev || dev_pm_qos_invalid_request(dev, req))
+               return -EINVAL;
+
+       if (WARN(dev_pm_qos_request_active(req),
+                "%s() called for already added request\n", __func__))
+               return -EINVAL;
+
+       if (IS_ERR(dev->power.qos))
+               ret = -ENODEV;
+       else if (!dev->power.qos)
+               ret = dev_pm_qos_constraints_allocate(dev);
+
+       trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+       if (!ret) {
+               req->dev = dev;
+               req->type = type;
+               ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+       }
+       return ret;
+}
+
 /**
  * dev_pm_qos_add_request - inserts new qos request into the list
  * @dev: target device for the constraint
@@ -290,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                           enum dev_pm_qos_req_type type, s32 value)
 {
-       int ret = 0;
-
-       if (!dev || !req) /*guard against callers passing in null */
-               return -EINVAL;
-
-       if (WARN(dev_pm_qos_request_active(req),
-                "%s() called for already added request\n", __func__))
-               return -EINVAL;
+       int ret;
 
        mutex_lock(&dev_pm_qos_mtx);
-
-       if (IS_ERR(dev->power.qos))
-               ret = -ENODEV;
-       else if (!dev->power.qos)
-               ret = dev_pm_qos_constraints_allocate(dev);
-
-       trace_dev_pm_qos_add_request(dev_name(dev), type, value);
-       if (!ret) {
-               req->dev = dev;
-               req->type = type;
-               ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-       }
-
+       ret = __dev_pm_qos_add_request(dev, req, type, value);
        mutex_unlock(&dev_pm_qos_mtx);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
@@ -341,7 +376,8 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
                return -ENODEV;
 
        switch(req->type) {
-       case DEV_PM_QOS_LATENCY:
+       case DEV_PM_QOS_RESUME_LATENCY:
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
@@ -460,8 +496,8 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
                ret = dev_pm_qos_constraints_allocate(dev);
 
        if (!ret)
-               ret = blocking_notifier_chain_register(
-                               dev->power.qos->latency.notifiers, notifier);
+               ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
+                                                      notifier);
 
        mutex_unlock(&dev_pm_qos_mtx);
        return ret;
@@ -487,9 +523,8 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 
        /* Silently return if the constraints object is not present. */
        if (!IS_ERR_OR_NULL(dev->power.qos))
-               retval = blocking_notifier_chain_unregister(
-                               dev->power.qos->latency.notifiers,
-                               notifier);
+               retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
+                                                           notifier);
 
        mutex_unlock(&dev_pm_qos_mtx);
        return retval;
@@ -530,20 +565,32 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
  * @req: Pointer to the preallocated handle.
+ * @type: Type of the request.
  * @value: Constraint latency value.
  */
 int dev_pm_qos_add_ancestor_request(struct device *dev,
-                                   struct dev_pm_qos_request *req, s32 value)
+                                   struct dev_pm_qos_request *req,
+                                   enum dev_pm_qos_req_type type, s32 value)
 {
        struct device *ancestor = dev->parent;
        int ret = -ENODEV;
 
-       while (ancestor && !ancestor->power.ignore_children)
-               ancestor = ancestor->parent;
+       switch (type) {
+       case DEV_PM_QOS_RESUME_LATENCY:
+               while (ancestor && !ancestor->power.ignore_children)
+                       ancestor = ancestor->parent;
 
+               break;
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
+               while (ancestor && !ancestor->power.set_latency_tolerance)
+                       ancestor = ancestor->parent;
+
+               break;
+       default:
+               ancestor = NULL;
+       }
        if (ancestor)
-               ret = dev_pm_qos_add_request(ancestor, req,
-                                            DEV_PM_QOS_LATENCY, value);
+               ret = dev_pm_qos_add_request(ancestor, req, type, value);
 
        if (ret < 0)
                req->dev = NULL;
@@ -559,9 +606,13 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
        struct dev_pm_qos_request *req = NULL;
 
        switch(type) {
-       case DEV_PM_QOS_LATENCY:
-               req = dev->power.qos->latency_req;
-               dev->power.qos->latency_req = NULL;
+       case DEV_PM_QOS_RESUME_LATENCY:
+               req = dev->power.qos->resume_latency_req;
+               dev->power.qos->resume_latency_req = NULL;
+               break;
+       case DEV_PM_QOS_LATENCY_TOLERANCE:
+               req = dev->power.qos->latency_tolerance_req;
+               dev->power.qos->latency_tolerance_req = NULL;
                break;
        case DEV_PM_QOS_FLAGS:
                req = dev->power.qos->flags_req;
@@ -597,7 +648,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
        if (!req)
                return -ENOMEM;
 
-       ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
+       ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
        if (ret < 0) {
                kfree(req);
                return ret;
@@ -609,7 +660,7 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 
        if (IS_ERR_OR_NULL(dev->power.qos))
                ret = -ENODEV;
-       else if (dev->power.qos->latency_req)
+       else if (dev->power.qos->resume_latency_req)
                ret = -EEXIST;
 
        if (ret < 0) {
@@ -618,13 +669,13 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
                mutex_unlock(&dev_pm_qos_mtx);
                goto out;
        }
-       dev->power.qos->latency_req = req;
+       dev->power.qos->resume_latency_req = req;
 
        mutex_unlock(&dev_pm_qos_mtx);
 
-       ret = pm_qos_sysfs_add_latency(dev);
+       ret = pm_qos_sysfs_add_resume_latency(dev);
        if (ret)
-               dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+               dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 
  out:
        mutex_unlock(&dev_pm_qos_sysfs_mtx);
@@ -634,8 +685,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
 static void __dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-       if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
-               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+       if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
+               __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
 }
 
 /**
@@ -646,7 +697,7 @@ void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
        mutex_lock(&dev_pm_qos_sysfs_mtx);
 
-       pm_qos_sysfs_remove_latency(dev);
+       pm_qos_sysfs_remove_resume_latency(dev);
 
        mutex_lock(&dev_pm_qos_mtx);
        __dev_pm_qos_hide_latency_limit(dev);
@@ -768,6 +819,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
        pm_runtime_put(dev);
        return ret;
 }
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+       s32 ret;
+
+       mutex_lock(&dev_pm_qos_mtx);
+       ret = IS_ERR_OR_NULL(dev->power.qos)
+               || !dev->power.qos->latency_tolerance_req ?
+                       PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+                       dev->power.qos->latency_tolerance_req->data.pnode.prio;
+       mutex_unlock(&dev_pm_qos_mtx);
+       return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+       int ret;
+
+       mutex_lock(&dev_pm_qos_mtx);
+
+       if (IS_ERR_OR_NULL(dev->power.qos)
+           || !dev->power.qos->latency_tolerance_req) {
+               struct dev_pm_qos_request *req;
+
+               if (val < 0) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               req = kzalloc(sizeof(*req), GFP_KERNEL);
+               if (!req) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+               if (ret < 0) {
+                       kfree(req);
+                       goto out;
+               }
+               dev->power.qos->latency_tolerance_req = req;
+       } else {
+               if (val < 0) {
+                       __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+                       ret = 0;
+               } else {
+                       ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+               }
+       }
+
+ out:
+       mutex_unlock(&dev_pm_qos_mtx);
+       return ret;
+}
 #else /* !CONFIG_PM_RUNTIME */
 static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
 static void __dev_pm_qos_hide_flags(struct device *dev) {}
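For reference (illustrative, not part of the patch): with DEV_PM_QOS_LATENCY renamed to DEV_PM_QOS_RESUME_LATENCY, a kernel user adds and drops a resume-latency constraint roughly as sketched below; foo_resume_req and the 100 us value are assumptions.

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_resume_req;

/* Ask the PM core to keep dev's resume latency below roughly 100 us. */
static int foo_limit_resume_latency(struct device *dev)
{
        return dev_pm_qos_add_request(dev, &foo_resume_req,
                                      DEV_PM_QOS_RESUME_LATENCY, 100);
}

static void foo_drop_resume_latency(void)
{
        dev_pm_qos_remove_request(&foo_resume_req);
}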
index 4776cf528d08538cded24f14902b75d9577b5ace..67c7938e430bbd87d706fb03688de83a2641cb7f 100644 (file)
@@ -1131,7 +1131,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_barrier);
  * @dev: Device to handle.
  * @check_resume: If set, check if there's a resume request for the device.
  *
- * Increment power.disable_depth for the device and if was zero previously,
+ * Increment power.disable_depth for the device and if it was zero previously,
  * cancel all pending runtime PM requests for the device and wait for all
  * operations in progress to complete.  The device can be either active or
  * suspended after its runtime PM has been disabled.
index 03e089ade5cef214227b54cd889dedcf08217a1a..95b181d1ca6df76d1b3a355e6a7bffca3aa26854 100644 (file)
@@ -218,15 +218,16 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
 static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
                autosuspend_delay_ms_store);
 
-static ssize_t pm_qos_latency_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
+static ssize_t pm_qos_resume_latency_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
-       return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
+       return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
 }
 
-static ssize_t pm_qos_latency_store(struct device *dev,
-                                   struct device_attribute *attr,
-                                   const char *buf, size_t n)
+static ssize_t pm_qos_resume_latency_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t n)
 {
        s32 value;
        int ret;
@@ -237,12 +238,47 @@ static ssize_t pm_qos_latency_store(struct device *dev,
        if (value < 0)
                return -EINVAL;
 
-       ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
+       ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
+                                       value);
        return ret < 0 ? ret : n;
 }
 
 static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
-                  pm_qos_latency_show, pm_qos_latency_store);
+                  pm_qos_resume_latency_show, pm_qos_resume_latency_store);
+
+static ssize_t pm_qos_latency_tolerance_show(struct device *dev,
+                                            struct device_attribute *attr,
+                                            char *buf)
+{
+       s32 value = dev_pm_qos_get_user_latency_tolerance(dev);
+
+       if (value < 0)
+               return sprintf(buf, "auto\n");
+       else if (value == PM_QOS_LATENCY_ANY)
+               return sprintf(buf, "any\n");
+
+       return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+                                             struct device_attribute *attr,
+                                             const char *buf, size_t n)
+{
+       s32 value;
+       int ret;
+
+       if (kstrtos32(buf, 0, &value)) {
+               if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n"))
+                       value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+               else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+                       value = PM_QOS_LATENCY_ANY;
+               else
+                       return -EINVAL;
+       }
+       ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+       return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644,
+                  pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store);
 
 static ssize_t pm_qos_no_power_off_show(struct device *dev,
                                        struct device_attribute *attr,
@@ -618,15 +654,26 @@ static struct attribute_group pm_runtime_attr_group = {
        .attrs  = runtime_attrs,
 };
 
-static struct attribute *pm_qos_latency_attrs[] = {
+static struct attribute *pm_qos_resume_latency_attrs[] = {
 #ifdef CONFIG_PM_RUNTIME
        &dev_attr_pm_qos_resume_latency_us.attr,
 #endif /* CONFIG_PM_RUNTIME */
        NULL,
 };
-static struct attribute_group pm_qos_latency_attr_group = {
+static struct attribute_group pm_qos_resume_latency_attr_group = {
+       .name   = power_group_name,
+       .attrs  = pm_qos_resume_latency_attrs,
+};
+
+static struct attribute *pm_qos_latency_tolerance_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+       &dev_attr_pm_qos_latency_tolerance_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+       NULL,
+};
+static struct attribute_group pm_qos_latency_tolerance_attr_group = {
        .name   = power_group_name,
-       .attrs  = pm_qos_latency_attrs,
+       .attrs  = pm_qos_latency_tolerance_attrs,
 };
 
 static struct attribute *pm_qos_flags_attrs[] = {
@@ -654,18 +701,23 @@ int dpm_sysfs_add(struct device *dev)
                if (rc)
                        goto err_out;
        }
-
        if (device_can_wakeup(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
-               if (rc) {
-                       if (pm_runtime_callbacks_present(dev))
-                               sysfs_unmerge_group(&dev->kobj,
-                                                   &pm_runtime_attr_group);
-                       goto err_out;
-               }
+               if (rc)
+                       goto err_runtime;
+       }
+       if (dev->power.set_latency_tolerance) {
+               rc = sysfs_merge_group(&dev->kobj,
+                                      &pm_qos_latency_tolerance_attr_group);
+               if (rc)
+                       goto err_wakeup;
        }
        return 0;
 
+ err_wakeup:
+       sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
+ err_runtime:
+       sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
  err_out:
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
        return rc;
@@ -681,14 +733,14 @@ void wakeup_sysfs_remove(struct device *dev)
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 }
 
-int pm_qos_sysfs_add_latency(struct device *dev)
+int pm_qos_sysfs_add_resume_latency(struct device *dev)
 {
-       return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
+       return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
 }
 
-void pm_qos_sysfs_remove_latency(struct device *dev)
+void pm_qos_sysfs_remove_resume_latency(struct device *dev)
 {
-       sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
+       sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
 }
 
 int pm_qos_sysfs_add_flags(struct device *dev)
@@ -708,6 +760,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+       sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
        dev_pm_qos_constraints_destroy(dev);
        rpm_sysfs_remove(dev);
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
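For illustration only: the new pm_qos_latency_tolerance_us attribute accepts "auto", "any", or a tolerance in microseconds, per the store handler above. A hedged userspace sketch follows; the device path is a placeholder, not a real node.

#include <stdio.h>

#define LAT_TOL "/sys/devices/platform/foo.0/power/pm_qos_latency_tolerance_us"

int main(void)
{
        FILE *f = fopen(LAT_TOL, "w");

        if (!f)
                return 1;
        /* Accepted values: "auto", "any", or microseconds, e.g. "100". */
        fprintf(f, "auto\n");
        return fclose(f) ? 1 : 0;
}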
index 33414b1de2013b8e3fc68f31a70f8a56f2900cbe..7d1326985bee8b4d3e9dcb326bf795622901bf49 100644 (file)
@@ -134,6 +134,8 @@ struct regmap {
 
        /* if set, converts bulk rw to single rw */
        bool use_single_rw;
+       /* if set, the device supports multi write mode */
+       bool can_multi_write;
 
        struct rb_root range_tree;
        void *selector_work_buf;        /* Scratch buffer used for selector */
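Illustrative sketch (not part of the patch): a regmap_config opting in to the new multi-write path; foo_regmap_config and the register sizes are assumptions. When can_multi_write is left false, the core falls back to one _regmap_write() per register, as the regmap.c hunk further down shows.

#include <linux/regmap.h>

static const struct regmap_config foo_regmap_config = {
        .reg_bits        = 8,
        .val_bits        = 8,
        .max_register    = 0x7f,
        /* The bus can take several formatted (reg, val) pairs in one burst. */
        .can_multi_write = true,
};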
index d4dd77134814bac1a8ba2bc91a817c1cc2002454..29b4128da0b08ce6b8eda048557ed9093b07efa0 100644 (file)
@@ -249,11 +249,12 @@ static int regcache_default_sync(struct regmap *map, unsigned int min,
 {
        unsigned int reg;
 
-       for (reg = min; reg <= max; reg++) {
+       for (reg = min; reg <= max; reg += map->reg_stride) {
                unsigned int val;
                int ret;
 
-               if (regmap_volatile(map, reg))
+               if (regmap_volatile(map, reg) ||
+                   !regmap_writeable(map, reg))
                        continue;
 
                ret = regcache_read(map, reg, &val);
@@ -312,10 +313,6 @@ int regcache_sync(struct regmap *map)
        /* Apply any patch first */
        map->cache_bypass = 1;
        for (i = 0; i < map->patch_regs; i++) {
-               if (map->patch[i].reg % map->reg_stride) {
-                       ret = -EINVAL;
-                       goto out;
-               }
                ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to write %x = %x: %d\n",
@@ -636,10 +633,10 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
        if (*data == NULL)
                return 0;
 
-       count = cur - base;
+       count = (cur - base) / map->reg_stride;
 
        dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
-               count * val_bytes, count, base, cur - 1);
+               count * val_bytes, count, base, cur - map->reg_stride);
 
        map->cache_bypass = 1;
 
index c5471cd6ebb7bc78f1e8e63e0d4330e37835e5ca..45d812c0ea7751868d3d14d7691da6f74493b54a 100644 (file)
@@ -511,7 +511,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
        debugfs_create_file("range", 0400, map->debugfs,
                            map, &regmap_reg_ranges_fops);
 
-       if (map->max_register) {
+       if (map->max_register || regmap_readable(map, 0)) {
                debugfs_create_file("registers", 0400, map->debugfs,
                                    map, &regmap_map_fops);
                debugfs_create_file("access", 0400, map->debugfs,
index 82692068d3cbe8dfa4cc5fdb6b544b74168255ea..edf88f20cbce8df434058b18f772393e781b4547 100644 (file)
@@ -368,8 +368,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
        if (!d)
                return -ENOMEM;
 
-       *data = d;
-
        d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
                                GFP_KERNEL);
        if (!d->status_buf)
@@ -506,6 +504,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                goto err_domain;
        }
 
+       *data = d;
+
        return 0;
 
 err_domain:
@@ -533,7 +533,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
                return;
 
        free_irq(irq, d);
-       /* We should unmap the domain but... */
+       irq_domain_remove(d->domain);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
index 81f977510775460fa2bf8b0bd500c69027c02274..de45a1e1548fa31894c2afd3af1b658869939afe 100644 (file)
 
 struct regmap_mmio_context {
        void __iomem *regs;
+       unsigned reg_bytes;
        unsigned val_bytes;
+       unsigned pad_bytes;
        struct clk *clk;
 };
 
+static inline void regmap_mmio_regsize_check(size_t reg_size)
+{
+       switch (reg_size) {
+       case 1:
+       case 2:
+       case 4:
+#ifdef CONFIG_64BIT
+       case 8:
+#endif
+               break;
+       default:
+               BUG();
+       }
+}
+
+static int regmap_mmio_regbits_check(size_t reg_bits)
+{
+       switch (reg_bits) {
+       case 8:
+       case 16:
+       case 32:
+#ifdef CONFIG_64BIT
+       case 64:
+#endif
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static inline void regmap_mmio_count_check(size_t count)
+{
+       BUG_ON(count % 2 != 0);
+}
+
 static int regmap_mmio_gather_write(void *context,
                                    const void *reg, size_t reg_size,
                                    const void *val, size_t val_size)
@@ -38,7 +75,7 @@ static int regmap_mmio_gather_write(void *context,
        u32 offset;
        int ret;
 
-       BUG_ON(reg_size != 4);
+       regmap_mmio_regsize_check(reg_size);
 
        if (!IS_ERR(ctx->clk)) {
                ret = clk_enable(ctx->clk);
@@ -81,9 +118,13 @@ static int regmap_mmio_gather_write(void *context,
 
 static int regmap_mmio_write(void *context, const void *data, size_t count)
 {
-       BUG_ON(count < 4);
+       struct regmap_mmio_context *ctx = context;
+       u32 offset = ctx->reg_bytes + ctx->pad_bytes;
+
+       regmap_mmio_count_check(count);
 
-       return regmap_mmio_gather_write(context, data, 4, data + 4, count - 4);
+       return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
+                                       data + offset, count - offset);
 }
 
 static int regmap_mmio_read(void *context,
@@ -94,7 +135,7 @@ static int regmap_mmio_read(void *context,
        u32 offset;
        int ret;
 
-       BUG_ON(reg_size != 4);
+       regmap_mmio_regsize_check(reg_size);
 
        if (!IS_ERR(ctx->clk)) {
                ret = clk_enable(ctx->clk);
@@ -165,8 +206,9 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
        int min_stride;
        int ret;
 
-       if (config->reg_bits != 32)
-               return ERR_PTR(-EINVAL);
+       ret = regmap_mmio_regbits_check(config->reg_bits);
+       if (ret)
+               return ERR_PTR(ret);
 
        if (config->pad_bits)
                return ERR_PTR(-EINVAL);
@@ -209,6 +251,8 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
 
        ctx->regs = regs;
        ctx->val_bytes = config->val_bits / 8;
+       ctx->reg_bytes = config->reg_bits / 8;
+       ctx->pad_bytes = config->pad_bits / 8;
        ctx->clk = ERR_PTR(-ENODEV);
 
        if (clk_id == NULL)
index 6a19515f8a458b3719fce7325f265a60a4d147a4..d0a072463a04ff1c4d3f83cbd66de587be4c5e44 100644 (file)
@@ -380,6 +380,28 @@ static void regmap_range_exit(struct regmap *map)
        kfree(map->selector_work_buf);
 }
 
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+                     const struct regmap_config *config)
+{
+       struct regmap **m;
+
+       map->dev = dev;
+
+       regmap_debugfs_init(map, config->name);
+
+       /* Add a devres resource for dev_get_regmap() */
+       m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
+       if (!m) {
+               regmap_debugfs_exit(map);
+               return -ENOMEM;
+       }
+       *m = map;
+       devres_add(dev, m);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_attach_dev);
+
 /**
  * regmap_init(): Initialise register map
  *
@@ -397,7 +419,7 @@ struct regmap *regmap_init(struct device *dev,
                           void *bus_context,
                           const struct regmap_config *config)
 {
-       struct regmap *map, **m;
+       struct regmap *map;
        int ret = -EINVAL;
        enum regmap_endian reg_endian, val_endian;
        int i, j;
@@ -439,6 +461,7 @@ struct regmap *regmap_init(struct device *dev,
        else
                map->reg_stride = 1;
        map->use_single_rw = config->use_single_rw;
+       map->can_multi_write = config->can_multi_write;
        map->dev = dev;
        map->bus = bus;
        map->bus_context = bus_context;
@@ -718,7 +741,7 @@ skip_format_initialization:
                new->window_start = range_cfg->window_start;
                new->window_len = range_cfg->window_len;
 
-               if (_regmap_range_add(map, new) == false) {
+               if (!_regmap_range_add(map, new)) {
                        dev_err(map->dev, "Failed to add range %d\n", i);
                        kfree(new);
                        goto err_range;
@@ -734,25 +757,18 @@ skip_format_initialization:
                }
        }
 
-       regmap_debugfs_init(map, config->name);
-
        ret = regcache_init(map, config);
        if (ret != 0)
                goto err_range;
 
-       /* Add a devres resource for dev_get_regmap() */
-       m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
-       if (!m) {
-               ret = -ENOMEM;
-               goto err_debugfs;
-       }
-       *m = map;
-       devres_add(dev, m);
+       if (dev) {
+               ret = regmap_attach_dev(dev, map, config);
+               if (ret != 0)
+                       goto err_regcache;
+       }
 
        return map;
 
-err_debugfs:
-       regmap_debugfs_exit(map);
+err_regcache:
        regcache_exit(map);
 err_range:
        regmap_range_exit(map);
@@ -1520,12 +1536,12 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
        if (reg % map->reg_stride)
                return -EINVAL;
 
-       map->lock(map->lock_arg);
        /*
         * Some devices don't support bulk write, for
         * them we have a series of single write operations.
         */
        if (!map->bus || map->use_single_rw) {
+               map->lock(map->lock_arg);
                for (i = 0; i < val_count; i++) {
                        unsigned int ival;
 
@@ -1554,31 +1570,239 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                        if (ret != 0)
                                goto out;
                }
+out:
+               map->unlock(map->lock_arg);
        } else {
                void *wval;
 
                wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
                if (!wval) {
-                       ret = -ENOMEM;
                        dev_err(map->dev, "Error in memory allocation\n");
-                       goto out;
+                       return -ENOMEM;
                }
                for (i = 0; i < val_count * val_bytes; i += val_bytes)
                        map->format.parse_inplace(wval + i);
 
+               map->lock(map->lock_arg);
                ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+               map->unlock(map->lock_arg);
 
                kfree(wval);
        }
-out:
-       map->unlock(map->lock_arg);
        return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_bulk_write);
 
+/*
+ * _regmap_raw_multi_reg_write()
+ *
+ * the (register,newvalue) pairs in regs have not been formatted, but
+ * they are all in the same page and have been changed to being page
+ * relative. The page register has been written if that was necessary.
+ */
+static int _regmap_raw_multi_reg_write(struct regmap *map,
+                                      const struct reg_default *regs,
+                                      size_t num_regs)
+{
+       int ret;
+       void *buf;
+       int i;
+       u8 *u8;
+       size_t val_bytes = map->format.val_bytes;
+       size_t reg_bytes = map->format.reg_bytes;
+       size_t pad_bytes = map->format.pad_bytes;
+       size_t pair_size = reg_bytes + pad_bytes + val_bytes;
+       size_t len = pair_size * num_regs;
+
+       buf = kzalloc(len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       /* We have to linearise by hand. */
+
+       u8 = buf;
+
+       for (i = 0; i < num_regs; i++) {
+               int reg = regs[i].reg;
+               int val = regs[i].def;
+               trace_regmap_hw_write_start(map->dev, reg, 1);
+               map->format.format_reg(u8, reg, map->reg_shift);
+               u8 += reg_bytes + pad_bytes;
+               map->format.format_val(u8, val, 0);
+               u8 += val_bytes;
+       }
+       u8 = buf;
+       *u8 |= map->write_flag_mask;
+
+       ret = map->bus->write(map->bus_context, buf, len);
+
+       kfree(buf);
+
+       for (i = 0; i < num_regs; i++) {
+               int reg = regs[i].reg;
+               trace_regmap_hw_write_done(map->dev, reg, 1);
+       }
+       return ret;
+}
+
+static unsigned int _regmap_register_page(struct regmap *map,
+                                         unsigned int reg,
+                                         struct regmap_range_node *range)
+{
+       unsigned int win_page = (reg - range->range_min) / range->window_len;
+
+       return win_page;
+}
+
+static int _regmap_range_multi_paged_reg_write(struct regmap *map,
+                                              struct reg_default *regs,
+                                              size_t num_regs)
+{
+       int ret;
+       int i, n;
+       struct reg_default *base;
+       unsigned int this_page;
+       /*
+        * the set of registers is not necessarily in order, but
+        * since the write order must be preserved this algorithm
+        * chops the set each time the page changes
+        */
+       base = regs;
+       for (i = 0, n = 0; i < num_regs; i++, n++) {
+               unsigned int reg = regs[i].reg;
+               struct regmap_range_node *range;
+
+               range = _regmap_range_lookup(map, reg);
+               if (range) {
+                       unsigned int win_page = _regmap_register_page(map, reg,
+                                                                     range);
+
+                       if (i == 0)
+                               this_page = win_page;
+                       if (win_page != this_page) {
+                               this_page = win_page;
+                               ret = _regmap_raw_multi_reg_write(map, base, n);
+                               if (ret != 0)
+                                       return ret;
+                               base += n;
+                               n = 0;
+                       }
+                       ret = _regmap_select_page(map, &base[n].reg, range, 1);
+                       if (ret != 0)
+                               return ret;
+               }
+       }
+       if (n > 0)
+               return _regmap_raw_multi_reg_write(map, base, n);
+       return 0;
+}
+
+static int _regmap_multi_reg_write(struct regmap *map,
+                                  const struct reg_default *regs,
+                                  size_t num_regs)
+{
+       int i;
+       int ret;
+
+       if (!map->can_multi_write) {
+               for (i = 0; i < num_regs; i++) {
+                       ret = _regmap_write(map, regs[i].reg, regs[i].def);
+                       if (ret != 0)
+                               return ret;
+               }
+               return 0;
+       }
+
+       if (!map->format.parse_inplace)
+               return -EINVAL;
+
+       if (map->writeable_reg)
+               for (i = 0; i < num_regs; i++) {
+                       int reg = regs[i].reg;
+                       if (!map->writeable_reg(map->dev, reg))
+                               return -EINVAL;
+                       if (reg % map->reg_stride)
+                               return -EINVAL;
+               }
+
+       if (!map->cache_bypass) {
+               for (i = 0; i < num_regs; i++) {
+                       unsigned int val = regs[i].def;
+                       unsigned int reg = regs[i].reg;
+                       ret = regcache_write(map, reg, val);
+                       if (ret) {
+                               dev_err(map->dev,
+                               "Error in caching of register: %x ret: %d\n",
+                                                               reg, ret);
+                               return ret;
+                       }
+               }
+               if (map->cache_only) {
+                       map->cache_dirty = true;
+                       return 0;
+               }
+       }
+
+       WARN_ON(!map->bus);
+
+       for (i = 0; i < num_regs; i++) {
+               unsigned int reg = regs[i].reg;
+               struct regmap_range_node *range;
+               range = _regmap_range_lookup(map, reg);
+               if (range) {
+                       size_t len = sizeof(struct reg_default)*num_regs;
+                       struct reg_default *base = kmemdup(regs, len,
+                                                          GFP_KERNEL);
+                       if (!base)
+                               return -ENOMEM;
+                       ret = _regmap_range_multi_paged_reg_write(map, base,
+                                                                 num_regs);
+                       kfree(base);
+
+                       return ret;
+               }
+       }
+       return _regmap_raw_multi_reg_write(map, regs, num_regs);
+}
+
 /*
  * regmap_multi_reg_write(): Write multiple registers to the device
  *
+ * where the set of register,value pairs is supplied in any order,
+ * possibly not all in a single range.
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register,value to be written
+ * @num_regs: Number of registers to write
+ *
+ * The 'normal' block write mode will ultimately send data on the
+ * target bus as R,V1,V2,V3,..,Vn, where successively higher registers are
+ * addressed. However, this alternative block multi write mode will send
+ * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
+ * must of course support the mode.
+ *
+ * A value of zero will be returned on success, a negative errno will be
+ * returned in error cases.
+ */
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+                          int num_regs)
+{
+       int ret;
+
+       map->lock(map->lock_arg);
+
+       ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+       map->unlock(map->lock_arg);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+
+/*
+ * regmap_multi_reg_write_bypassed(): Write multiple registers to the
+ *                                    device but not the cache
+ *
  * where the set of registers is supplied in any order
  *
  * @map: Register map to write to
@@ -1592,30 +1816,27 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
  * A value of zero will be returned on success, a negative errno will
  * be returned in error cases.
  */
-int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
-                               int num_regs)
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+                                   const struct reg_default *regs,
+                                   int num_regs)
 {
-       int ret = 0, i;
-
-       for (i = 0; i < num_regs; i++) {
-               int reg = regs[i].reg;
-               if (reg % map->reg_stride)
-                       return -EINVAL;
-       }
+       int ret;
+       bool bypass;
 
        map->lock(map->lock_arg);
 
-       for (i = 0; i < num_regs; i++) {
-               ret = _regmap_write(map, regs[i].reg, regs[i].def);
-               if (ret != 0)
-                       goto out;
-       }
-out:
+       bypass = map->cache_bypass;
+       map->cache_bypass = true;
+
+       ret = _regmap_multi_reg_write(map, regs, num_regs);
+
+       map->cache_bypass = bypass;
+
        map->unlock(map->lock_arg);
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
+EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
 
 /**
  * regmap_raw_write_async(): Write raw values to one or more registers
@@ -1736,6 +1957,9 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
        if (map->cache_only)
                return -EBUSY;
 
+       if (!regmap_readable(map, reg))
+               return -EIO;
+
        ret = map->reg_read(context, reg, val);
        if (ret == 0) {
 #ifdef LOG_DEVICE
@@ -1966,9 +2190,11 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 
        if (tmp != orig) {
                ret = _regmap_write(map, reg, tmp);
-               *change = true;
+               if (change)
+                       *change = true;
        } else {
-               *change = false;
+               if (change)
+                       *change = false;
        }
 
        return ret;
@@ -1987,11 +2213,10 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 int regmap_update_bits(struct regmap *map, unsigned int reg,
                       unsigned int mask, unsigned int val)
 {
-       bool change;
        int ret;
 
        map->lock(map->lock_arg);
-       ret = _regmap_update_bits(map, reg, mask, val, &change);
+       ret = _regmap_update_bits(map, reg, mask, val, NULL);
        map->unlock(map->lock_arg);
 
        return ret;
@@ -2016,14 +2241,13 @@ EXPORT_SYMBOL_GPL(regmap_update_bits);
 int regmap_update_bits_async(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val)
 {
-       bool change;
        int ret;
 
        map->lock(map->lock_arg);
 
        map->async = true;
 
-       ret = _regmap_update_bits(map, reg, mask, val, &change);
+       ret = _regmap_update_bits(map, reg, mask, val, NULL);
 
        map->async = false;
 
@@ -2173,35 +2397,21 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
  * apply them immediately.  Typically this is used to apply
  * corrections to be applied to the device defaults on startup, such
  * as the updates some vendors provide to undocumented registers.
+ *
+ * The caller must ensure that this function cannot be called
+ * concurrently with either itself or regcache_sync().
  */
 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
                          int num_regs)
 {
        struct reg_default *p;
-       int i, ret;
+       int ret;
        bool bypass;
 
        if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
            num_regs))
                return 0;
 
-       map->lock(map->lock_arg);
-
-       bypass = map->cache_bypass;
-
-       map->cache_bypass = true;
-       map->async = true;
-
-       /* Write out first; it's useful to apply even if we fail later. */
-       for (i = 0; i < num_regs; i++) {
-               ret = _regmap_write(map, regs[i].reg, regs[i].def);
-               if (ret != 0) {
-                       dev_err(map->dev, "Failed to write %x = %x: %d\n",
-                               regs[i].reg, regs[i].def, ret);
-                       goto out;
-               }
-       }
-
        p = krealloc(map->patch,
                     sizeof(struct reg_default) * (map->patch_regs + num_regs),
                     GFP_KERNEL);
@@ -2210,9 +2420,20 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
                map->patch = p;
                map->patch_regs += num_regs;
        } else {
-               ret = -ENOMEM;
+               return -ENOMEM;
        }
 
+       map->lock(map->lock_arg);
+
+       bypass = map->cache_bypass;
+
+       map->cache_bypass = true;
+       map->async = true;
+
+       ret = _regmap_multi_reg_write(map, regs, num_regs);
+       if (ret != 0)
+               goto out;
+
 out:
        map->async = false;
        map->cache_bypass = bypass;
@@ -2240,6 +2461,18 @@ int regmap_get_val_bytes(struct regmap *map)
 }
 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
 
+int regmap_parse_val(struct regmap *map, const void *buf,
+                       unsigned int *val)
+{
+       if (!map->format.parse_val)
+               return -EINVAL;
+
+       *val = map->format.parse_val(buf);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(regmap_parse_val);
+
 static int __init regmap_initcall(void)
 {
        regmap_debugfs_initcall();
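
For reference, a minimal usage sketch of the new regmap_multi_reg_write() API added above. All names and register addresses below are hypothetical and only illustrate the calling convention; this is not code from the series.

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical init sequence: the order of the pairs is preserved on the bus. */
static const struct reg_default example_init_seq[] = {
	{ .reg = 0x10, .def = 0x0001 },
	{ .reg = 0x24, .def = 0x8000 },
	{ .reg = 0x11, .def = 0x00ff },
};

static int example_apply_init(struct regmap *map)
{
	/* Falls back to one write per register if the map cannot multi-write. */
	return regmap_multi_reg_write(map, example_init_seq,
				      ARRAY_SIZE(example_init_seq));
}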
index 2023043ce7c0e94b0e3c3618eb0787a9ad5ddb73..8f5565bf34cda31504e526ccc3d79d4e7fe20fd2 100644 (file)
@@ -961,17 +961,31 @@ static void empty(void)
 {
 }
 
-static DECLARE_WORK(floppy_work, NULL);
+static void (*floppy_work_fn)(void);
+
+static void floppy_work_workfn(struct work_struct *work)
+{
+       floppy_work_fn();
+}
+
+static DECLARE_WORK(floppy_work, floppy_work_workfn);
 
 static void schedule_bh(void (*handler)(void))
 {
        WARN_ON(work_pending(&floppy_work));
 
-       PREPARE_WORK(&floppy_work, (work_func_t)handler);
+       floppy_work_fn = handler;
        queue_work(floppy_wq, &floppy_work);
 }
 
-static DECLARE_DELAYED_WORK(fd_timer, NULL);
+static void (*fd_timer_fn)(void) = NULL;
+
+static void fd_timer_workfn(struct work_struct *work)
+{
+       fd_timer_fn();
+}
+
+static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
 
 static void cancel_activity(void)
 {
@@ -982,7 +996,7 @@ static void cancel_activity(void)
 
 /* this function makes sure that the disk stays in the drive during the
  * transfer */
-static void fd_watchdog(struct work_struct *arg)
+static void fd_watchdog(void)
 {
        debug_dcl(DP->flags, "calling disk change from watchdog\n");
 
@@ -993,7 +1007,7 @@ static void fd_watchdog(struct work_struct *arg)
                reset_fdc();
        } else {
                cancel_delayed_work(&fd_timer);
-               PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+               fd_timer_fn = fd_watchdog;
                queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
        }
 }
@@ -1005,7 +1019,8 @@ static void main_command_interrupt(void)
 }
 
 /* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long expires, work_func_t function)
+static int fd_wait_for_completion(unsigned long expires,
+                                 void (*function)(void))
 {
        if (FDCS->reset) {
                reset_fdc();    /* do the reset during sleep to win time
@@ -1016,7 +1031,7 @@ static int fd_wait_for_completion(unsigned long expires, work_func_t function)
 
        if (time_before(jiffies, expires)) {
                cancel_delayed_work(&fd_timer);
-               PREPARE_DELAYED_WORK(&fd_timer, function);
+               fd_timer_fn = function;
                queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
                return 1;
        }
@@ -1334,8 +1349,7 @@ static int fdc_dtr(void)
         * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
         */
        FDCS->dtr = raw_cmd->rate & 3;
-       return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
-                                     (work_func_t)floppy_ready);
+       return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
 }                              /* fdc_dtr */
 
 static void tell_sector(void)
@@ -1440,7 +1454,7 @@ static void setup_rw_floppy(void)
        int flags;
        int dflags;
        unsigned long ready_date;
-       work_func_t function;
+       void (*function)(void);
 
        flags = raw_cmd->flags;
        if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1454,9 +1468,9 @@ static void setup_rw_floppy(void)
                 */
                if (time_after(ready_date, jiffies + DP->select_delay)) {
                        ready_date -= DP->select_delay;
-                       function = (work_func_t)floppy_start;
+                       function = floppy_start;
                } else
-                       function = (work_func_t)setup_rw_floppy;
+                       function = setup_rw_floppy;
 
                /* wait until the floppy is spinning fast enough */
                if (fd_wait_for_completion(ready_date, function))
@@ -1486,7 +1500,7 @@ static void setup_rw_floppy(void)
                inr = result();
                cont->interrupt();
        } else if (flags & FD_RAW_NEED_DISK)
-               fd_watchdog(NULL);
+               fd_watchdog();
 }
 
 static int blind_seek;
@@ -1863,7 +1877,7 @@ static int start_motor(void (*function)(void))
 
        /* wait_for_completion also schedules reset if needed. */
        return fd_wait_for_completion(DRS->select_date + DP->select_delay,
-                                     (work_func_t)function);
+                                     function);
 }
 
 static void floppy_ready(void)
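
The floppy conversion above replaces PREPARE_WORK()/PREPARE_DELAYED_WORK() with a fixed work function that dispatches through a function pointer; the nvme changes below apply the same idea with a per-device pointer. A generic sketch of the pattern, with illustrative names only:

#include <linux/workqueue.h>

static void (*example_fn)(void);		/* target chosen at schedule time */

static void example_workfn(struct work_struct *work)
{
	example_fn();				/* run whatever was last requested */
}

static DECLARE_WORK(example_work, example_workfn);

static void example_schedule(void (*handler)(void))
{
	example_fn = handler;			/* swap the target, not the work item */
	schedule_work(&example_work);
}

As in the driver, the caller has to ensure the work item is not already pending when the target pointer is changed.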
index 51824d1f23ea53df5d49cd6d4e10e07b6b45b1fb..8459e4e7c71940462134eb6a7ad68c86f03ebb7a 100644 (file)
@@ -993,7 +993,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
                dev_warn(&dev->pci_dev->dev,
                        "I/O %d QID %d timeout, reset controller\n", cmdid,
                                                                nvmeq->qid);
-               PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+               dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
                return;
        }
@@ -1696,8 +1696,7 @@ static int nvme_kthread(void *data)
                                list_del_init(&dev->node);
                                dev_warn(&dev->pci_dev->dev,
                                        "Failed status, reset controller\n");
-                               PREPARE_WORK(&dev->reset_work,
-                                                       nvme_reset_failed_dev);
+                               dev->reset_workfn = nvme_reset_failed_dev;
                                queue_work(nvme_workq, &dev->reset_work);
                                continue;
                        }
@@ -2406,7 +2405,7 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                return ret;
        if (ret == -EBUSY) {
                spin_lock(&dev_list_lock);
-               PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+               dev->reset_workfn = nvme_remove_disks;
                queue_work(nvme_workq, &dev->reset_work);
                spin_unlock(&dev_list_lock);
        }
@@ -2435,6 +2434,12 @@ static void nvme_reset_failed_dev(struct work_struct *ws)
        nvme_dev_reset(dev);
 }
 
+static void nvme_reset_workfn(struct work_struct *work)
+{
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
+       dev->reset_workfn(work);
+}
+
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int result = -ENOMEM;
@@ -2453,7 +2458,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto free;
 
        INIT_LIST_HEAD(&dev->namespaces);
-       INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
+       dev->reset_workfn = nvme_reset_failed_dev;
+       INIT_WORK(&dev->reset_work, nvme_reset_workfn);
        dev->pci_dev = pdev;
        pci_set_drvdata(pdev, dev);
        result = nvme_set_instance(dev);
@@ -2553,7 +2559,7 @@ static int nvme_resume(struct device *dev)
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
        if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
-               PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+               ndev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &ndev->reset_work);
        }
        return 0;
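
A sketch of the per-device variant used in the nvme changes above, where the dispatch pointer lives in the device structure and is recovered with container_of(); all names are hypothetical:

#include <linux/workqueue.h>

struct example_dev {
	struct work_struct reset_work;
	void (*reset_workfn)(struct work_struct *work);
};

static void example_reset_workfn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev,
					       reset_work);

	dev->reset_workfn(work);		/* call the currently selected handler */
}

/* At probe time: INIT_WORK(&dev->reset_work, example_reset_workfn);
 * later callers just update dev->reset_workfn and queue_work() the same item. */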
index b365e0dfccb66f7c256a9d07d7fd976fba17ae95..34898d53395b10eae386e06fef3d12b9635fe93a 100644 (file)
@@ -2109,7 +2109,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
        rbd_assert(img_request->obj_request_count > 0);
        rbd_assert(which != BAD_WHICH);
        rbd_assert(which < img_request->obj_request_count);
-       rbd_assert(which >= img_request->next_completion);
 
        spin_lock_irq(&img_request->completion_lock);
        if (which != img_request->next_completion)
index cd6950fd8caf063417f6717db581f39d69100ed4..52e9329e3c51c05419a2d811ad3ed8fd271baa66 100644 (file)
@@ -140,3 +140,51 @@ config VF_PIT_TIMER
        bool
        help
          Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
+
+config SYS_SUPPORTS_SH_CMT
+        bool
+
+config SYS_SUPPORTS_SH_MTU2
+        bool
+
+config SYS_SUPPORTS_SH_TMU
+        bool
+
+config SYS_SUPPORTS_EM_STI
+        bool
+
+config SH_TIMER_CMT
+       bool "Renesas CMT timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       default SYS_SUPPORTS_SH_CMT
+       help
+         This enables build of a clocksource and clockevent driver for
+         the Compare Match Timer (CMT) hardware available in 16/32/48-bit
+         variants on a wide range of Mobile and Automotive SoCs from Renesas.
+
+config SH_TIMER_MTU2
+       bool "Renesas MTU2 timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       default SYS_SUPPORTS_SH_MTU2
+       help
+         This enables build of a clockevent driver for the Multi-Function
+         Timer Pulse Unit 2 (MTU2) hardware available on SoCs from Renesas.
+         This hardware comes with 16-bit timer registers.
+
+config SH_TIMER_TMU
+       bool "Renesas TMU timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       default SYS_SUPPORTS_SH_TMU
+       help
+         This enables build of a clocksource and clockevent driver for
+         the 32-bit Timer Unit (TMU) hardware available on a wide range
+         of SoCs from Renesas.
+
+config EM_TIMER_STI
+       bool "Renesas STI timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       default SYS_SUPPORTS_EM_STI
+       help
+         This enables build of a clocksource and clockevent driver for
+         the 48-bit System Timer (STI) hardware available on SoCs
+         such as EMEV2 from former NEC Electronics.
index c7ca50a9c232bdcc246fbce2508f71ff6dbcab97..aed3488d942656f087fbc639dce87b48ad76c277 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_ARCH_MARCO)      += timer-marco.o
 obj-$(CONFIG_ARCH_MOXART)      += moxart_timer.o
 obj-$(CONFIG_ARCH_MXS)         += mxs_timer.o
 obj-$(CONFIG_ARCH_PRIMA2)      += timer-prima2.o
+obj-$(CONFIG_ARCH_U300)                += timer-u300.o
 obj-$(CONFIG_SUN4I_TIMER)      += sun4i_timer.o
 obj-$(CONFIG_SUN5I_HSTIMER)    += timer-sun5i.o
 obj-$(CONFIG_ARCH_TEGRA)       += tegra20_timer.o
@@ -37,3 +38,4 @@ obj-$(CONFIG_ARM_ARCH_TIMER)          += arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)         += arm_global_timer.o
 obj-$(CONFIG_CLKSRC_METAG_GENERIC)     += metag_generic.o
 obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)  += dummy_timer.o
+obj-$(CONFIG_ARCH_KEYSTONE)            += timer-keystone.o
index 95fb944e15ee0579a6f437f8440ba93fe8443524..57e823c44d2ad326eeaaa788fbb67d80ba620c7d 100644 (file)
@@ -277,6 +277,7 @@ static void __arch_timer_setup(unsigned type,
                        clk->set_next_event = arch_timer_set_next_event_phys;
                }
        } else {
+               clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
                clk->rating = 400;
                clk->cpumask = cpu_all_mask;
index 63f176de0d0228c63ba58f606d21afd98cdc5e9e..49fbe2847c8474cfd9398ee70e5bba713fe381c4 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
 #include <linux/of_address.h>
@@ -52,6 +53,8 @@
 #define TTC_CNT_CNTRL_DISABLE_MASK     0x1
 
 #define TTC_CLK_CNTRL_CSRC_MASK                (1 << 5)        /* clock source */
+#define TTC_CLK_CNTRL_PSV_MASK         0x1e
+#define TTC_CLK_CNTRL_PSV_SHIFT                1
 
 /*
  * Setup the timers to use pre-scaling, using a fixed value for now that will
@@ -63,6 +66,8 @@
 #define CLK_CNTRL_PRESCALE_EN  1
 #define CNT_CNTRL_RESET                (1 << 4)
 
+#define MAX_F_ERR 50
+
 /**
  * struct ttc_timer - This definition defines local timer structure
  *
@@ -82,6 +87,8 @@ struct ttc_timer {
                container_of(x, struct ttc_timer, clk_rate_change_nb)
 
 struct ttc_timer_clocksource {
+       u32                     scale_clk_ctrl_reg_old;
+       u32                     scale_clk_ctrl_reg_new;
        struct ttc_timer        ttc;
        struct clocksource      cs;
 };
@@ -229,32 +236,89 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
                        struct ttc_timer_clocksource, ttc);
 
        switch (event) {
-       case POST_RATE_CHANGE:
+       case PRE_RATE_CHANGE:
+       {
+               u32 psv;
+               unsigned long factor, rate_low, rate_high;
+
+               if (ndata->new_rate > ndata->old_rate) {
+                       factor = DIV_ROUND_CLOSEST(ndata->new_rate,
+                                       ndata->old_rate);
+                       rate_low = ndata->old_rate;
+                       rate_high = ndata->new_rate;
+               } else {
+                       factor = DIV_ROUND_CLOSEST(ndata->old_rate,
+                                       ndata->new_rate);
+                       rate_low = ndata->new_rate;
+                       rate_high = ndata->old_rate;
+               }
+
+               if (!is_power_of_2(factor))
+                               return NOTIFY_BAD;
+
+               if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR)
+                       return NOTIFY_BAD;
+
+               factor = __ilog2_u32(factor);
+
                /*
-                * Do whatever is necessary to maintain a proper time base
-                *
-                * I cannot find a way to adjust the currently used clocksource
-                * to the new frequency. __clocksource_updatefreq_hz() sounds
-                * good, but does not work. Not sure what's that missing.
-                *
-                * This approach works, but triggers two clocksource switches.
-                * The first after unregister to clocksource jiffies. And
-                * another one after the register to the newly registered timer.
-                *
-                * Alternatively we could 'waste' another HW timer to ping pong
-                * between clock sources. That would also use one register and
-                * one unregister call, but only trigger one clocksource switch
-                * for the cost of another HW timer used by the OS.
+                * store timer clock ctrl register so we can restore it in case
+                * of an abort.
                 */
-               clocksource_unregister(&ttccs->cs);
-               clocksource_register_hz(&ttccs->cs,
-                               ndata->new_rate / PRESCALE);
-               /* fall through */
-       case PRE_RATE_CHANGE:
+               ttccs->scale_clk_ctrl_reg_old =
+                       __raw_readl(ttccs->ttc.base_addr +
+                                       TTC_CLK_CNTRL_OFFSET);
+
+               psv = (ttccs->scale_clk_ctrl_reg_old &
+                               TTC_CLK_CNTRL_PSV_MASK) >>
+                               TTC_CLK_CNTRL_PSV_SHIFT;
+               if (ndata->new_rate < ndata->old_rate)
+                       psv -= factor;
+               else
+                       psv += factor;
+
+               /* prescaler within legal range? */
+               if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
+                       return NOTIFY_BAD;
+
+               ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
+                       ~TTC_CLK_CNTRL_PSV_MASK;
+               ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;
+
+
+               /* scale down: adjust divider in post-change notification */
+               if (ndata->new_rate < ndata->old_rate)
+                       return NOTIFY_DONE;
+
+               /* scale up: adjust divider now - before frequency change */
+               __raw_writel(ttccs->scale_clk_ctrl_reg_new,
+                               ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+               break;
+       }
+       case POST_RATE_CHANGE:
+               /* scale up: pre-change notification did the adjustment */
+               if (ndata->new_rate > ndata->old_rate)
+                       return NOTIFY_OK;
+
+               /* scale down: adjust divider now - after frequency change */
+               __raw_writel(ttccs->scale_clk_ctrl_reg_new,
+                               ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+               break;
+
        case ABORT_RATE_CHANGE:
+               /* we have to undo the adjustment in case we scale up */
+               if (ndata->new_rate < ndata->old_rate)
+                       return NOTIFY_OK;
+
+               /* restore original register value */
+               __raw_writel(ttccs->scale_clk_ctrl_reg_old,
+                               ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
+               /* fall through */
        default:
                return NOTIFY_DONE;
        }
+
+       return NOTIFY_DONE;
 }
 
 static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
@@ -321,25 +385,12 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
 
        switch (event) {
        case POST_RATE_CHANGE:
-       {
-               unsigned long flags;
-
-               /*
-                * clockevents_update_freq should be called with IRQ disabled on
-                * the CPU the timer provides events for. The timer we use is
-                * common to both CPUs, not sure if we need to run on both
-                * cores.
-                */
-               local_irq_save(flags);
-               clockevents_update_freq(&ttcce->ce,
-                               ndata->new_rate / PRESCALE);
-               local_irq_restore(flags);
-
                /* update cached frequency */
                ttc->freq = ndata->new_rate;
 
+               clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);
+
                /* fall through */
-       }
        case PRE_RATE_CHANGE:
        case ABORT_RATE_CHANGE:
        default:
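
A worked example of the prescaler arithmetic in the clocksource notifier above; the MAX_F_ERR tolerance check is omitted for brevity, and the names and rates are illustrative only, not part of the driver.

#include <linux/kernel.h>
#include <linux/log2.h>

/* Returns the adjusted prescaler value, or a negative errno if the rate
 * change is not a clean power-of-two step (mirroring the NOTIFY_BAD case). */
static int example_ttc_new_psv(unsigned long old_rate, unsigned long new_rate,
			       u32 psv)
{
	unsigned long factor = (new_rate > old_rate) ?
		DIV_ROUND_CLOSEST(new_rate, old_rate) :
		DIV_ROUND_CLOSEST(old_rate, new_rate);

	if (!is_power_of_2(factor))
		return -EINVAL;

	factor = ilog2(factor);

	/* e.g. 133 MHz -> 66.5 MHz gives factor = 1: psv drops by one,
	 * i.e. one prescaler step less, keeping the counted rate stable. */
	return (new_rate < old_rate) ? (int)(psv - factor) : (int)(psv + factor);
}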
index 48f76bc05da0d8fb5b5515cd6c919a9cf3c373f6..c2e390efbdca6fc2b71a3e39356ddb9c2156200a 100644 (file)
@@ -410,7 +410,7 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
        mevt = container_of(evt, struct mct_clock_event_device, evt);
 
        mevt->base = EXYNOS4_MCT_L_BASE(cpu);
-       sprintf(mevt->name, "mct_tick%d", cpu);
+       snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
 
        evt->name = mevt->name;
        evt->cpumask = cpumask_of(cpu);
index bf497afba9ad1ef0c6c8ec57c9761f05d3acf799..efb17c3ee120e5ee28fa05099c4c3c7ce09f0ac1 100644 (file)
@@ -196,5 +196,5 @@ static void __init sun4i_timer_init(struct device_node *node)
        clockevents_config_and_register(&sun4i_clockevent, rate,
                                        TIMER_SYNC_TICKS, 0xffffffff);
 }
-CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
+CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
                       sun4i_timer_init);
index ee8691b89944e3fcbc3dbf7eeaadf00ec38ffd93..0451e62fac7a8e31fd2bc370a83b4fdc4083bf01 100644 (file)
@@ -85,12 +85,6 @@ static u32 ticks_per_jiffy;
 
 static struct clock_event_device __percpu *armada_370_xp_evt;
 
-static void timer_ctrl_clrset(u32 clr, u32 set)
-{
-       writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
-               timer_base + TIMER_CTRL_OFF);
-}
-
 static void local_timer_ctrl_clrset(u32 clr, u32 set)
 {
        writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
@@ -245,7 +239,7 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
                clr = TIMER0_25MHZ;
                enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
        }
-       timer_ctrl_clrset(clr, set);
+       atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
        local_timer_ctrl_clrset(clr, set);
 
        /*
@@ -263,7 +257,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
        writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
        writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
 
-       timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
+       atomic_io_modify(timer_base + TIMER_CTRL_OFF,
+               TIMER0_RELOAD_EN | enable_mask,
+               TIMER0_RELOAD_EN | enable_mask);
 
        /*
         * Set scale and timer for sched_clock.
index 20066222f3f29cdb052c2282eafd1dadb8519d8b..0b3ce0399c519e354117a7a5c06a754f63453da6 100644 (file)
 #define ORION_ONESHOT_MAX      0xfffffffe
 
 static void __iomem *timer_base;
-static DEFINE_SPINLOCK(timer_ctrl_lock);
-
-/*
- * Thread-safe access to TIMER_CTRL register
- * (shared with watchdog timer)
- */
-void orion_timer_ctrl_clrset(u32 clr, u32 set)
-{
-       spin_lock(&timer_ctrl_lock);
-       writel((readl(timer_base + TIMER_CTRL) & ~clr) | set,
-               timer_base + TIMER_CTRL);
-       spin_unlock(&timer_ctrl_lock);
-}
-EXPORT_SYMBOL(orion_timer_ctrl_clrset);
 
 /*
  * Free-running clocksource handling.
@@ -68,7 +54,8 @@ static int orion_clkevt_next_event(unsigned long delta,
 {
        /* setup and enable one-shot timer */
        writel(delta, timer_base + TIMER1_VAL);
-       orion_timer_ctrl_clrset(TIMER1_RELOAD_EN, TIMER1_EN);
+       atomic_io_modify(timer_base + TIMER_CTRL,
+               TIMER1_RELOAD_EN | TIMER1_EN, TIMER1_EN);
 
        return 0;
 }
@@ -80,10 +67,13 @@ static void orion_clkevt_mode(enum clock_event_mode mode,
                /* setup and enable periodic timer at 1/HZ intervals */
                writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
                writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
-               orion_timer_ctrl_clrset(0, TIMER1_RELOAD_EN | TIMER1_EN);
+               atomic_io_modify(timer_base + TIMER_CTRL,
+                       TIMER1_RELOAD_EN | TIMER1_EN,
+                       TIMER1_RELOAD_EN | TIMER1_EN);
        } else {
                /* disable timer */
-               orion_timer_ctrl_clrset(TIMER1_RELOAD_EN | TIMER1_EN, 0);
+               atomic_io_modify(timer_base + TIMER_CTRL,
+                       TIMER1_RELOAD_EN | TIMER1_EN, 0);
        }
 }
 
@@ -131,7 +121,9 @@ static void __init orion_timer_init(struct device_node *np)
        /* setup timer0 as free-running clocksource */
        writel(~0, timer_base + TIMER0_VAL);
        writel(~0, timer_base + TIMER0_RELOAD);
-       orion_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | TIMER0_EN);
+       atomic_io_modify(timer_base + TIMER_CTRL,
+               TIMER0_RELOAD_EN | TIMER0_EN,
+               TIMER0_RELOAD_EN | TIMER0_EN);
        clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
                              clk_get_rate(clk), 300, 32,
                              clocksource_mmio_readl_down);
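
The Armada and Orion conversions above drop the driver-local clrset helpers in favour of atomic_io_modify(). The sketch below shows the read-modify-write those calls rely on, but without the internal serialisation the real helper provides; it is illustrative only, and the helper's own definition remains authoritative.

#include <linux/io.h>
#include <linux/types.h>

/* Unlocked illustration of atomic_io_modify(reg, mask, set) as used above:
 * with mask = clr | set, the clr bits are cleared and the set bits written. */
static void example_clrset(void __iomem *reg, u32 clr, u32 set)
{
	u32 val = readl(reg);

	val &= ~(clr | set);
	val |= set;
	writel(val, reg);
}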
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
new file mode 100644 (file)
index 0000000..0250354
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * Keystone broadcast clock-event
+ *
+ * Copyright 2013 Texas Instruments, Inc.
+ *
+ * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define TIMER_NAME                     "timer-keystone"
+
+/* Timer register offsets */
+#define TIM12                          0x10
+#define TIM34                          0x14
+#define PRD12                          0x18
+#define PRD34                          0x1c
+#define TCR                            0x20
+#define TGCR                           0x24
+#define INTCTLSTAT                     0x44
+
+/* Timer register bitfields */
+#define TCR_ENAMODE_MASK               0xC0
+#define TCR_ENAMODE_ONESHOT_MASK       0x40
+#define TCR_ENAMODE_PERIODIC_MASK      0x80
+
+#define TGCR_TIM_UNRESET_MASK          0x03
+#define INTCTLSTAT_ENINT_MASK          0x01
+
+/**
+ * struct keystone_timer: holds timer's data
+ * @base: timer memory base address
+ * @hz_period: cycles per HZ period
+ * @event_dev: event device based on timer
+ */
+static struct keystone_timer {
+       void __iomem *base;
+       unsigned long hz_period;
+       struct clock_event_device event_dev;
+} timer;
+
+static inline u32 keystone_timer_readl(unsigned long rg)
+{
+       return readl_relaxed(timer.base + rg);
+}
+
+static inline void keystone_timer_writel(u32 val, unsigned long rg)
+{
+       writel_relaxed(val, timer.base + rg);
+}
+
+/**
+ * keystone_timer_barrier: write memory barrier
+ * Use an explicit barrier rather than the non-relaxed readl/writel variants,
+ * because in this driver the non-relaxed variants would hide the places
+ * where a barrier is actually needed.
+ */
+static inline void keystone_timer_barrier(void)
+{
+       __iowmb();
+}
+
+/**
+ * keystone_timer_config: configures timer to work in oneshot/periodic modes.
+ * @period: number of cycles to program
+ * @mode: mode to configure
+ */
+static int keystone_timer_config(u64 period, enum clock_event_mode mode)
+{
+       u32 tcr;
+       u32 off;
+
+       tcr = keystone_timer_readl(TCR);
+       off = tcr & ~(TCR_ENAMODE_MASK);
+
+       /* set enable mode */
+       switch (mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               tcr |= TCR_ENAMODE_ONESHOT_MASK;
+               break;
+       case CLOCK_EVT_MODE_PERIODIC:
+               tcr |= TCR_ENAMODE_PERIODIC_MASK;
+               break;
+       default:
+               return -1;
+       }
+
+       /* disable timer */
+       keystone_timer_writel(off, TCR);
+       /* here we have to be sure the timer has been disabled */
+       keystone_timer_barrier();
+
+       /* reset counter to zero, set new period */
+       keystone_timer_writel(0, TIM12);
+       keystone_timer_writel(0, TIM34);
+       keystone_timer_writel(period & 0xffffffff, PRD12);
+       keystone_timer_writel(period >> 32, PRD34);
+
+       /*
+        * enable timer
+        * here we have to be sure that CNTLO, CNTHI, PRDLO, PRDHI registers
+        * have been written.
+        */
+       keystone_timer_barrier();
+       keystone_timer_writel(tcr, TCR);
+       return 0;
+}
+
+static void keystone_timer_disable(void)
+{
+       u32 tcr;
+
+       tcr = keystone_timer_readl(TCR);
+
+       /* disable timer */
+       tcr &= ~(TCR_ENAMODE_MASK);
+       keystone_timer_writel(tcr, TCR);
+}
+
+static irqreturn_t keystone_timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *evt = dev_id;
+
+       evt->event_handler(evt);
+       return IRQ_HANDLED;
+}
+
+static int keystone_set_next_event(unsigned long cycles,
+                                 struct clock_event_device *evt)
+{
+       return keystone_timer_config(cycles, evt->mode);
+}
+
+static void keystone_set_mode(enum clock_event_mode mode,
+                            struct clock_event_device *evt)
+{
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+               keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC);
+               break;
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+       case CLOCK_EVT_MODE_ONESHOT:
+               keystone_timer_disable();
+               break;
+       default:
+               break;
+       }
+}
+
+static void __init keystone_timer_init(struct device_node *np)
+{
+       struct clock_event_device *event_dev = &timer.event_dev;
+       unsigned long rate;
+       struct clk *clk;
+       int irq, error;
+
+       irq  = irq_of_parse_and_map(np, 0);
+       if (irq == NO_IRQ) {
+               pr_err("%s: failed to map interrupts\n", __func__);
+               return;
+       }
+
+       timer.base = of_iomap(np, 0);
+       if (!timer.base) {
+               pr_err("%s: failed to map registers\n", __func__);
+               return;
+       }
+
+       clk = of_clk_get(np, 0);
+       if (IS_ERR(clk)) {
+               pr_err("%s: failed to get clock\n", __func__);
+               iounmap(timer.base);
+               return;
+       }
+
+       error = clk_prepare_enable(clk);
+       if (error) {
+               pr_err("%s: failed to enable clock\n", __func__);
+               goto err;
+       }
+
+       rate = clk_get_rate(clk);
+
+       /* disable, use internal clock source */
+       keystone_timer_writel(0, TCR);
+       /* here we have to be sure the timer has been disabled */
+       keystone_timer_barrier();
+
+       /* reset timer as 64-bit, no pre-scaler, plus features are disabled */
+       keystone_timer_writel(0, TGCR);
+
+       /* unreset timer */
+       keystone_timer_writel(TGCR_TIM_UNRESET_MASK, TGCR);
+
+       /* init counter to zero */
+       keystone_timer_writel(0, TIM12);
+       keystone_timer_writel(0, TIM34);
+
+       timer.hz_period = DIV_ROUND_UP(rate, HZ);
+
+       /* enable timer interrupts */
+       keystone_timer_writel(INTCTLSTAT_ENINT_MASK, INTCTLSTAT);
+
+       error = request_irq(irq, keystone_timer_interrupt, IRQF_TIMER,
+                           TIMER_NAME, event_dev);
+       if (error) {
+               pr_err("%s: failed to setup irq\n", __func__);
+               goto err;
+       }
+
+       /* setup clockevent */
+       event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+       event_dev->set_next_event = keystone_set_next_event;
+       event_dev->set_mode = keystone_set_mode;
+       event_dev->cpumask = cpu_all_mask;
+       event_dev->owner = THIS_MODULE;
+       event_dev->name = TIMER_NAME;
+       event_dev->irq = irq;
+
+       clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
+
+       pr_info("keystone timer clock @%lu Hz\n", rate);
+       return;
+err:
+       clk_put(clk);
+       iounmap(timer.base);
+}
+
+CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
+                                       keystone_timer_init);
similarity index 99%
rename from arch/arm/mach-u300/timer.c
rename to drivers/clocksource/timer-u300.c
index fe08fd34c0ce8422b00508efb20cb94ff3a2dda2..e63d469661fd547d49154643ed58bb2dcf100023 100644 (file)
@@ -1,8 +1,4 @@
 /*
- *
- * arch/arm/mach-u300/timer.c
- *
- *
  * Copyright (C) 2007-2009 ST-Ericsson AB
  * License terms: GNU General Public License (GPL) version 2
  * Timer COH 901 328, runs the OS timer interrupt.
index 4b029c0944af5ef55b9bdb02a266cbcb3ecd2d23..1fbe11f2a14603e499042974e7fc8064414978a4 100644 (file)
@@ -200,7 +200,7 @@ source "drivers/cpufreq/Kconfig.x86"
 endmenu
 
 menu "ARM CPU frequency scaling drivers"
-depends on ARM
+depends on ARM || ARM64
 source "drivers/cpufreq/Kconfig.arm"
 endmenu
 
index 31297499a60ad94d8f7636271bb64e1832b4209a..9fb627046e17ad33c5f1c2cc1078a27e1df1245b 100644 (file)
@@ -2,6 +2,7 @@
 # ARM CPU Frequency scaling drivers
 #
 
+# big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
        tristate "Generic ARM big LITTLE CPUfreq driver"
        depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
@@ -16,6 +17,14 @@ config ARM_DT_BL_CPUFREQ
          This enables probing via DT for Generic CPUfreq driver for ARM
          big.LITTLE platform. This gets frequency tables from DT.
 
+config ARM_VEXPRESS_SPC_CPUFREQ
+        tristate "Versatile Express SPC based CPUfreq driver"
+       depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC
+        help
+          This adds CPUfreq driver support for Versatile Express
+         big.LITTLE platforms using SPC for power management.
+
+
 config ARM_EXYNOS_CPUFREQ
        bool
 
@@ -241,11 +250,3 @@ config ARM_TEGRA_CPUFREQ
        default y
        help
          This adds the CPUFreq driver support for TEGRA SOCs.
-
-config ARM_VEXPRESS_SPC_CPUFREQ
-        tristate "Versatile Express SPC based CPUfreq driver"
-        select ARM_BIG_LITTLE_CPUFREQ
-        depends on ARCH_VEXPRESS_SPC
-        help
-          This add the CPUfreq driver support for Versatile Express
-         big.LITTLE platforms using SPC for power management.
index 18448a7e9f8654e1560d2ec7eb354dd85ba094f4..822ca03a87f796ae321cb9c2bd54ffe7380b5581 100644 (file)
@@ -855,7 +855,6 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        pr_debug("acpi_cpufreq_cpu_exit\n");
 
        if (data) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
                per_cpu(acfreq_data, policy->cpu) = NULL;
                acpi_processor_unregister_performance(data->acpi_data,
                                                      policy->cpu);
index 72f87e9317e31057525b0785b36d78467890a34c..bad2ed317ba294462212bc00cc3cc94132e0aeb2 100644 (file)
@@ -446,9 +446,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
        }
 
        if (cur_cluster < MAX_CLUSTERS) {
+               int cpu;
+
                cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-               per_cpu(physical_cluster, policy->cpu) = cur_cluster;
+               for_each_cpu(cpu, policy->cpus)
+                       per_cpu(physical_cluster, cpu) = cur_cluster;
        } else {
                /* Assumption: during init, we are always running on A15 */
                per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
@@ -478,7 +481,6 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
-       cpufreq_frequency_table_put_attr(policy->cpu);
        put_cluster_clk_and_freq_table(cpu_dev);
        dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
index e9e63fc9c2c9d09b7710f61d3c8769ac77584bcc..a9f8e5bd0716f508858627d33c381437153fc3a3 100644 (file)
@@ -195,7 +195,6 @@ static struct cpufreq_driver bfin_driver = {
        .target_index = bfin_target,
        .get = bfin_getfreq_khz,
        .init = __bfin_cpu_init,
-       .exit = cpufreq_generic_exit,
        .name = "bfin cpufreq",
        .attr = cpufreq_generic_attr,
 };
index 0c12ffc0ebcbf2a5daad62e4a47674fdcedadf75..1bf6bbac3e03ae1bd9ffc4f001a71f7a0d0befcb 100644 (file)
@@ -109,7 +109,6 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
        .target_index = cpu0_set_target,
        .get = cpufreq_generic_get,
        .init = cpu0_cpufreq_init,
-       .exit = cpufreq_generic_exit,
        .name = "generic_cpu0",
        .attr = cpufreq_generic_attr,
 };
index 199b52b7c3e1ad6e9d00102905a215137901b71d..3aa7a7a226b3f23f2977975651fc0fc2869ffd7b 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
-#include <linux/syscore_ops.h>
+#include <linux/suspend.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
 
@@ -42,10 +42,11 @@ static DEFINE_RWLOCK(cpufreq_driver_lock);
 DEFINE_MUTEX(cpufreq_governor_lock);
 static LIST_HEAD(cpufreq_policy_list);
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
-#endif
+
+/* Flag to suspend/resume CPUFreq governors */
+static bool cpufreq_suspended;
 
 static inline bool has_target(void)
 {
@@ -181,8 +182,8 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
        if (!policy || IS_ERR(policy->clk)) {
-               pr_err("%s: No %s associated to cpu: %d\n", __func__,
-                               policy ? "clk" : "policy", cpu);
+               pr_err("%s: No %s associated to cpu: %d\n",
+                      __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }
 
@@ -190,6 +191,12 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 
+/* Only for cpufreq core internal use */
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+{
+       return per_cpu(cpufreq_cpu_data, cpu);
+}
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
        struct cpufreq_policy *policy = NULL;
@@ -254,15 +261,14 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
-               pr_debug("saving %lu as reference value for loops_per_jiffy; "
-                       "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
+               pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
+                        l_p_j_ref, l_p_j_ref_freq);
        }
-       if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
-           (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+       if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
-               pr_debug("scaling loops_per_jiffy to %lu "
-                       "for frequency %u kHz\n", loops_per_jiffy, ci->new);
+               pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
+                        loops_per_jiffy, ci->new);
        }
 }
 #else
@@ -282,7 +288,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
-               state, freqs->new);
+                state, freqs->new);
 
        switch (state) {
 
@@ -294,9 +300,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
-                               pr_debug("Warning: CPU frequency is"
-                                       " %u, cpufreq assumed %u kHz.\n",
-                                       freqs->old, policy->cur);
+                               pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
+                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
@@ -307,8 +312,8 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 
        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-               pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
-                       (unsigned long)freqs->cpu);
+               pr_debug("FREQ: %lu - CPU: %lu\n",
+                        (unsigned long)freqs->new, (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
@@ -352,7 +357,7 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
 /*********************************************************************
  *                          SYSFS INTERFACE                          *
  *********************************************************************/
-ssize_t show_boost(struct kobject *kobj,
+static ssize_t show_boost(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
 {
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
@@ -368,13 +373,13 @@ static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
                return -EINVAL;
 
        if (cpufreq_boost_trigger_state(enable)) {
-               pr_err("%s: Cannot %s BOOST!\n", __func__,
-                      enable ? "enable" : "disable");
+               pr_err("%s: Cannot %s BOOST!\n",
+                      __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }
 
-       pr_debug("%s: cpufreq BOOST %s\n", __func__,
-                enable ? "enabled" : "disabled");
+       pr_debug("%s: cpufreq BOOST %s\n",
+                __func__, enable ? "enabled" : "disabled");
 
        return count;
 }
@@ -879,18 +884,25 @@ err_out_kobj_put:
 
 static void cpufreq_init_policy(struct cpufreq_policy *policy)
 {
+       struct cpufreq_governor *gov = NULL;
        struct cpufreq_policy new_policy;
        int ret = 0;
 
        memcpy(&new_policy, policy, sizeof(*policy));
 
+       /* Update governor of new_policy to the governor used before hotplug */
+       gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
+       if (gov)
+               pr_debug("Restoring governor %s for cpu %d\n",
+                               policy->governor->name, policy->cpu);
+       else
+               gov = CPUFREQ_DEFAULT_GOVERNOR;
+
+       new_policy.governor = gov;
+
        /* Use the default policy if its valid. */
        if (cpufreq_driver->setpolicy)
-               cpufreq_parse_governor(policy->governor->name,
-                                       &new_policy.policy, NULL);
-
-       /* assure that the starting sequence is run in cpufreq_set_policy */
-       policy->governor = NULL;
+               cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
        /* set default policy */
        ret = cpufreq_set_policy(policy, &new_policy);
@@ -927,8 +939,11 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
        up_write(&policy->rwsem);
 
        if (has_target()) {
-               if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-                       (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               if (!ret)
+                       ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+               if (ret) {
                        pr_err("%s: Failed to start governor\n", __func__);
                        return ret;
                }
@@ -949,6 +964,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+       policy->governor = NULL;
+
        return policy;
 }
 
@@ -1022,21 +1039,19 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
        up_write(&policy->rwsem);
 
-       cpufreq_frequency_table_update_policy_cpu(policy);
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
-static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
-                            bool frozen)
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
        unsigned int j, cpu = dev->id;
        int ret = -ENOMEM;
        struct cpufreq_policy *policy;
        unsigned long flags;
+       bool recover_policy = cpufreq_suspended;
 #ifdef CONFIG_HOTPLUG_CPU
        struct cpufreq_policy *tpolicy;
-       struct cpufreq_governor *gov;
 #endif
 
        if (cpu_is_offline(cpu))
@@ -1075,9 +1090,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         * Restore the saved policy when doing light-weight init and fall back
         * to the full init if that fails.
         */
-       policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+       policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
        if (!policy) {
-               frozen = false;
+               recover_policy = false;
                policy = cpufreq_policy_alloc();
                if (!policy)
                        goto nomem_out;
@@ -1089,12 +1104,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
-       if (frozen && cpu != policy->cpu)
+       if (recover_policy && cpu != policy->cpu)
                update_policy_cpu(policy, cpu);
        else
                policy->cpu = cpu;
 
-       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
        init_completion(&policy->kobj_unregister);
@@ -1118,7 +1132,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-       if (!frozen) {
+       if (!recover_policy) {
                policy->user_policy.min = policy->min;
                policy->user_policy.max = policy->max;
        }
@@ -1180,16 +1194,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                     CPUFREQ_START, policy);
 
-#ifdef CONFIG_HOTPLUG_CPU
-       gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-       if (gov) {
-               policy->governor = gov;
-               pr_debug("Restoring governor %s for cpu %d\n",
-                      policy->governor->name, cpu);
-       }
-#endif
-
-       if (!frozen) {
+       if (!recover_policy) {
                ret = cpufreq_add_dev_interface(policy, dev);
                if (ret)
                        goto err_out_unregister;
@@ -1203,7 +1208,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 
        cpufreq_init_policy(policy);
 
-       if (!frozen) {
+       if (!recover_policy) {
                policy->user_policy.policy = policy->policy;
                policy->user_policy.governor = policy->governor;
        }
@@ -1226,7 +1231,7 @@ err_get_freq:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 err_set_policy_cpu:
-       if (frozen) {
+       if (recover_policy) {
                /* Do not leave stale fallback data behind. */
                per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
                cpufreq_policy_put_kobj(policy);
@@ -1250,7 +1255,7 @@ nomem_out:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-       return __cpufreq_add_dev(dev, sif, false);
+       return __cpufreq_add_dev(dev, sif);
 }
 
 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
@@ -1265,7 +1270,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
        if (ret) {
-               pr_err("%s: Failed to move kobj: %d", __func__, ret);
+               pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
 
                down_write(&policy->rwsem);
                cpumask_set_cpu(old_cpu, policy->cpus);
@@ -1281,8 +1286,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
 }
 
 static int __cpufreq_remove_dev_prepare(struct device *dev,
-                                       struct subsys_interface *sif,
-                                       bool frozen)
+                                       struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id, cpus;
        int new_cpu, ret;
@@ -1296,7 +1300,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
        policy = per_cpu(cpufreq_cpu_data, cpu);
 
        /* Save the policy somewhere when doing a light-weight tear-down */
-       if (frozen)
+       if (cpufreq_suspended)
                per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
 
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -1314,11 +1318,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                }
        }
 
-#ifdef CONFIG_HOTPLUG_CPU
        if (!cpufreq_driver->setpolicy)
                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
                        policy->governor->name, CPUFREQ_NAME_LEN);
-#endif
 
        down_read(&policy->rwsem);
        cpus = cpumask_weight(policy->cpus);
@@ -1331,19 +1333,19 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                if (new_cpu >= 0) {
                        update_policy_cpu(policy, new_cpu);
 
-                       if (!frozen) {
+                       if (!cpufreq_suspended)
                                pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-                                               __func__, new_cpu, cpu);
-                       }
+                                        __func__, new_cpu, cpu);
                }
+       } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
+               cpufreq_driver->stop_cpu(policy);
        }
 
        return 0;
 }
 
 static int __cpufreq_remove_dev_finish(struct device *dev,
-                                      struct subsys_interface *sif,
-                                      bool frozen)
+                                      struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id, cpus;
        int ret;
@@ -1373,12 +1375,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                                        CPUFREQ_GOV_POLICY_EXIT);
                        if (ret) {
                                pr_err("%s: Failed to exit governor\n",
-                                               __func__);
+                                      __func__);
                                return ret;
                        }
                }
 
-               if (!frozen)
+               if (!cpufreq_suspended)
                        cpufreq_policy_put_kobj(policy);
 
                /*
@@ -1394,16 +1396,16 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                list_del(&policy->policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-               if (!frozen)
+               if (!cpufreq_suspended)
                        cpufreq_policy_free(policy);
-       } else {
-               if (has_target()) {
-                       if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
-                                       (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
-                               pr_err("%s: Failed to start governor\n",
-                                               __func__);
-                               return ret;
-                       }
+       } else if (has_target()) {
+               ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+               if (!ret)
+                       ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+               if (ret) {
+                       pr_err("%s: Failed to start governor\n", __func__);
+                       return ret;
                }
        }
 
@@ -1424,10 +1426,10 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
        if (cpu_is_offline(cpu))
                return 0;
 
-       ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+       ret = __cpufreq_remove_dev_prepare(dev, sif);
 
        if (!ret)
-               ret = __cpufreq_remove_dev_finish(dev, sif, false);
+               ret = __cpufreq_remove_dev_finish(dev, sif);
 
        return ret;
 }
@@ -1458,8 +1460,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
        struct cpufreq_freqs freqs;
        unsigned long flags;
 
-       pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
-              "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
+       pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
+                old_freq, new_freq);
 
        freqs.old = old_freq;
        freqs.new = new_freq;
@@ -1570,83 +1572,104 @@ static struct subsys_interface cpufreq_interface = {
        .remove_dev     = cpufreq_remove_dev,
 };
 
+/*
+ * In case platform wants some specific frequency to be configured
+ * during suspend..
+ */
+int cpufreq_generic_suspend(struct cpufreq_policy *policy)
+{
+       int ret;
+
+       if (!policy->suspend_freq) {
+               pr_err("%s: suspend_freq can't be zero\n", __func__);
+               return -EINVAL;
+       }
+
+       pr_debug("%s: Setting suspend-freq: %u\n", __func__,
+                       policy->suspend_freq);
+
+       ret = __cpufreq_driver_target(policy, policy->suspend_freq,
+                       CPUFREQ_RELATION_H);
+       if (ret)
+               pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
+                               __func__, policy->suspend_freq, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL(cpufreq_generic_suspend);
+
 /**
- * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
+ * cpufreq_suspend() - Suspend CPUFreq governors
  *
- * This function is only executed for the boot processor.  The other CPUs
- * have been put offline by means of CPU hotplug.
+ * Called during system wide Suspend/Hibernate cycles for suspending governors
+ * as some platforms can't change frequency after this point in suspend cycle.
+ * Because some of the devices (like: i2c, regulators, etc) they use for
+ * changing frequency are suspended quickly after this point.
  */
-static int cpufreq_bp_suspend(void)
+void cpufreq_suspend(void)
 {
-       int ret = 0;
-
-       int cpu = smp_processor_id();
        struct cpufreq_policy *policy;
 
-       pr_debug("suspending cpu %u\n", cpu);
+       if (!cpufreq_driver)
+               return;
 
-       /* If there's no policy for the boot CPU, we have nothing to do. */
-       policy = cpufreq_cpu_get(cpu);
-       if (!policy)
-               return 0;
+       if (!has_target())
+               return;
 
-       if (cpufreq_driver->suspend) {
-               ret = cpufreq_driver->suspend(policy);
-               if (ret)
-                       printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
-                                       "step on CPU %u\n", policy->cpu);
+       pr_debug("%s: Suspending Governors\n", __func__);
+
+       list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+               if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
+                       pr_err("%s: Failed to stop governor for policy: %p\n",
+                               __func__, policy);
+               else if (cpufreq_driver->suspend
+                   && cpufreq_driver->suspend(policy))
+                       pr_err("%s: Failed to suspend driver: %p\n", __func__,
+                               policy);
        }
 
-       cpufreq_cpu_put(policy);
-       return ret;
+       cpufreq_suspended = true;
 }
 
 /**
- * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
+ * cpufreq_resume() - Resume CPUFreq governors
  *
- *     1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *     2.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *         restored. It will verify that the current freq is in sync with
- *         what we believe it to be. This is a bit later than when it
- *         should be, but nonethteless it's better than calling
- *         cpufreq_driver->get() here which might re-enable interrupts...
- *
- * This function is only executed for the boot CPU.  The other CPUs have not
- * been turned on yet.
+ * Called during system wide Suspend/Hibernate cycle for resuming governors that
+ * are suspended with cpufreq_suspend().
  */
-static void cpufreq_bp_resume(void)
+void cpufreq_resume(void)
 {
-       int ret = 0;
-
-       int cpu = smp_processor_id();
        struct cpufreq_policy *policy;
 
-       pr_debug("resuming cpu %u\n", cpu);
+       if (!cpufreq_driver)
+               return;
 
-       /* If there's no policy for the boot CPU, we have nothing to do. */
-       policy = cpufreq_cpu_get(cpu);
-       if (!policy)
+       if (!has_target())
                return;
 
-       if (cpufreq_driver->resume) {
-               ret = cpufreq_driver->resume(policy);
-               if (ret) {
-                       printk(KERN_ERR "cpufreq: resume failed in ->resume "
-                                       "step on CPU %u\n", policy->cpu);
-                       goto fail;
-               }
-       }
+       pr_debug("%s: Resuming Governors\n", __func__);
 
-       schedule_work(&policy->update);
+       cpufreq_suspended = false;
 
-fail:
-       cpufreq_cpu_put(policy);
-}
+       list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
+               if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+                   || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
+                       pr_err("%s: Failed to start governor for policy: %p\n",
+                               __func__, policy);
+               else if (cpufreq_driver->resume
+                   && cpufreq_driver->resume(policy))
+                       pr_err("%s: Failed to resume driver: %p\n", __func__,
+                               policy);
 
-static struct syscore_ops cpufreq_syscore_ops = {
-       .suspend        = cpufreq_bp_suspend,
-       .resume         = cpufreq_bp_resume,
-};
+               /*
+                * schedule call cpufreq_update_policy() for boot CPU, i.e. last
+                * policy in list. It will verify that the current freq is in
+                * sync with what we believe it to be.
+                */
+               if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
+                       schedule_work(&policy->update);
+       }
+}
 
 /**
  *     cpufreq_get_current_driver - return current driver's name
@@ -1762,7 +1785,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                target_freq = policy->min;
 
        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
-                       policy->cpu, target_freq, relation, old_target_freq);
+                policy->cpu, target_freq, relation, old_target_freq);
 
        /*
         * This might look like a redundant call as we are checking it again
@@ -1807,8 +1830,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                        freqs.flags = 0;
 
                        pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
-                                       __func__, policy->cpu, freqs.old,
-                                       freqs.new);
+                                __func__, policy->cpu, freqs.old, freqs.new);
 
                        cpufreq_notify_transition(policy, &freqs,
                                        CPUFREQ_PRECHANGE);
@@ -1817,7 +1839,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                retval = cpufreq_driver->target_index(policy, index);
                if (retval)
                        pr_err("%s: Failed to change cpu frequency: %d\n",
-                                       __func__, retval);
+                              __func__, retval);
 
                if (notify)
                        cpufreq_notify_post_transition(policy, &freqs, retval);
@@ -1863,17 +1885,18 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        struct cpufreq_governor *gov = NULL;
 #endif
 
+       /* Don't start any governor operations if we are entering suspend */
+       if (cpufreq_suspended)
+               return 0;
+
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
                if (!gov)
                        return -EINVAL;
                else {
-                       printk(KERN_WARNING "%s governor failed, too long"
-                              " transition latency of HW, fallback"
-                              " to %s governor\n",
-                              policy->governor->name,
-                              gov->name);
+                       pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
+                               policy->governor->name, gov->name);
                        policy->governor = gov;
                }
        }
@@ -1883,7 +1906,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
                        return -EINVAL;
 
        pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-                                               policy->cpu, event);
+                policy->cpu, event);
 
        mutex_lock(&cpufreq_governor_lock);
        if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
@@ -1950,9 +1973,7 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
-#ifdef CONFIG_HOTPLUG_CPU
        int cpu;
-#endif
 
        if (!governor)
                return;
@@ -1960,14 +1981,12 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
        if (cpufreq_disabled())
                return;
 
-#ifdef CONFIG_HOTPLUG_CPU
        for_each_present_cpu(cpu) {
                if (cpu_online(cpu))
                        continue;
                if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
                        strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
        }
-#endif
 
        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
@@ -2012,22 +2031,21 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy)
 {
-       int ret = 0, failed = 1;
+       struct cpufreq_governor *old_gov;
+       int ret;
 
-       pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
-               new_policy->min, new_policy->max);
+       pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
+                new_policy->cpu, new_policy->min, new_policy->max);
 
        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-       if (new_policy->min > policy->max || new_policy->max < policy->min) {
-               ret = -EINVAL;
-               goto error_out;
-       }
+       if (new_policy->min > policy->max || new_policy->max < policy->min)
+               return -EINVAL;
 
        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
-               goto error_out;
+               return ret;
 
        /* adjust if necessary - all reasons */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2043,7 +2061,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
         */
        ret = cpufreq_driver->verify(new_policy);
        if (ret)
-               goto error_out;
+               return ret;
 
        /* notification of the new policy */
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -2053,63 +2071,53 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
        policy->max = new_policy->max;
 
        pr_debug("new min and max freqs are %u - %u kHz\n",
-                                       policy->min, policy->max);
+                policy->min, policy->max);
 
        if (cpufreq_driver->setpolicy) {
                policy->policy = new_policy->policy;
                pr_debug("setting range\n");
-               ret = cpufreq_driver->setpolicy(new_policy);
-       } else {
-               if (new_policy->governor != policy->governor) {
-                       /* save old, working values */
-                       struct cpufreq_governor *old_gov = policy->governor;
-
-                       pr_debug("governor switch\n");
-
-                       /* end old governor */
-                       if (policy->governor) {
-                               __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-                               up_write(&policy->rwsem);
-                               __cpufreq_governor(policy,
-                                               CPUFREQ_GOV_POLICY_EXIT);
-                               down_write(&policy->rwsem);
-                       }
+               return cpufreq_driver->setpolicy(new_policy);
+       }
 
-                       /* start new governor */
-                       policy->governor = new_policy->governor;
-                       if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-                               if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
-                                       failed = 0;
-                               } else {
-                                       up_write(&policy->rwsem);
-                                       __cpufreq_governor(policy,
-                                                       CPUFREQ_GOV_POLICY_EXIT);
-                                       down_write(&policy->rwsem);
-                               }
-                       }
+       if (new_policy->governor == policy->governor)
+               goto out;
 
-                       if (failed) {
-                               /* new governor failed, so re-start old one */
-                               pr_debug("starting governor %s failed\n",
-                                                       policy->governor->name);
-                               if (old_gov) {
-                                       policy->governor = old_gov;
-                                       __cpufreq_governor(policy,
-                                                       CPUFREQ_GOV_POLICY_INIT);
-                                       __cpufreq_governor(policy,
-                                                          CPUFREQ_GOV_START);
-                               }
-                               ret = -EINVAL;
-                               goto error_out;
-                       }
-                       /* might be a policy change, too, so fall through */
-               }
-               pr_debug("governor: change or update limits\n");
-               ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+       pr_debug("governor switch\n");
+
+       /* save old, working values */
+       old_gov = policy->governor;
+       /* end old governor */
+       if (old_gov) {
+               __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+               up_write(&policy->rwsem);
+               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               down_write(&policy->rwsem);
        }
 
-error_out:
-       return ret;
+       /* start new governor */
+       policy->governor = new_policy->governor;
+       if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+               if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+                       goto out;
+
+               up_write(&policy->rwsem);
+               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+               down_write(&policy->rwsem);
+       }
+
+       /* new governor failed, so re-start old one */
+       pr_debug("starting governor %s failed\n", policy->governor->name);
+       if (old_gov) {
+               policy->governor = old_gov;
+               __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+               __cpufreq_governor(policy, CPUFREQ_GOV_START);
+       }
+
+       return -EINVAL;
+
+ out:
+       pr_debug("governor: change or update limits\n");
+       return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }
 
 /**
@@ -2145,8 +2153,13 @@ int cpufreq_update_policy(unsigned int cpu)
         */
        if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
                new_policy.cur = cpufreq_driver->get(cpu);
+               if (WARN_ON(!new_policy.cur)) {
+                       ret = -EIO;
+                       goto no_policy;
+               }
+
                if (!policy->cur) {
-                       pr_debug("Driver did not initialize current freq");
+                       pr_debug("Driver did not initialize current freq\n");
                        policy->cur = new_policy.cur;
                } else {
                        if (policy->cur != new_policy.cur && has_target())
@@ -2170,30 +2183,24 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 {
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;
-       bool frozen = false;
 
        dev = get_cpu_device(cpu);
        if (dev) {
-
-               if (action & CPU_TASKS_FROZEN)
-                       frozen = true;
-
                switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_ONLINE:
-                       __cpufreq_add_dev(dev, NULL, frozen);
-                       cpufreq_update_policy(cpu);
+                       __cpufreq_add_dev(dev, NULL);
                        break;
 
                case CPU_DOWN_PREPARE:
-                       __cpufreq_remove_dev_prepare(dev, NULL, frozen);
+                       __cpufreq_remove_dev_prepare(dev, NULL);
                        break;
 
                case CPU_POST_DEAD:
-                       __cpufreq_remove_dev_finish(dev, NULL, frozen);
+                       __cpufreq_remove_dev_finish(dev, NULL);
                        break;
 
                case CPU_DOWN_FAILED:
-                       __cpufreq_add_dev(dev, NULL, frozen);
+                       __cpufreq_add_dev(dev, NULL);
                        break;
                }
        }
@@ -2249,8 +2256,8 @@ int cpufreq_boost_trigger_state(int state)
                cpufreq_driver->boost_enabled = !state;
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-               pr_err("%s: Cannot %s BOOST\n", __func__,
-                      state ? "enable" : "disable");
+               pr_err("%s: Cannot %s BOOST\n",
+                      __func__, state ? "enable" : "disable");
        }
 
        return ret;
@@ -2295,7 +2302,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 
        if (!driver_data || !driver_data->verify || !driver_data->init ||
            !(driver_data->setpolicy || driver_data->target_index ||
-                   driver_data->target))
+                   driver_data->target) ||
+            (driver_data->setpolicy && (driver_data->target_index ||
+                   driver_data->target)))
                return -EINVAL;
 
        pr_debug("trying to register driver %s\n", driver_data->name);
@@ -2322,7 +2331,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                ret = cpufreq_sysfs_create_file(&boost.attr);
                if (ret) {
                        pr_err("%s: cannot register global BOOST sysfs file\n",
-                               __func__);
+                              __func__);
                        goto err_null_driver;
                }
        }
@@ -2345,7 +2354,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                /* if all ->init() calls failed, unregister */
                if (ret) {
                        pr_debug("no CPU initialized for driver %s\n",
-                                                       driver_data->name);
+                                driver_data->name);
                        goto err_if_unreg;
                }
        }
@@ -2409,7 +2418,6 @@ static int __init cpufreq_core_init(void)
 
        cpufreq_global_kobject = kobject_create();
        BUG_ON(!cpufreq_global_kobject);
-       register_syscore_ops(&cpufreq_syscore_ops);
 
        return 0;
 }
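
The cpufreq.c hunks above replace the boot-CPU syscore suspend/resume hooks with cpufreq_suspend()/cpufreq_resume() and add a cpufreq_generic_suspend() helper. As a rough sketch only (not part of this merge; all driver names, tables and values below are hypothetical), a driver that needs a fixed frequency across suspend now sets policy->suspend_freq in its ->init and points .suspend at the helper:

#include <linux/cpufreq.h>

/* Hypothetical driver sketch: pin a frequency across suspend via the new
 * cpufreq_generic_suspend() helper introduced in the hunks above. */
static struct cpufreq_frequency_table foo_freq_table[] = {
        { .frequency = 400000 },
        { .frequency = 800000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int foo_target(struct cpufreq_policy *policy, unsigned int index)
{
        /* Program the hardware for foo_freq_table[index]; omitted here. */
        return 0;
}

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
        /* Frequency (kHz) to use while suspended; the value is an assumption. */
        policy->suspend_freq = 800000;
        return cpufreq_generic_init(policy, foo_freq_table, 100000);
}

static struct cpufreq_driver foo_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = foo_target,
        .get            = cpufreq_generic_get,
        .init           = foo_cpufreq_init,
        .suspend        = cpufreq_generic_suspend,
        .name           = "foo-cpufreq",
        .attr           = cpufreq_generic_attr,
};

The exynos and s5pv210 conversions further down in this diff follow exactly this pattern.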
index 5793e1447fb177f476f5f38ee117857444ba98c0..ecaaebf969fc4e5427464dfce53d0bb321e3c39b 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 
 static spinlock_t cpufreq_stats_lock;
 
@@ -180,27 +180,25 @@ static void cpufreq_stats_free_table(unsigned int cpu)
        cpufreq_cpu_put(policy);
 }
 
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
-               struct cpufreq_frequency_table *table)
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
 {
        unsigned int i, j, count = 0, ret = 0;
        struct cpufreq_stats *stat;
-       struct cpufreq_policy *current_policy;
        unsigned int alloc_size;
        unsigned int cpu = policy->cpu;
+       struct cpufreq_frequency_table *table;
+
+       table = cpufreq_frequency_get_table(cpu);
+       if (unlikely(!table))
+               return 0;
+
        if (per_cpu(cpufreq_stats_table, cpu))
                return -EBUSY;
        stat = kzalloc(sizeof(*stat), GFP_KERNEL);
        if ((stat) == NULL)
                return -ENOMEM;
 
-       current_policy = cpufreq_cpu_get(cpu);
-       if (current_policy == NULL) {
-               ret = -EINVAL;
-               goto error_get_fail;
-       }
-
-       ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
+       ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
        if (ret)
                goto error_out;
 
@@ -223,7 +221,7 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
        stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
        if (!stat->time_in_state) {
                ret = -ENOMEM;
-               goto error_out;
+               goto error_alloc;
        }
        stat->freq_table = (unsigned int *)(stat->time_in_state + count);
 
@@ -243,11 +241,10 @@ static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
        stat->last_time = get_jiffies_64();
        stat->last_index = freq_table_get_index(stat, policy->cur);
        spin_unlock(&cpufreq_stats_lock);
-       cpufreq_cpu_put(current_policy);
        return 0;
+error_alloc:
+       sysfs_remove_group(&policy->kobj, &stats_attr_group);
 error_out:
-       cpufreq_cpu_put(current_policy);
-error_get_fail:
        kfree(stat);
        per_cpu(cpufreq_stats_table, cpu) = NULL;
        return ret;
@@ -256,7 +253,6 @@ error_get_fail:
 static void cpufreq_stats_create_table(unsigned int cpu)
 {
        struct cpufreq_policy *policy;
-       struct cpufreq_frequency_table *table;
 
        /*
         * "likely(!policy)" because normally cpufreq_stats will be registered
@@ -266,9 +262,7 @@ static void cpufreq_stats_create_table(unsigned int cpu)
        if (likely(!policy))
                return;
 
-       table = cpufreq_frequency_get_table(policy->cpu);
-       if (likely(table))
-               __cpufreq_stats_create_table(policy, table);
+       __cpufreq_stats_create_table(policy);
 
        cpufreq_cpu_put(policy);
 }
@@ -291,20 +285,14 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 {
        int ret = 0;
        struct cpufreq_policy *policy = data;
-       struct cpufreq_frequency_table *table;
-       unsigned int cpu = policy->cpu;
 
        if (val == CPUFREQ_UPDATE_POLICY_CPU) {
                cpufreq_stats_update_policy_cpu(policy);
                return 0;
        }
 
-       table = cpufreq_frequency_get_table(cpu);
-       if (!table)
-               return 0;
-
        if (val == CPUFREQ_CREATE_POLICY)
-               ret = __cpufreq_stats_create_table(policy, table);
+               ret = __cpufreq_stats_create_table(policy);
        else if (val == CPUFREQ_REMOVE_POLICY)
                __cpufreq_stats_free_table(policy);
 
index 86559040c54c83daad0778160cccdb7b19f43c22..d4573032cbbc6e5cdf522951a1ee4e3d86c2c47e 100644 (file)
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = cris_freq_target,
        .init   = cris_freq_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "cris_freq",
        .attr   = cpufreq_generic_attr,
 };
index 26d940d40b1dcb1e19208a17176afe88e5632f35..13c3361437f7be274fec5b49be8f8aba1c43dd55 100644 (file)
@@ -57,7 +57,6 @@ static struct cpufreq_driver cris_freq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = cris_freq_target,
        .init = cris_freq_cpu_init,
-       .exit = cpufreq_generic_exit,
        .name = "cris_freq",
        .attr = cpufreq_generic_attr,
 };
index 2cf33848d86e44d25bba1ca9b440d752337f8227..28a16dc6e02ec8786e529ad176185846efcaf735 100644 (file)
@@ -125,7 +125,6 @@ static struct cpufreq_driver davinci_driver = {
        .target_index   = davinci_target,
        .get            = cpufreq_generic_get,
        .init           = davinci_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "davinci",
        .attr           = cpufreq_generic_attr,
 };
index 9012b8bb6b649abc18e6e1a623df56de69249a38..a0d2a423cea97e70b6745d48b63884c03e57d1de 100644 (file)
@@ -382,7 +382,6 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
        unsigned int cpu = policy->cpu;
 
        /* Bye */
-       cpufreq_frequency_table_put_attr(policy->cpu);
        kfree(eps_cpu[cpu]);
        eps_cpu[cpu] = NULL;
        return 0;
index de08acff5101dd64b93a913905a8961bfd08c69f..c987e94708f5015b7c48ce62795d0f93c0ed8c49 100644 (file)
@@ -198,7 +198,6 @@ static struct cpufreq_driver elanfreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = elanfreq_target,
        .init           = elanfreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "elanfreq",
        .attr           = cpufreq_generic_attr,
 };
index fcd2914d081a8852620d3f420b433393fd66ec5e..f99cfe24e7bca6489dcbf592372a38d1d019f4f1 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/slab.h>
 #include <linux/regulator/consumer.h>
 #include <linux/cpufreq.h>
-#include <linux/suspend.h>
 #include <linux/platform_device.h>
 
 #include <plat/cpu.h>
 #include "exynos-cpufreq.h"
 
 static struct exynos_dvfs_info *exynos_info;
-
 static struct regulator *arm_regulator;
-
 static unsigned int locking_frequency;
-static bool frequency_locked;
-static DEFINE_MUTEX(cpufreq_lock);
 
 static int exynos_cpufreq_get_index(unsigned int freq)
 {
@@ -134,83 +129,13 @@ out:
 
 static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
 {
-       struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
-       int ret = 0;
-
-       mutex_lock(&cpufreq_lock);
-
-       if (frequency_locked)
-               goto out;
-
-       ret = exynos_cpufreq_scale(freq_table[index].frequency);
-
-out:
-       mutex_unlock(&cpufreq_lock);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM
-static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-
-static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-#endif
-
-/**
- * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
- *                     context
- * @notifier
- * @pm_event
- * @v
- *
- * While frequency_locked == true, target() ignores every frequency but
- * locking_frequency. The locking_frequency value is the initial frequency,
- * which is set by the bootloader. In order to eliminate possible
- * inconsistency in clock values, we save and restore frequencies during
- * suspend and resume and block CPUFREQ activities. Note that the standard
- * suspend/resume cannot be used as they are too deep (syscore_ops) for
- * regulator actions.
- */
-static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
-                                      unsigned long pm_event, void *v)
-{
-       int ret;
-
-       switch (pm_event) {
-       case PM_SUSPEND_PREPARE:
-               mutex_lock(&cpufreq_lock);
-               frequency_locked = true;
-               mutex_unlock(&cpufreq_lock);
-
-               ret = exynos_cpufreq_scale(locking_frequency);
-               if (ret < 0)
-                       return NOTIFY_BAD;
-
-               break;
-
-       case PM_POST_SUSPEND:
-               mutex_lock(&cpufreq_lock);
-               frequency_locked = false;
-               mutex_unlock(&cpufreq_lock);
-               break;
-       }
-
-       return NOTIFY_OK;
+       return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
 }
 
-static struct notifier_block exynos_cpufreq_nb = {
-       .notifier_call = exynos_cpufreq_pm_notifier,
-};
-
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        policy->clk = exynos_info->cpu_clk;
+       policy->suspend_freq = locking_frequency;
        return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
 }
 
@@ -220,15 +145,13 @@ static struct cpufreq_driver exynos_driver = {
        .target_index   = exynos_target,
        .get            = cpufreq_generic_get,
        .init           = exynos_cpufreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "exynos_cpufreq",
        .attr           = cpufreq_generic_attr,
 #ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
        .boost_supported = true,
 #endif
 #ifdef CONFIG_PM
-       .suspend        = exynos_cpufreq_suspend,
-       .resume         = exynos_cpufreq_resume,
+       .suspend        = cpufreq_generic_suspend,
 #endif
 };
 
@@ -263,19 +186,13 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                goto err_vdd_arm;
        }
 
+       /* Done here as we want to capture boot frequency */
        locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
 
-       register_pm_notifier(&exynos_cpufreq_nb);
-
-       if (cpufreq_register_driver(&exynos_driver)) {
-               pr_err("%s: failed to register cpufreq driver\n", __func__);
-               goto err_cpufreq;
-       }
-
-       return 0;
-err_cpufreq:
-       unregister_pm_notifier(&exynos_cpufreq_nb);
+       if (!cpufreq_register_driver(&exynos_driver))
+               return 0;
 
+       pr_err("%s: failed to register cpufreq driver\n", __func__);
        regulator_put(arm_regulator);
 err_vdd_arm:
        kfree(exynos_info);
index 49b756015316948fd783a0450441e30e285c1d8c..7f776aa91e2ff431f972bbb3995fcee82b941c66 100644 (file)
@@ -312,7 +312,6 @@ static struct cpufreq_driver exynos_driver = {
        .target_index   = exynos_target,
        .get            = cpufreq_generic_get,
        .init           = exynos_cpufreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = CPUFREQ_NAME,
        .attr           = cpufreq_generic_attr,
 };
index 8e54f97899ba6feb7225275b7ddf2473ff8dc35b..65a477075b3f2d8c424bec5e39682550140051c0 100644 (file)
@@ -91,8 +91,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
 
 /*
- * Generic routine to verify policy & frequency table, requires driver to call
- * cpufreq_frequency_table_get_attr() prior to it.
+ * Generic routine to verify policy & frequency table, requires driver to set
+ * policy->freq_table prior to it.
  */
 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
 {
@@ -203,8 +203,6 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);
 
-static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
-
 /**
  * show_available_freqs - show available frequencies for the specified CPU
  */
@@ -212,15 +210,12 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
                                    bool show_boost)
 {
        unsigned int i = 0;
-       unsigned int cpu = policy->cpu;
        ssize_t count = 0;
-       struct cpufreq_frequency_table *table;
+       struct cpufreq_frequency_table *table = policy->freq_table;
 
-       if (!per_cpu(cpufreq_show_table, cpu))
+       if (!table)
                return -ENODEV;
 
-       table = per_cpu(cpufreq_show_table, cpu);
-
        for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
                if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
                        continue;
@@ -283,49 +278,24 @@ struct freq_attr *cpufreq_generic_attr[] = {
 };
 EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
 
-/*
- * if you use these, you must assure that the frequency table is valid
- * all the time between get_attr and put_attr!
- */
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
-                                     unsigned int cpu)
-{
-       pr_debug("setting show_table for cpu %u to %p\n", cpu, table);
-       per_cpu(cpufreq_show_table, cpu) = table;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
-
-void cpufreq_frequency_table_put_attr(unsigned int cpu)
-{
-       pr_debug("clearing show_table for cpu %u\n", cpu);
-       per_cpu(cpufreq_show_table, cpu) = NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
-
 int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
                                      struct cpufreq_frequency_table *table)
 {
        int ret = cpufreq_frequency_table_cpuinfo(policy, table);
 
        if (!ret)
-               cpufreq_frequency_table_get_attr(table, policy->cpu);
+               policy->freq_table = table;
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
 
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
-{
-       pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
-                       policy->cpu, policy->last_cpu);
-       per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
-                       policy->last_cpu);
-       per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
-}
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
 
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
 {
-       return per_cpu(cpufreq_show_table, cpu);
+       struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
+       return policy ? policy->freq_table : NULL;
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
 
index 53c6ac637e10c437e12570d77c39e496b273276a..a22b5d182e0eb1767f96fd65063d1eebc935ec7c 100644 (file)
@@ -332,7 +332,6 @@ acpi_cpufreq_cpu_exit (
        pr_debug("acpi_cpufreq_cpu_exit\n");
 
        if (data) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
                acpi_io_data[policy->cpu] = NULL;
                acpi_processor_unregister_performance(&data->acpi_data,
                                                      policy->cpu);
index ce69059be1fc95318284a9aecf0c21916244e1c9..e27fca86fe4f8e22547fa3f90d3bde5cec81fca6 100644 (file)
@@ -144,7 +144,6 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
        .target_index = imx6q_set_target,
        .get = cpufreq_generic_get,
        .init = imx6q_cpufreq_init,
-       .exit = cpufreq_generic_exit,
        .name = "imx6q-cpufreq",
        .attr = cpufreq_generic_attr,
 };
index 2cd36b9297f3de01a4b5f2ed246d738e123d7991..bcb9a6d0ae115fbb50ae369296f142a981fa0ceb 100644 (file)
@@ -99,8 +99,7 @@ struct cpudata {
        u64     prev_aperf;
        u64     prev_mperf;
        unsigned long long prev_tsc;
-       int     sample_ptr;
-       struct sample samples[SAMPLE_COUNT];
+       struct sample sample;
 };
 
 static struct cpudata **all_cpu_data;
@@ -154,7 +153,7 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
        pid->setpoint = setpoint;
        pid->deadband  = deadband;
        pid->integral  = int_tofp(integral);
-       pid->last_err  = setpoint - busy;
+       pid->last_err  = int_tofp(setpoint) - int_tofp(busy);
 }
 
 static inline void pid_p_gain_set(struct _pid *pid, int percent)
@@ -447,7 +446,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
        if (limits.no_turbo)
                val |= (u64)1 << 32;
 
-       wrmsrl(MSR_IA32_PERF_CTL, val);
+       wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
 static struct cpu_defaults core_params = {
@@ -586,15 +585,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
        mperf = mperf >> FRAC_BITS;
        tsc = tsc >> FRAC_BITS;
 
-       cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-       cpu->samples[cpu->sample_ptr].aperf = aperf;
-       cpu->samples[cpu->sample_ptr].mperf = mperf;
-       cpu->samples[cpu->sample_ptr].tsc = tsc;
-       cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-       cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-       cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
+       cpu->sample.aperf = aperf;
+       cpu->sample.mperf = mperf;
+       cpu->sample.tsc = tsc;
+       cpu->sample.aperf -= cpu->prev_aperf;
+       cpu->sample.mperf -= cpu->prev_mperf;
+       cpu->sample.tsc -= cpu->prev_tsc;
 
-       intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
+       intel_pstate_calc_busy(cpu, &cpu->sample);
 
        cpu->prev_aperf = aperf;
        cpu->prev_mperf = mperf;
@@ -614,7 +612,7 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
        int32_t core_busy, max_pstate, current_pstate;
 
-       core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
+       core_busy = cpu->sample.core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
        core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
@@ -648,7 +646,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 
        intel_pstate_sample(cpu);
 
-       sample = &cpu->samples[cpu->sample_ptr];
+       sample = &cpu->sample;
 
        intel_pstate_adjust_busy_pstate(cpu);
 
@@ -729,7 +727,7 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
        cpu = all_cpu_data[cpu_num];
        if (!cpu)
                return 0;
-       sample = &cpu->samples[cpu->sample_ptr];
+       sample = &cpu->sample;
        return sample->freq;
 }
 
@@ -773,14 +771,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
        return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 {
-       int cpu = policy->cpu;
+       int cpu_num = policy->cpu;
+       struct cpudata *cpu = all_cpu_data[cpu_num];
 
-       del_timer(&all_cpu_data[cpu]->timer);
-       kfree(all_cpu_data[cpu]);
-       all_cpu_data[cpu] = NULL;
-       return 0;
+       pr_info("intel_pstate CPU %d exiting\n", cpu_num);
+
+       del_timer(&all_cpu_data[cpu_num]->timer);
+       intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+       kfree(all_cpu_data[cpu_num]);
+       all_cpu_data[cpu_num] = NULL;
 }
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -818,7 +819,7 @@ static struct cpufreq_driver intel_pstate_driver = {
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
-       .exit           = intel_pstate_cpu_exit,
+       .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
 };
 
index eb7abe345b50dd147351d8f9c22d6be892e5ad0a..3d114bc5a97ad56999b03d2f03d9c701a8c72f6b 100644 (file)
@@ -102,7 +102,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = kirkwood_cpufreq_target,
        .init   = kirkwood_cpufreq_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "kirkwood-cpufreq",
        .attr   = cpufreq_generic_attr,
 };
index 45bafddfd8ea488ba24362b44a8aa58bf58304eb..7b94da3d2d1012e3075f73eb5dcb49523841a985 100644 (file)
@@ -913,7 +913,6 @@ static struct cpufreq_driver longhaul_driver = {
        .target_index = longhaul_target,
        .get    = longhaul_get,
        .init   = longhaul_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "longhaul",
        .attr   = cpufreq_generic_attr,
 };
index b6581abc92078e44f40135045b38fddff3556584..a3588d61d933f62f313767b7034ca749e5dce163 100644 (file)
@@ -104,7 +104,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
        clk_put(policy->clk);
        return 0;
 }
index 590f5b66d18171e1034567c900164d06f7809ce1..5f69c9aa703cc3a58db0d254c1a3c77a816649f3 100644 (file)
@@ -143,7 +143,6 @@ fail:
 
 static int omap_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
        freq_table_free();
        clk_put(policy->clk);
        return 0;
index 3d1cba9fd5f93525d7e5117fe90e537fa019fae7..74f593e70e191ffdd8c297377cf347a833a9dbce 100644 (file)
@@ -237,7 +237,6 @@ static struct cpufreq_driver p4clockmod_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = cpufreq_p4_target,
        .init           = cpufreq_p4_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .get            = cpufreq_p4_get,
        .name           = "p4-clockmod",
        .attr           = cpufreq_generic_attr,
index 0426008380d863d0d428526450efe9533c402a52..6a2b7d3e85a7bf408c926037a0949e4d0cf0143c 100644 (file)
@@ -234,7 +234,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        if (sdcpwr_mapbase)
                iounmap(sdcpwr_mapbase);
 
-       cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
 }
 
index b9a444e358b5cfeb25da13739b374d02124cf011..ce27e6c26c94de3de132407b168acb1c06ac7782 100644 (file)
@@ -231,7 +231,6 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
                if (i == max_multiplier)
                        powernow_k6_target(policy, i);
        }
-       cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
 }
 
index 946708a1d7452d87adc932aac1e1edf7ce1fd29d..0e68e027562135fd34214d440abeaab4dc2405ed 100644 (file)
@@ -664,8 +664,6 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
 
 static int powernow_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
-
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
        if (acpi_processor_perf) {
                acpi_processor_unregister_performance(acpi_processor_perf, 0);
index 6684e0342792517577fde32d56542b3ad84d15af..27eb2be44de56cbae978069c5251fd74c2b76afb 100644 (file)
@@ -1164,8 +1164,6 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
 
        powernow_k8_cpu_exit_acpi(data);
 
-       cpufreq_frequency_table_put_attr(pol->cpu);
-
        kfree(data->powernow_table);
        kfree(data);
        for_each_cpu(cpu, pol->cpus)
index 051000f44ca2c788a839326ce4e99b81d90f82fb..3bd9123e702667e29f33d62ffae237a5e44ffdcd 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <sysdev/fsl_soc.h>
 
 /**
  * struct cpu_data - per CPU data struct
@@ -205,7 +206,8 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        for_each_cpu(i, per_cpu(cpu_mask, cpu))
                per_cpu(cpu_data, i) = data;
 
-       policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+       policy->cpuinfo.transition_latency =
+                               (12 * NSEC_PER_SEC) / fsl_get_sys_freq();
        of_node_put(np);
 
        return 0;
@@ -228,7 +230,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        struct cpu_data *data = per_cpu(cpu_data, policy->cpu);
        unsigned int cpu;
 
-       cpufreq_frequency_table_put_attr(policy->cpu);
        of_node_put(data->parent);
        kfree(data->table);
        kfree(data);
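
The ppc-corenet hunk above replaces the CPUFREQ_ETERNAL transition latency with a value derived from the SoC system clock: (12 * NSEC_PER_SEC) / fsl_get_sys_freq() is twelve system-clock periods expressed in nanoseconds. For example, assuming a hypothetical 100 MHz system clock:

        12 * 10^9 ns / 10^8 Hz = 120 ns

so governors see a realistic, hardware-derived latency instead of treating the driver as having effectively infinite transition time.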
index e42ca9c31ceaf03852206c92419b25e81dc45883..af7b1cabd1e76f643ae652de1b05e4b57107429c 100644 (file)
@@ -141,7 +141,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = cbe_cpufreq_target,
        .init           = cbe_cpufreq_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .name           = "cbe-cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
 };
index a9195a86b069806e0c54e694a1c15e76b6bb56e2..e24269ab4e9bd91208b9971f2378b4a69a92648d 100644 (file)
@@ -427,7 +427,6 @@ static struct cpufreq_driver pxa_cpufreq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = pxa_set_target,
        .init   = pxa_cpufreq_init,
-       .exit   = cpufreq_generic_exit,
        .get    = pxa_cpufreq_get,
        .name   = "PXA2xx",
 };
index 3785687e9d70f2d04dc16769e7188a1b2e8e6326..a012759003897e2340baf74b1d1aa3519f99f278 100644 (file)
@@ -205,7 +205,6 @@ static struct cpufreq_driver pxa3xx_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = pxa3xx_cpufreq_set,
        .init           = pxa3xx_cpufreq_init,
-       .exit           = cpufreq_generic_exit,
        .get            = pxa3xx_cpufreq_get,
        .name           = "pxa3xx-cpufreq",
 };
index 55a8e9fa9435f3226982de7e0f3133b2146b728e..72421534fff57753a5fec3b9660c4842578acd69 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/cpufreq.h>
 #include <linux/reboot.h>
 #include <linux/regulator/consumer.h>
-#include <linux/suspend.h>
 
 #include <mach/map.h>
 #include <mach/regs-clock.h>
@@ -435,18 +434,6 @@ exit:
        return ret;
 }
 
-#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-
-static int s5pv210_cpufreq_resume(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-#endif
-
 static int check_mem_type(void __iomem *dmc_reg)
 {
        unsigned long val;
@@ -502,6 +489,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
        s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
        s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
 
+       policy->suspend_freq = SLEEP_FREQ;
        return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
 
 out_dmc1:
@@ -511,32 +499,6 @@ out_dmc0:
        return ret;
 }
 
-static int s5pv210_cpufreq_notifier_event(struct notifier_block *this,
-                                         unsigned long event, void *ptr)
-{
-       int ret;
-
-       switch (event) {
-       case PM_SUSPEND_PREPARE:
-               ret = cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
-               if (ret < 0)
-                       return NOTIFY_BAD;
-
-               /* Disable updation of cpu frequency */
-               no_cpufreq_access = true;
-               return NOTIFY_OK;
-       case PM_POST_RESTORE:
-       case PM_POST_SUSPEND:
-               /* Enable updation of cpu frequency */
-               no_cpufreq_access = false;
-               cpufreq_driver_target(cpufreq_cpu_get(0), SLEEP_FREQ, 0);
-
-               return NOTIFY_OK;
-       }
-
-       return NOTIFY_DONE;
-}
-
 static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
                                                 unsigned long event, void *ptr)
 {
@@ -558,15 +520,11 @@ static struct cpufreq_driver s5pv210_driver = {
        .init           = s5pv210_cpu_init,
        .name           = "s5pv210",
 #ifdef CONFIG_PM
-       .suspend        = s5pv210_cpufreq_suspend,
-       .resume         = s5pv210_cpufreq_resume,
+       .suspend        = cpufreq_generic_suspend,
+       .resume         = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
 #endif
 };
 
-static struct notifier_block s5pv210_cpufreq_notifier = {
-       .notifier_call = s5pv210_cpufreq_notifier_event,
-};
-
 static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
        .notifier_call = s5pv210_cpufreq_reboot_notifier_event,
 };
@@ -586,7 +544,6 @@ static int __init s5pv210_cpufreq_init(void)
                return PTR_ERR(int_regulator);
        }
 
-       register_pm_notifier(&s5pv210_cpufreq_notifier);
        register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier);
 
        return cpufreq_register_driver(&s5pv210_driver);
index 6adb354e359cc5fef14b1844592abc905117b486..69371bf0886d11a17a86bb169dd624777f4b6048 100644 (file)
@@ -93,7 +93,6 @@ static struct cpufreq_driver sc520_freq_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = sc520_freq_target,
        .init   = sc520_freq_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .name   = "sc520_freq",
        .attr   = cpufreq_generic_attr,
 };
index 387af12503a64e43f15d8fd0fa1d6e108a741f62..696170ebd3a3efb8a92f1946215e863d58ce7e9a 100644 (file)
@@ -143,7 +143,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        unsigned int cpu = policy->cpu;
        struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
 
-       cpufreq_frequency_table_put_attr(cpu);
        clk_put(cpuclk);
 
        return 0;
index 62aa23e219d4a993da1233cdaa23e1f85ee94f21..b73feeb666f9f9ecdf64e1afc3ecb605f4bd262b 100644 (file)
@@ -301,10 +301,8 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
 
 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us2e_driver) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
+       if (cpufreq_us2e_driver)
                us2e_freq_target(policy, 0);
-       }
 
        return 0;
 }
index 724ffbd7105d3611a0cb7112ba184a912954dc90..9bb42ba50efaf90d9b773fa1767422dd3c1f7c98 100644 (file)
@@ -156,10 +156,8 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us3_driver) {
-               cpufreq_frequency_table_put_attr(policy->cpu);
+       if (cpufreq_us3_driver)
                us3_freq_target(policy, 0);
-       }
 
        return 0;
 }
index 5c86e3fa55934c686ce5bc44fea45e254977e478..4cfdcff8a3109826195979495f307c4ff1902d65 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
@@ -163,11 +164,10 @@ static struct cpufreq_driver spear_cpufreq_driver = {
        .target_index   = spear_cpufreq_target,
        .get            = cpufreq_generic_get,
        .init           = spear_cpufreq_init,
-       .exit           = cpufreq_generic_exit,
        .attr           = cpufreq_generic_attr,
 };
 
-static int spear_cpufreq_driver_init(void)
+static int spear_cpufreq_probe(struct platform_device *pdev)
 {
        struct device_node *np;
        const struct property *prop;
@@ -235,7 +235,15 @@ out_put_node:
        of_node_put(np);
        return ret;
 }
-late_initcall(spear_cpufreq_driver_init);
+
+static struct platform_driver spear_cpufreq_platdrv = {
+       .driver = {
+               .name   = "spear-cpufreq",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = spear_cpufreq_probe,
+};
+module_platform_driver(spear_cpufreq_platdrv);
 
 MODULE_AUTHOR("Deepak Sikri <deepak.sikri@st.com>");
 MODULE_DESCRIPTION("SPEAr CPUFreq driver");
index 4e1daca5ce3b900c21955dda15b62c11415f6bee..6723f0390f20dace530f77f4d94c4109af48b351 100644 (file)
@@ -406,8 +406,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
        if (!per_cpu(centrino_model, cpu))
                return -ENODEV;
 
-       cpufreq_frequency_table_put_attr(cpu);
-
        per_cpu(centrino_model, cpu) = NULL;
 
        return 0;
index 7639b2be2a90495ef68c1fca8db8363a1b0caf08..394ac159312a03ffbd359db7c9e30efda9c044eb 100644 (file)
@@ -311,7 +311,6 @@ static struct cpufreq_driver speedstep_driver = {
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = speedstep_target,
        .init   = speedstep_cpu_init,
-       .exit   = cpufreq_generic_exit,
        .get    = speedstep_get,
        .attr   = cpufreq_generic_attr,
 };
index 998c17b4220073f5fddcbe784ba8e6850f6e0d20..db5d274dc13ad4e800c0bd51547c92b6619668ed 100644 (file)
@@ -280,7 +280,6 @@ static struct cpufreq_driver speedstep_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = speedstep_target,
        .init           = speedstep_cpu_init,
-       .exit           = cpufreq_generic_exit,
        .get            = speedstep_get,
        .resume         = speedstep_resume,
        .attr           = cpufreq_generic_attr,
index e652c1bd8d0f57433c348053f0cc2edb13deba00..63f00598a251e9304d27092f74b2a4b9e7c0e636 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/suspend.h>
 
 static struct cpufreq_frequency_table freq_table[] = {
        { .frequency = 216000 },
@@ -47,9 +46,6 @@ static struct clk *pll_x_clk;
 static struct clk *pll_p_clk;
 static struct clk *emc_clk;
 
-static DEFINE_MUTEX(tegra_cpu_lock);
-static bool is_suspended;
-
 static int tegra_cpu_clk_set_rate(unsigned long rate)
 {
        int ret;
@@ -112,42 +108,9 @@ static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
 
 static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
-       int ret = -EBUSY;
-
-       mutex_lock(&tegra_cpu_lock);
-
-       if (!is_suspended)
-               ret = tegra_update_cpu_speed(policy,
-                               freq_table[index].frequency);
-
-       mutex_unlock(&tegra_cpu_lock);
-       return ret;
+       return tegra_update_cpu_speed(policy, freq_table[index].frequency);
 }
 
-static int tegra_pm_notify(struct notifier_block *nb, unsigned long event,
-       void *dummy)
-{
-       mutex_lock(&tegra_cpu_lock);
-       if (event == PM_SUSPEND_PREPARE) {
-               struct cpufreq_policy *policy = cpufreq_cpu_get(0);
-               is_suspended = true;
-               pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
-                       freq_table[0].frequency);
-               if (clk_get_rate(cpu_clk) / 1000 != freq_table[0].frequency)
-                       tegra_update_cpu_speed(policy, freq_table[0].frequency);
-               cpufreq_cpu_put(policy);
-       } else if (event == PM_POST_SUSPEND) {
-               is_suspended = false;
-       }
-       mutex_unlock(&tegra_cpu_lock);
-
-       return NOTIFY_OK;
-}
-
-static struct notifier_block tegra_cpu_pm_notifier = {
-       .notifier_call = tegra_pm_notify,
-};
-
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
        int ret;
@@ -166,16 +129,13 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
                return ret;
        }
 
-       if (policy->cpu == 0)
-               register_pm_notifier(&tegra_cpu_pm_notifier);
-
        policy->clk = cpu_clk;
+       policy->suspend_freq = freq_table[0].frequency;
        return 0;
 }
 
 static int tegra_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_put_attr(policy->cpu);
        clk_disable_unprepare(cpu_clk);
        clk_disable_unprepare(emc_clk);
        return 0;
@@ -190,6 +150,9 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
        .attr           = cpufreq_generic_attr,
+#ifdef CONFIG_PM
+       .suspend        = cpufreq_generic_suspend,
+#endif
 };
 
 static int __init tegra_cpufreq_init(void)
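Note: taken together, the s5pv210 and tegra hunks above move suspend handling into the cpufreq core: the driver records the frequency to use across suspend in policy->suspend_freq at init time and points .suspend at cpufreq_generic_suspend(). A minimal sketch of that pattern, with placeholder names, table entries and latency value (only the suspend-related pieces are shown):

    #include <linux/cpufreq.h>

    static struct cpufreq_frequency_table example_freq_table[] = {
            { .frequency = 216000 },        /* lowest rate, used for suspend */
            { .frequency = 1000000 },
            { .frequency = CPUFREQ_TABLE_END },
    };

    static int example_cpu_init(struct cpufreq_policy *policy)
    {
            /* cpufreq_generic_suspend() will switch to this rate on suspend. */
            policy->suspend_freq = example_freq_table[0].frequency;
            return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
    }

    static struct cpufreq_driver example_driver = {
            .verify         = cpufreq_generic_frequency_table_verify,
            .init           = example_cpu_init,
            .attr           = cpufreq_generic_attr,
    #ifdef CONFIG_PM
            .suspend        = cpufreq_generic_suspend,
    #endif
            /* .target_index, .get, .name etc. omitted for brevity */
    };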
index 78fd174c57e86dfb4608a12f4af44d334ab1da7c..f48607cd254024f07501bd802112e90991d697b1 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
+#include <asm/runlatch.h>
 
 struct cpuidle_driver powernv_idle_driver = {
        .name             = "powernv_idle",
@@ -30,12 +31,14 @@ static int snooze_loop(struct cpuidle_device *dev,
        local_irq_enable();
        set_thread_flag(TIF_POLLING_NRFLAG);
 
+       ppc64_runlatch_off();
        while (!need_resched()) {
                HMT_low();
                HMT_very_low();
        }
 
        HMT_medium();
+       ppc64_runlatch_on();
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb();
        return index;
@@ -45,7 +48,9 @@ static int nap_loop(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
 {
+       ppc64_runlatch_off();
        power7_idle();
+       ppc64_runlatch_on();
        return index;
 }
 
index 7ab564aa0b1c8c141e0b64f750592dc44bbbba3f..6f7b019568850c68478abcaae6de2238a4fac934 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/reg.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
+#include <asm/runlatch.h>
 #include <asm/plpar_wrappers.h>
 
 struct cpuidle_driver pseries_idle_driver = {
@@ -29,6 +30,7 @@ static struct cpuidle_state *cpuidle_state_table;
 
 static inline void idle_loop_prolog(unsigned long *in_purr)
 {
+       ppc64_runlatch_off();
        *in_purr = mfspr(SPRN_PURR);
        /*
         * Indicate to the HV that we are idle. Now would be
@@ -45,6 +47,10 @@ static inline void idle_loop_epilog(unsigned long in_purr)
        wait_cycles += mfspr(SPRN_PURR) - in_purr;
        get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
        get_lppaca()->idle = 0;
+
+       if (irqs_disabled())
+               local_irq_enable();
+       ppc64_runlatch_on();
 }
 
 static int snooze_loop(struct cpuidle_device *dev,
index a55e68f2cfc8bfad02a85957c9d9ddc55823c6fe..cb20fd915be8cefb87c0a35a338dfedc70374a08 100644 (file)
@@ -85,7 +85,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
        time_end = ktime_get();
 
-       local_irq_enable();
+       if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+               local_irq_enable();
 
        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
@@ -140,12 +141,14 @@ int cpuidle_idle_call(void)
                return 0;
        }
 
-       trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
        broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
 
-       if (broadcast)
-               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+       if (broadcast &&
+           clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
+               return -EBUSY;
+
+
+       trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
        if (cpuidle_state_is_coupled(dev, drv, next_state))
                entered_state = cpuidle_enter_state_coupled(dev, drv,
@@ -153,11 +156,11 @@ int cpuidle_idle_call(void)
        else
                entered_state = cpuidle_enter_state(dev, drv, next_state);
 
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+
        if (broadcast)
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
 
-       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
-
        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);
index 06dbe7c86199808aefdf3d9e6b447c0e0b697b1c..136d6a283e0a3818846eab4e78e7e9245b7d6f07 100644 (file)
@@ -209,7 +209,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
-       state->flags = 0;
+       state->flags = CPUIDLE_FLAG_TIME_VALID;
        state->enter = poll_idle;
        state->disabled = false;
 }
index cf7f2f0e4ef54d05ee1cc3babeb0555314c894f9..71b52329335472b7a888e16b4075c6ea7a4f58c5 100644 (file)
@@ -122,9 +122,8 @@ struct menu_device {
        int             last_state_idx;
        int             needs_update;
 
-       unsigned int    expected_us;
+       unsigned int    next_timer_us;
        unsigned int    predicted_us;
-       unsigned int    exit_us;
        unsigned int    bucket;
        unsigned int    correction_factor[BUCKETS];
        unsigned int    intervals[INTERVALS];
@@ -257,7 +256,7 @@ again:
                stddev = int_sqrt(stddev);
                if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
                                                        || stddev <= 20) {
-                       if (data->expected_us > avg)
+                       if (data->next_timer_us > avg)
                                data->predicted_us = avg;
                        return;
                }
@@ -289,7 +288,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
-       int multiplier;
+       unsigned int interactivity_req;
        struct timespec t;
 
        if (data->needs_update) {
@@ -298,7 +297,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        }
 
        data->last_state_idx = 0;
-       data->exit_us = 0;
 
        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
@@ -306,13 +304,11 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 
        /* determine the expected residency time, round up */
        t = ktime_to_timespec(tick_nohz_get_sleep_length());
-       data->expected_us =
+       data->next_timer_us =
                t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;
 
 
-       data->bucket = which_bucket(data->expected_us);
-
-       multiplier = performance_multiplier();
+       data->bucket = which_bucket(data->next_timer_us);
 
        /*
         * if the correction factor is 0 (eg first time init or cpu hotplug
@@ -326,17 +322,26 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * operands are 32 bits.
         * Make sure to round up for half microseconds.
         */
-       data->predicted_us = div_round64((uint64_t)data->expected_us *
+       data->predicted_us = div_round64((uint64_t)data->next_timer_us *
                                         data->correction_factor[data->bucket],
                                         RESOLUTION * DECAY);
 
        get_typical_interval(data);
 
+       /*
+        * Performance multiplier defines a minimum predicted idle
+        * duration / latency ratio. Adjust the latency limit if
+        * necessary.
+        */
+       interactivity_req = data->predicted_us / performance_multiplier();
+       if (latency_req > interactivity_req)
+               latency_req = interactivity_req;
+
        /*
         * We want to default to C1 (hlt), not to busy polling
         * unless the timer is happening really really soon.
         */
-       if (data->expected_us > 5 &&
+       if (data->next_timer_us > 5 &&
            !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
                dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
@@ -355,11 +360,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                        continue;
                if (s->exit_latency > latency_req)
                        continue;
-               if (s->exit_latency * multiplier > data->predicted_us)
-                       continue;
 
                data->last_state_idx = i;
-               data->exit_us = s->exit_latency;
        }
 
        return data->last_state_idx;
@@ -390,36 +392,47 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int last_idx = data->last_state_idx;
-       unsigned int last_idle_us = cpuidle_get_last_residency(dev);
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        unsigned int new_factor;
 
        /*
-        * Ugh, this idle state doesn't support residency measurements, so we
-        * are basically lost in the dark.  As a compromise, assume we slept
-        * for the whole expected time.
+        * Try to figure out how much time passed between entry to low
+        * power state and occurrence of the wakeup event.
+        *
+        * If the entered idle state didn't support residency measurements,
+        * we are basically lost in the dark about how much time passed.
+        * As a compromise, assume we slept for the whole expected time.
+        *
+        * Any measured amount of time will include the exit latency.
+        * Since we are interested in when the wakeup began, not when it
+        * was completed, we must subtract the exit latency. However, if
+        * the measured amount of time is less than the exit latency,
+        * assume the state was never reached and the exit latency is 0.
         */
-       if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
-               last_idle_us = data->expected_us;
+       if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID))) {
+               /* Use timer value as is */
+               measured_us = data->next_timer_us;
 
+       } else {
+               /* Use measured value */
+               measured_us = cpuidle_get_last_residency(dev);
 
-       measured_us = last_idle_us;
-
-       /*
-        * We correct for the exit latency; we are assuming here that the
-        * exit latency happens after the event that we're interested in.
-        */
-       if (measured_us > data->exit_us)
-               measured_us -= data->exit_us;
+               /* Deduct exit latency */
+               if (measured_us > target->exit_latency)
+                       measured_us -= target->exit_latency;
 
+               /* Make sure our coefficients do not exceed unity */
+               if (measured_us > data->next_timer_us)
+                       measured_us = data->next_timer_us;
+       }
 
        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
        new_factor -= new_factor / DECAY;
 
-       if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
-               new_factor += RESOLUTION * measured_us / data->expected_us;
+       if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
+               new_factor += RESOLUTION * measured_us / data->next_timer_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
@@ -439,7 +452,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
        data->correction_factor[data->bucket] = new_factor;
 
        /* update the repeating-pattern data */
-       data->intervals[data->interval_ptr++] = last_idle_us;
+       data->intervals[data->interval_ptr++] = measured_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
 }
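Note: the menu governor hunks above rename expected_us to next_timer_us, fold the old per-state multiplier check into the latency limit (latency_req is clamped to predicted_us / performance_multiplier()), and deduct the selected state's exit_latency from the measured residency in menu_update(), clamping the result to next_timer_us so the learned ratio never exceeds one. The prediction and update steps boil down to the fixed-point arithmetic below (a standalone sketch: RESOLUTION and DECAY are governor constants defined elsewhere in menu.c, and the real code rounds the division to nearest via its local div_round64() helper):

    #include <linux/types.h>        /* for u64 */

    /* menu_select(): scale the next timer expiry by the learned ratio. */
    static unsigned int menu_predict(unsigned int next_timer_us, unsigned int factor,
                                     unsigned int resolution, unsigned int decay)
    {
            return (unsigned int)(((u64)next_timer_us * factor) /
                                  (resolution * decay));
    }

    /* menu_update(): age the old factor, then fold in the latest
     * measured/next_timer ratio, scaled by RESOLUTION.
     */
    static unsigned int menu_new_factor(unsigned int factor, unsigned int measured_us,
                                        unsigned int next_timer_us,
                                        unsigned int resolution, unsigned int decay)
    {
            factor -= factor / decay;
            factor += resolution * measured_us / next_timer_us;
            return factor;
    }

Because measured_us is capped at next_timer_us, the added term is at most RESOLUTION, so a factor that starts at or below RESOLUTION * DECAY stays there, i.e. the predicted residency never exceeds the next timer expiry.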
index a0b2f7e0eedb774fbfa3e8fc0e41b73f10cfefd0..2042ec3656bac3b4227d63df358d6454923a5ef5 100644 (file)
@@ -91,26 +91,35 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
  */
 static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
 {
-       int lev, prev_lev;
+       int lev, prev_lev, ret = 0;
        unsigned long cur_time;
 
-       lev = devfreq_get_freq_level(devfreq, freq);
-       if (lev < 0)
-               return lev;
-
        cur_time = jiffies;
-       devfreq->time_in_state[lev] +=
+
+       prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
+       if (prev_lev < 0) {
+               ret = prev_lev;
+               goto out;
+       }
+
+       devfreq->time_in_state[prev_lev] +=
                         cur_time - devfreq->last_stat_updated;
-       if (freq != devfreq->previous_freq) {
-               prev_lev = devfreq_get_freq_level(devfreq,
-                                               devfreq->previous_freq);
+
+       lev = devfreq_get_freq_level(devfreq, freq);
+       if (lev < 0) {
+               ret = lev;
+               goto out;
+       }
+
+       if (lev != prev_lev) {
                devfreq->trans_table[(prev_lev *
                                devfreq->profile->max_state) + lev]++;
                devfreq->total_trans++;
        }
-       devfreq->last_stat_updated = cur_time;
 
-       return 0;
+out:
+       devfreq->last_stat_updated = cur_time;
+       return ret;
 }
 
 /**
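Note: the reworked devfreq_update_status() above always charges the elapsed time to the level of previous_freq, refreshes last_stat_updated even on the error paths, and bumps the transition counter only when the level actually changes. trans_table is a flattened max_state x max_state matrix indexed by [previous level][new level]; an illustrative helper (not part of this patch) for reading one cell:

    #include <linux/devfreq.h>

    /* Number of recorded transitions from frequency level prev_lev to lev. */
    static unsigned int devfreq_trans_count(struct devfreq *devfreq,
                                            int prev_lev, int lev)
    {
            return devfreq->trans_table[prev_lev *
                                        devfreq->profile->max_state + lev];
    }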
index 1b5e8e46226d5f3d6bebdb770d8b29de9f04fc43..7160c43c59fcc31af1fbbbf79c1b44d87a68e22f 100644 (file)
@@ -584,7 +584,7 @@ static struct platform_driver dcdbas_driver = {
        .remove         = dcdbas_remove,
 };
 
-static const struct platform_device_info dcdbas_dev_info __initdata = {
+static const struct platform_device_info dcdbas_dev_info __initconst = {
        .name           = DRIVER_NAME,
        .id             = -1,
        .dma_mask       = DMA_BIT_MASK(32),
index b6bffbfd3be7a14131c7735960a12db3f43b122d..ff50aeebf0d98828072eea4b40944ebfa7594c77 100644 (file)
@@ -16,18 +16,6 @@ struct file_info {
        u64 size;
 };
 
-
-
-
-static void efi_char16_printk(efi_system_table_t *sys_table_arg,
-                             efi_char16_t *str)
-{
-       struct efi_simple_text_output_protocol *out;
-
-       out = (struct efi_simple_text_output_protocol *)sys_table_arg->con_out;
-       efi_call_phys2(out->output_string, out, str);
-}
-
 static void efi_printk(efi_system_table_t *sys_table_arg, char *str)
 {
        char *s8;
@@ -65,20 +53,23 @@ again:
         * allocation which may be in a new descriptor region.
         */
        *map_size += sizeof(*m);
-       status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
-                               EFI_LOADER_DATA, *map_size, (void **)&m);
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               *map_size, (void **)&m);
        if (status != EFI_SUCCESS)
                goto fail;
 
-       status = efi_call_phys5(sys_table_arg->boottime->get_memory_map,
-                               map_size, m, &key, desc_size, &desc_version);
+       *desc_size = 0;
+       key = 0;
+       status = efi_call_early(get_memory_map, map_size, m,
+                               &key, desc_size, &desc_version);
        if (status == EFI_BUFFER_TOO_SMALL) {
-               efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+               efi_call_early(free_pool, m);
                goto again;
        }
 
        if (status != EFI_SUCCESS)
-               efi_call_phys1(sys_table_arg->boottime->free_pool, m);
+               efi_call_early(free_pool, m);
+
        if (key_ptr && status == EFI_SUCCESS)
                *key_ptr = key;
        if (desc_ver && status == EFI_SUCCESS)
@@ -158,7 +149,7 @@ again:
        if (!max_addr)
                status = EFI_NOT_FOUND;
        else {
-               status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+               status = efi_call_early(allocate_pages,
                                        EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
                                        nr_pages, &max_addr);
                if (status != EFI_SUCCESS) {
@@ -170,8 +161,7 @@ again:
                *addr = max_addr;
        }
 
-       efi_call_phys1(sys_table_arg->boottime->free_pool, map);
-
+       efi_call_early(free_pool, map);
 fail:
        return status;
 }
@@ -231,7 +221,7 @@ static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                if ((start + size) > end)
                        continue;
 
-               status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+               status = efi_call_early(allocate_pages,
                                        EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
                                        nr_pages, &start);
                if (status == EFI_SUCCESS) {
@@ -243,7 +233,7 @@ static efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
        if (i == map_size / desc_size)
                status = EFI_NOT_FOUND;
 
-       efi_call_phys1(sys_table_arg->boottime->free_pool, map);
+       efi_call_early(free_pool, map);
 fail:
        return status;
 }
@@ -257,7 +247,7 @@ static void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
                return;
 
        nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
-       efi_call_phys2(sys_table_arg->boottime->free_pages, addr, nr_pages);
+       efi_call_early(free_pages, addr, nr_pages);
 }
 
 
@@ -276,9 +266,7 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 {
        struct file_info *files;
        unsigned long file_addr;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
        u64 file_size_total;
-       efi_file_io_interface_t *io;
        efi_file_handle_t *fh;
        efi_status_t status;
        int nr_files;
@@ -319,10 +307,8 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
        if (!nr_files)
                return EFI_SUCCESS;
 
-       status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
-                               EFI_LOADER_DATA,
-                               nr_files * sizeof(*files),
-                               (void **)&files);
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               nr_files * sizeof(*files), (void **)&files);
        if (status != EFI_SUCCESS) {
                efi_printk(sys_table_arg, "Failed to alloc mem for file handle list\n");
                goto fail;
@@ -331,13 +317,8 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
        str = cmd_line;
        for (i = 0; i < nr_files; i++) {
                struct file_info *file;
-               efi_file_handle_t *h;
-               efi_file_info_t *info;
                efi_char16_t filename_16[256];
-               unsigned long info_sz;
-               efi_guid_t info_guid = EFI_FILE_INFO_ID;
                efi_char16_t *p;
-               u64 file_sz;
 
                str = strstr(str, option_string);
                if (!str)
@@ -368,71 +349,18 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
                /* Only open the volume once. */
                if (!i) {
-                       efi_boot_services_t *boottime;
-
-                       boottime = sys_table_arg->boottime;
-
-                       status = efi_call_phys3(boottime->handle_protocol,
-                                       image->device_handle, &fs_proto,
-                                               (void **)&io);
-                       if (status != EFI_SUCCESS) {
-                               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
-                               goto free_files;
-                       }
-
-                       status = efi_call_phys2(io->open_volume, io, &fh);
-                       if (status != EFI_SUCCESS) {
-                               efi_printk(sys_table_arg, "Failed to open volume\n");
+                       status = efi_open_volume(sys_table_arg, image,
+                                                (void **)&fh);
+                       if (status != EFI_SUCCESS)
                                goto free_files;
-                       }
                }
 
-               status = efi_call_phys5(fh->open, fh, &h, filename_16,
-                                       EFI_FILE_MODE_READ, (u64)0);
-               if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table_arg, "Failed to open file: ");
-                       efi_char16_printk(sys_table_arg, filename_16);
-                       efi_printk(sys_table_arg, "\n");
+               status = efi_file_size(sys_table_arg, fh, filename_16,
+                                      (void **)&file->handle, &file->size);
+               if (status != EFI_SUCCESS)
                        goto close_handles;
-               }
 
-               file->handle = h;
-
-               info_sz = 0;
-               status = efi_call_phys4(h->get_info, h, &info_guid,
-                                       &info_sz, NULL);
-               if (status != EFI_BUFFER_TOO_SMALL) {
-                       efi_printk(sys_table_arg, "Failed to get file info size\n");
-                       goto close_handles;
-               }
-
-grow:
-               status = efi_call_phys3(sys_table_arg->boottime->allocate_pool,
-                                       EFI_LOADER_DATA, info_sz,
-                                       (void **)&info);
-               if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table_arg, "Failed to alloc mem for file info\n");
-                       goto close_handles;
-               }
-
-               status = efi_call_phys4(h->get_info, h, &info_guid,
-                                       &info_sz, info);
-               if (status == EFI_BUFFER_TOO_SMALL) {
-                       efi_call_phys1(sys_table_arg->boottime->free_pool,
-                                      info);
-                       goto grow;
-               }
-
-               file_sz = info->file_size;
-               efi_call_phys1(sys_table_arg->boottime->free_pool, info);
-
-               if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table_arg, "Failed to get file info\n");
-                       goto close_handles;
-               }
-
-               file->size = file_sz;
-               file_size_total += file_sz;
+               file_size_total += file->size;
        }
 
        if (file_size_total) {
@@ -468,10 +396,10 @@ grow:
                                        chunksize = EFI_READ_CHUNK_SIZE;
                                else
                                        chunksize = size;
-                               status = efi_call_phys3(fh->read,
-                                                       files[j].handle,
-                                                       &chunksize,
-                                                       (void *)addr);
+
+                               status = efi_file_read(fh, files[j].handle,
+                                                      &chunksize,
+                                                      (void *)addr);
                                if (status != EFI_SUCCESS) {
                                        efi_printk(sys_table_arg, "Failed to read file\n");
                                        goto free_file_total;
@@ -480,12 +408,12 @@ grow:
                                size -= chunksize;
                        }
 
-                       efi_call_phys1(fh->close, files[j].handle);
+                       efi_file_close(fh, files[j].handle);
                }
 
        }
 
-       efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+       efi_call_early(free_pool, files);
 
        *load_addr = file_addr;
        *load_size = file_size_total;
@@ -497,9 +425,9 @@ free_file_total:
 
 close_handles:
        for (k = j; k < i; k++)
-               efi_call_phys1(fh->close, files[k].handle);
+               efi_file_close(fh, files[k].handle);
 free_files:
-       efi_call_phys1(sys_table_arg->boottime->free_pool, files);
+       efi_call_early(free_pool, files);
 fail:
        *load_addr = 0;
        *load_size = 0;
@@ -545,7 +473,7 @@ static efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
         * as possible while respecting the required alignment.
         */
        nr_pages = round_up(alloc_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
-       status = efi_call_phys4(sys_table_arg->boottime->allocate_pages,
+       status = efi_call_early(allocate_pages,
                                EFI_ALLOCATE_ADDRESS, EFI_LOADER_DATA,
                                nr_pages, &efi_addr);
        new_addr = efi_addr;
index 4753bac652798501cc79caeb50bd6c9ebd2b2dd3..af20f17123374f9ba4f6b4ab2d1e4faa47ef43c5 100644 (file)
@@ -233,7 +233,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
        {SAL_SYSTEM_TABLE_GUID, "SALsystab", &efi.sal_systab},
        {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
        {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
-       {NULL_GUID, NULL, 0},
+       {NULL_GUID, NULL, NULL},
 };
 
 static __init int match_config_table(efi_guid_t *guid,
@@ -313,5 +313,8 @@ int __init efi_config_init(efi_config_table_type_t *arch_tables)
        }
        pr_cont("\n");
        early_iounmap(config_tables, efi.systab->nr_tables * sz);
+
+       set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
        return 0;
 }
index 3dc24823919749ebb110f62b9ca5696cdaf67511..50ea412a25e64058a58b3de92b8f141f5db4e5a5 100644 (file)
@@ -227,7 +227,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
        memcpy(&entry->var, new_var, count);
 
        err = efivar_entry_set(entry, new_var->Attributes,
-                              new_var->DataSize, new_var->Data, false);
+                              new_var->DataSize, new_var->Data, NULL);
        if (err) {
                printk(KERN_WARNING "efivars: set_variable() failed: status=%d\n", err);
                return -EIO;
index bb8f58012189af7651c20c2d9b1da1ed390ebcfd..534cb89b160d686d60218dad53082be9b0f331ea 100644 (file)
 #include <drm/drmP.h>
 
 #if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues.  For drm_clflush_page this fencing happens
+ * in the caller.
+ */
 static void
 drm_clflush_page(struct page *page)
 {
@@ -44,7 +50,7 @@ drm_clflush_page(struct page *page)
 
        page_virtual = kmap_atomic(page);
        for (i = 0; i < PAGE_SIZE; i += size)
-               clflush(page_virtual + i);
+               clflushopt(page_virtual + i);
        kunmap_atomic(page_virtual);
 }
 
@@ -133,7 +139,7 @@ drm_clflush_virt_range(char *addr, unsigned long length)
                mb();
                for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
                        clflush(addr);
-               clflush(end - 1);
+               clflushopt(end - 1);
                mb();
                return;
        }
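Note: the new comment in drm_cache.c spells out why switching from clflush() to clflushopt() is safe only with explicit fencing: clflushopt is weakly ordered, so a range flush has to be bracketed by memory barriers, as drm_clflush_virt_range() does. A condensed sketch of that discipline (illustrative only; like the hunk above it assumes x86, since the code sits under #if defined(CONFIG_X86) and uses the same headers as drm_clflush_virt_range()):

    static void example_flush_range(char *addr, unsigned long length)
    {
            char *end = addr + length;

            mb();           /* order the flushes after earlier stores */
            for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
                    clflushopt(addr);
            mb();           /* fence the flushes before later accesses */
    }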
index 508cf99a292df18c569cc9ffd3466e8b598629e0..17f928ec84ea77790e4fef756bee55757f42d8e1 100644 (file)
@@ -10,7 +10,6 @@ config DRM_GMA500
        # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
        select ACPI_VIDEO if ACPI
        select BACKLIGHT_CLASS_DEVICE if ACPI
-       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        help
          Say yes for an experimental 2D KMS framebuffer driver for the
index 49bac41beefb3251ecc6a63f04e86d0d36de9974..c3e67ba94446d34a24b420c72804e029384948d7 100644 (file)
@@ -520,7 +520,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem * registers,
 
        driver->has_clflush = 0;
 
-       if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
+       if (boot_cpu_has(X86_FEATURE_CLFLUSH)) {
                uint32_t tfms, misc, cap0, cap4, clflush_size;
 
                /*
index 73ed59eff139a88948c7766dcc0c999398f59254..bea2d67196fb47b95f87d03ed80cd6dd53f250d4 100644 (file)
@@ -14,7 +14,6 @@ config DRM_I915
        # but for select to work, need to select ACPI_VIDEO's dependencies, ick
        select BACKLIGHT_LCD_SUPPORT if ACPI
        select BACKLIGHT_CLASS_DEVICE if ACPI
-       select VIDEO_OUTPUT_CONTROL if ACPI
        select INPUT if ACPI
        select ACPI_VIDEO if ACPI
        select ACPI_BUTTON if ACPI
index 40a2b36b276baa774028b56ae60b6ae6c59e919d..d278be110805ba50965f1e071213720434cb9e78 100644 (file)
@@ -842,7 +842,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       dev_priv->gtt.base.start / PAGE_SIZE,
                                       dev_priv->gtt.base.total / PAGE_SIZE,
-                                      false);
+                                      true);
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
index 7cf787d697b111aade40bf0cb34a1db52a2c41a6..637c29a33127f98ad341ba6d4aa097857c5ea9c9 100644 (file)
@@ -11,7 +11,7 @@ config DRM_NOUVEAU
        select FB
        select FRAMEBUFFER_CONSOLE if !EXPERT
        select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
-       select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
+       select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
        select X86_PLATFORM_DEVICES if ACPI && X86
        select ACPI_WMI if ACPI && X86
        select MXM_WMI if ACPI && X86
@@ -19,7 +19,6 @@ config DRM_NOUVEAU
        # Similar to i915, we need to select ACPI_VIDEO and it's dependencies
        select BACKLIGHT_LCD_SUPPORT if ACPI && X86
        select BACKLIGHT_CLASS_DEVICE if ACPI && X86
-       select VIDEO_OUTPUT_CONTROL if ACPI && X86
        select INPUT if ACPI && X86
        select THERMAL if ACPI && X86
        select ACPI_VIDEO if ACPI && X86
index 89c484d8ac2666d8b3c04bb818cb5eec2665bb38..4ee702ac8907bcc3d486000b525ad61857cdf5ff 100644 (file)
@@ -866,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (nouveau_runtime_pm == 0)
-               return -EINVAL;
+       if (nouveau_runtime_pm == 0) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
 
        /* are we optimus enabled? */
        if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
                DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
-               return -EINVAL;
+               pm_runtime_forbid(dev);
+               return -EBUSY;
        }
 
        nv_debug_level(SILENT);
@@ -923,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
        struct nouveau_drm *drm = nouveau_drm(drm_dev);
        struct drm_crtc *crtc;
 
-       if (nouveau_runtime_pm == 0)
+       if (nouveau_runtime_pm == 0) {
+               pm_runtime_forbid(dev);
                return -EBUSY;
+       }
 
        /* are we optimus enabled? */
        if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
                DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+               pm_runtime_forbid(dev);
                return -EBUSY;
        }
 
index 84a1bbb75f914a7bd914ad9120b8a2e2ac9aac5a..f633c2782170b09bcc8722d1568cbe0a8c665f12 100644 (file)
@@ -403,11 +403,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (radeon_runtime_pm == 0)
-               return -EINVAL;
+       if (radeon_runtime_pm == 0) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
 
-       if (radeon_runtime_pm == -1 && !radeon_is_px())
-               return -EINVAL;
+       if (radeon_runtime_pm == -1 && !radeon_is_px()) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
 
        drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
        drm_kms_helper_poll_disable(drm_dev);
@@ -456,12 +460,15 @@ static int radeon_pmops_runtime_idle(struct device *dev)
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        struct drm_crtc *crtc;
 
-       if (radeon_runtime_pm == 0)
+       if (radeon_runtime_pm == 0) {
+               pm_runtime_forbid(dev);
                return -EBUSY;
+       }
 
        /* are we PX enabled? */
        if (radeon_runtime_pm == -1 && !radeon_is_px()) {
                DRM_DEBUG_DRIVER("failing to power off - not px\n");
+               pm_runtime_forbid(dev);
                return -EBUSY;
        }
 
index 8d67b943ac05ce2d1dd44597d00a1e5a07fd9bdb..0394811251bd7e8dd75159d9d01b045d089d2290 100644 (file)
@@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
        if (obj->vmapping)
                udl_gem_vunmap(obj);
 
-       if (gem_obj->import_attach)
+       if (gem_obj->import_attach) {
                drm_prime_gem_destroy(gem_obj, obj->sg);
+               put_device(gem_obj->dev->dev);
+       }
 
        if (obj->pages)
                udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
        int ret;
 
        /* need to attach */
+       get_device(dev->dev);
        attach = dma_buf_attach(dma_buf, dev->dev);
-       if (IS_ERR(attach))
+       if (IS_ERR(attach)) {
+               put_device(dev->dev);
                return ERR_CAST(attach);
+       }
 
        get_dma_buf(dma_buf);
 
@@ -282,6 +287,6 @@ fail_unmap:
 fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);
-
+       put_device(dev->dev);
        return ERR_PTR(ret);
 }
index 077bb1bdac34ef4ed87c65c7bf0d204fd42001b4..3f0a95290e140daa99880cfffe8d0545dcaa7e0b 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/sysctl.h>
 #include <linux/slab.h>
@@ -558,9 +557,6 @@ static struct bus_type  hv_bus = {
        .dev_groups =           vmbus_groups,
 };
 
-static const char *driver_name = "hyperv";
-
-
 struct onmessage_work_context {
        struct work_struct work;
        struct hv_message msg;
@@ -619,7 +615,7 @@ static void vmbus_on_msg_dpc(unsigned long data)
        }
 }
 
-static irqreturn_t vmbus_isr(int irq, void *dev_id)
+static void vmbus_isr(void)
 {
        int cpu = smp_processor_id();
        void *page_addr;
@@ -629,7 +625,7 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
 
        page_addr = hv_context.synic_event_page[cpu];
        if (page_addr == NULL)
-               return IRQ_NONE;
+               return;
 
        event = (union hv_synic_event_flags *)page_addr +
                                         VMBUS_MESSAGE_SINT;
@@ -665,28 +661,8 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 
        /* Check if there are actual msgs to be processed */
-       if (msg->header.message_type != HVMSG_NONE) {
-               handled = true;
+       if (msg->header.message_type != HVMSG_NONE)
                tasklet_schedule(&msg_dpc);
-       }
-
-       if (handled)
-               return IRQ_HANDLED;
-       else
-               return IRQ_NONE;
-}
-
-/*
- * vmbus interrupt flow handler:
- * vmbus interrupts can concurrently occur on multiple CPUs and
- * can be handled concurrently.
- */
-
-static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
-{
-       kstat_incr_irqs_this_cpu(irq, desc);
-
-       desc->action->handler(irq, desc->action->dev_id);
 }
 
 /*
@@ -715,25 +691,7 @@ static int vmbus_bus_init(int irq)
        if (ret)
                goto err_cleanup;
 
-       ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev);
-
-       if (ret != 0) {
-               pr_err("Unable to request IRQ %d\n",
-                          irq);
-               goto err_unregister;
-       }
-
-       /*
-        * Vmbus interrupts can be handled concurrently on
-        * different CPUs. Establish an appropriate interrupt flow
-        * handler that can support this model.
-        */
-       irq_set_handler(irq, vmbus_flow_handler);
-
-       /*
-        * Register our interrupt handler.
-        */
-       hv_register_vmbus_handler(irq, vmbus_isr);
+       hv_setup_vmbus_irq(vmbus_isr);
 
        ret = hv_synic_alloc();
        if (ret)
@@ -753,9 +711,8 @@ static int vmbus_bus_init(int irq)
 
 err_alloc:
        hv_synic_free();
-       free_irq(irq, hv_acpi_dev);
+       hv_remove_vmbus_irq();
 
-err_unregister:
        bus_unregister(&hv_bus);
 
 err_cleanup:
@@ -947,7 +904,6 @@ static int __init hv_acpi_init(void)
        /*
         * Get irq resources first.
         */
-
        ret = acpi_bus_register_driver(&vmbus_acpi_driver);
 
        if (ret)
@@ -978,8 +934,7 @@ cleanup:
 
 static void __exit vmbus_exit(void)
 {
-
-       free_irq(irq, hv_acpi_dev);
+       hv_remove_vmbus_irq();
        vmbus_free_channels();
        bus_unregister(&hv_bus);
        hv_cleanup();
index be7f0a20d634d1107a7bb3147b32edf0256546ed..f3b89a4698b6192a24d031bab358796e51fbb767 100644 (file)
@@ -39,7 +39,9 @@
 #include <linux/i2c.h>
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <sysdev/fsl_soc.h>
 #include <asm/cpm.h>
index a06e12552886fa57ffbe1731eb744762666cd787..ce953d895f5b2b92d3bbb0d1a19be8a86d563d6b 100644 (file)
@@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                        return -EFAULT;
 
                error = input_ff_upload(dev, &effect, file);
+               if (error)
+                       return error;
 
                if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
                        return -EFAULT;
 
-               return error;
+               return 0;
        }
 
        /* Multi-number variable-length handlers */
index bb3b57bea8ba4578df5d29c72230882ac29bd62a..5ef7fcf0e2509b8196cf69a993247febae4c20b7 100644 (file)
@@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
        struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
        unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
        unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
+       int val;
 
-       return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
+       mutex_lock(&kpad->gpio_lock);
+
+       if (kpad->dir[bank] & bit)
+               val = kpad->dat_out[bank];
+       else
+               val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
+
+       mutex_unlock(&kpad->gpio_lock);
+
+       return !!(val & bit);
 }
 
 static void adp5588_gpio_set_value(struct gpio_chip *chip,
index 1f695f229ea8988001da801bb60db7b6d939f293..184c8f21ab59a9736feb262f79331cae878af1ff 100644 (file)
@@ -27,29 +27,32 @@ struct da9052_onkey {
 
 static void da9052_onkey_query(struct da9052_onkey *onkey)
 {
-       int key_stat;
+       int ret;
 
-       key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
-       if (key_stat < 0) {
+       ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
+       if (ret < 0) {
                dev_err(onkey->da9052->dev,
-                       "Failed to read onkey event %d\n", key_stat);
+                       "Failed to read onkey event err=%d\n", ret);
        } else {
                /*
                 * Since interrupt for deassertion of ONKEY pin is not
                 * generated, onkey event state determines the onkey
                 * button state.
                 */
-               key_stat &= DA9052_EVENTB_ENONKEY;
-               input_report_key(onkey->input, KEY_POWER, key_stat);
+               bool pressed = !(ret & DA9052_STATUSA_NONKEY);
+
+               input_report_key(onkey->input, KEY_POWER, pressed);
                input_sync(onkey->input);
-       }
 
-       /*
-        * Interrupt is generated only when the ONKEY pin is asserted.
-        * Hence the deassertion of the pin is simulated through work queue.
-        */
-       if (key_stat)
-               schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
+               /*
+                * Interrupt is generated only when the ONKEY pin
+                * is asserted.  Hence the deassertion of the pin
+                * is simulated through work queue.
+                */
+               if (pressed)
+                       schedule_delayed_work(&onkey->work,
+                                               msecs_to_jiffies(50));
+       }
 }
 
 static void da9052_onkey_work(struct work_struct *work)
index 87095e2f5153c7dedeae38a18cd10efaa62a9e0c..8af34ffe208b16eff1ab240de45967fb84b6610f 100644 (file)
@@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
        __clear_bit(REL_X, input->relbit);
        __clear_bit(REL_Y, input->relbit);
 
-       __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
        __set_bit(EV_KEY, input->evbit);
        __set_bit(BTN_LEFT, input->keybit);
        __set_bit(BTN_RIGHT, input->keybit);
index 26386f9d25696841584f4b7f698a3cc8727fdc54..d8d49d10f9bb60d477124bba603fb9f61be1efdb 100644 (file)
@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
  * Read touchpad resolution and maximum reported coordinates
  * Resolution is left zero if touchpad does not support the query
  */
+
+static const int *quirk_min_max;
+
 static int synaptics_resolution(struct psmouse *psmouse)
 {
        struct synaptics_data *priv = psmouse->private;
        unsigned char resp[3];
 
+       if (quirk_min_max) {
+               priv->x_min = quirk_min_max[0];
+               priv->x_max = quirk_min_max[1];
+               priv->y_min = quirk_min_max[2];
+               priv->y_max = quirk_min_max[3];
+               return 0;
+       }
+
        if (SYN_ID_MAJOR(priv->identity) < 4)
                return 0;
 
@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
        { }
 };
 
+static const struct dmi_system_id min_max_dmi_table[] __initconst = {
+#if defined(CONFIG_DMI)
+       {
+               /* Lenovo ThinkPad Helix */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+               },
+               .driver_data = (int []){1024, 5052, 2258, 4832},
+       },
+       {
+               /* Lenovo ThinkPad X240 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
+               },
+               .driver_data = (int []){1232, 5710, 1156, 4696},
+       },
+       {
+               /* Lenovo ThinkPad T440s */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
+               },
+               .driver_data = (int []){1024, 5112, 2024, 4832},
+       },
+       {
+               /* Lenovo ThinkPad T540p */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
+               },
+               .driver_data = (int []){1024, 5056, 2058, 4832},
+       },
+#endif
+       { }
+};
+
 void __init synaptics_module_init(void)
 {
+       const struct dmi_system_id *min_max_dmi;
+
        impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
        broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+
+       min_max_dmi = dmi_first_match(min_max_dmi_table);
+       if (min_max_dmi)
+               quirk_min_max = min_max_dmi->driver_data;
 }
 
 static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
index 4c842c320c2ede9f2bf42d507ee5c91f00eef7e3..b604564dec5c9a5d8d154ac18a61c02341f77e5d 100644 (file)
@@ -67,7 +67,6 @@ struct mousedev {
        struct device dev;
        struct cdev cdev;
        bool exist;
-       bool is_mixdev;
 
        struct list_head mixdev_node;
        bool opened_by_mixdev;
@@ -77,6 +76,9 @@ struct mousedev {
        int old_x[4], old_y[4];
        int frac_dx, frac_dy;
        unsigned long touch;
+
+       int (*open_device)(struct mousedev *mousedev);
+       void (*close_device)(struct mousedev *mousedev);
 };
 
 enum mousedev_emul {
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
 static struct mousedev *mousedev_mix;
 static LIST_HEAD(mousedev_mix_list);
 
-static void mixdev_open_devices(void);
-static void mixdev_close_devices(void);
-
 #define fx(i)  (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
 #define fy(i)  (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
 
@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
        if (retval)
                return retval;
 
-       if (mousedev->is_mixdev)
-               mixdev_open_devices();
-       else if (!mousedev->exist)
+       if (!mousedev->exist)
                retval = -ENODEV;
        else if (!mousedev->open++) {
                retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
 {
        mutex_lock(&mousedev->mutex);
 
-       if (mousedev->is_mixdev)
-               mixdev_close_devices();
-       else if (mousedev->exist && !--mousedev->open)
+       if (mousedev->exist && !--mousedev->open)
                input_close_device(&mousedev->handle);
 
        mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
  * stream. Note that this function is called with mousedev_mix->mutex
  * held.
  */
-static void mixdev_open_devices(void)
+static int mixdev_open_devices(struct mousedev *mixdev)
 {
-       struct mousedev *mousedev;
+       int error;
+
+       error = mutex_lock_interruptible(&mixdev->mutex);
+       if (error)
+               return error;
 
-       if (mousedev_mix->open++)
-               return;
+       if (!mixdev->open++) {
+               struct mousedev *mousedev;
 
-       list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
-               if (!mousedev->opened_by_mixdev) {
-                       if (mousedev_open_device(mousedev))
-                               continue;
+               list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+                       if (!mousedev->opened_by_mixdev) {
+                               if (mousedev_open_device(mousedev))
+                                       continue;
 
-                       mousedev->opened_by_mixdev = true;
+                               mousedev->opened_by_mixdev = true;
+                       }
                }
        }
+
+       mutex_unlock(&mixdev->mutex);
+       return 0;
 }
 
 /*
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
  * device. Note that this function is called with mousedev_mix->mutex
  * held.
  */
-static void mixdev_close_devices(void)
+static void mixdev_close_devices(struct mousedev *mixdev)
 {
-       struct mousedev *mousedev;
+       mutex_lock(&mixdev->mutex);
 
-       if (--mousedev_mix->open)
-               return;
+       if (!--mixdev->open) {
+               struct mousedev *mousedev;
 
-       list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
-               if (mousedev->opened_by_mixdev) {
-                       mousedev->opened_by_mixdev = false;
-                       mousedev_close_device(mousedev);
+               list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
+                       if (mousedev->opened_by_mixdev) {
+                               mousedev->opened_by_mixdev = false;
+                               mousedev_close_device(mousedev);
+                       }
                }
        }
+
+       mutex_unlock(&mixdev->mutex);
 }
 
 
@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
        mousedev_detach_client(mousedev, client);
        kfree(client);
 
-       mousedev_close_device(mousedev);
+       mousedev->close_device(mousedev);
 
        return 0;
 }
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
        client->mousedev = mousedev;
        mousedev_attach_client(mousedev, client);
 
-       error = mousedev_open_device(mousedev);
+       error = mousedev->open_device(mousedev);
        if (error)
                goto err_free_client;
 
@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
 
        if (mixdev) {
                dev_set_name(&mousedev->dev, "mice");
+
+               mousedev->open_device = mixdev_open_devices;
+               mousedev->close_device = mixdev_close_devices;
        } else {
                int dev_no = minor;
                /* Normalize device number if it falls into legacy range */
                if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
                        dev_no -= MOUSEDEV_MINOR_BASE;
                dev_set_name(&mousedev->dev, "mouse%d", dev_no);
+
+               mousedev->open_device = mousedev_open_device;
+               mousedev->close_device = mousedev_close_device;
        }
 
        mousedev->exist = true;
-       mousedev->is_mixdev = mixdev;
        mousedev->handle.dev = input_get_device(dev);
        mousedev->handle.name = dev_name(&mousedev->dev);
        mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
        device_del(&mousedev->dev);
        mousedev_cleanup(mousedev);
        input_free_minor(MINOR(mousedev->dev.devt));
-       if (!mousedev->is_mixdev)
+       if (mousedev != mousedev_mix)
                input_unregister_handle(&mousedev->handle);
        put_device(&mousedev->dev);
 }
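Note: the mousedev hunks above drop the is_mixdev flag; each device now carries its own open_device/close_device callbacks, assigned once at creation, so mousedev_open() and mousedev_release() no longer branch on the device kind and the mixdev variants can take their own mutex. Reduced to its bare shape (names illustrative, not from the patch):

    struct example_dev {
            int  (*open_device)(struct example_dev *dev);
            void (*close_device)(struct example_dev *dev);
            /* ... */
    };

    static int example_open(struct example_dev *dev)
    {
            /* No "is this the mixed device?" test needed here any more. */
            return dev->open_device(dev);
    }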
index 5c342b3139e8989ed737a91180cb6b3fa0f58f9c..3c0f57efe7b16154f36b7b564f4210b5cf4e88f6 100644 (file)
@@ -134,7 +134,8 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
        } else if (!ts->low_latency_req.dev) {
                /* First contact, request 100 us latency. */
                dev_pm_qos_add_ancestor_request(&ts->client->dev,
-                                               &ts->low_latency_req, 100);
+                                               &ts->low_latency_req,
+                                               DEV_PM_QOS_RESUME_LATENCY, 100);
        }
 
        /* SYN_REPORT */
index 5194afb39e781062bc6e82d55163174494fbb751..1c0c151d108c2fe40d3cf8c4bda1d0b4c4d9112c 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_METAG_PERFCOUNTER_IRQS)  += irq-metag.o
 obj-$(CONFIG_ARCH_MOXART)              += irq-moxart.o
 obj-$(CONFIG_ORION_IRQCHIP)            += irq-orion.o
 obj-$(CONFIG_ARCH_SUNXI)               += irq-sun4i.o
+obj-$(CONFIG_ARCH_SUNXI)               += irq-sunxi-nmi.o
 obj-$(CONFIG_ARCH_SPEAR3XX)            += spear-shirq.o
 obj-$(CONFIG_ARM_GIC)                  += irq-gic.o
 obj-$(CONFIG_ARM_NVIC)                 += irq-nvic.o
index 540956465ed2db759ca72eae11a26b16a4c17047..41be897df8d5521250d79dee5362c08fe0f80067 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -42,6 +43,7 @@
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + irq*4)
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
+#define ARMADA_375_PPI_CAUSE                   (0x10)
 
 #define ARMADA_370_XP_SW_TRIG_INT_OFFS           (0x4)
 #define ARMADA_370_XP_IN_DRBEL_MSK_OFFS          (0xc)
@@ -352,7 +354,63 @@ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
        .xlate = irq_domain_xlate_onecell,
 };
 
-static asmlinkage void __exception_irq_entry
+#ifdef CONFIG_PCI_MSI
+static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
+{
+       u32 msimask, msinr;
+
+       msimask = readl_relaxed(per_cpu_int_base +
+                               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+               & PCI_MSI_DOORBELL_MASK;
+
+       writel(~msimask, per_cpu_int_base +
+              ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+       for (msinr = PCI_MSI_DOORBELL_START;
+            msinr < PCI_MSI_DOORBELL_END; msinr++) {
+               int irq;
+
+               if (!(msimask & BIT(msinr)))
+                       continue;
+
+               irq = irq_find_mapping(armada_370_xp_msi_domain,
+                                      msinr - 16);
+
+               if (is_chained)
+                       generic_handle_irq(irq);
+               else
+                       handle_IRQ(irq, regs);
+       }
+}
+#else
+static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
+#endif
+
+static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
+                                                 struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_get_chip(irq);
+       unsigned long irqmap, irqn;
+       unsigned int cascade_irq;
+
+       chained_irq_enter(chip, desc);
+
+       irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+
+       if (irqmap & BIT(0)) {
+               armada_370_xp_handle_msi_irq(NULL, true);
+               irqmap &= ~BIT(0);
+       }
+
+       for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+               cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
+               generic_handle_irq(cascade_irq);
+       }
+
+       chained_irq_exit(chip, desc);
+}
+
+static void __exception_irq_entry
 armada_370_xp_handle_irq(struct pt_regs *regs)
 {
        u32 irqstat, irqnr;
@@ -372,31 +430,9 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
                        continue;
                }
 
-#ifdef CONFIG_PCI_MSI
                /* MSI handling */
-               if (irqnr == 1) {
-                       u32 msimask, msinr;
-
-                       msimask = readl_relaxed(per_cpu_int_base +
-                                               ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
-                               & PCI_MSI_DOORBELL_MASK;
-
-                       writel(~msimask, per_cpu_int_base +
-                              ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
-
-                       for (msinr = PCI_MSI_DOORBELL_START;
-                            msinr < PCI_MSI_DOORBELL_END; msinr++) {
-                               int irq;
-
-                               if (!(msimask & BIT(msinr)))
-                                       continue;
-
-                               irq = irq_find_mapping(armada_370_xp_msi_domain,
-                                                      msinr - 16);
-                               handle_IRQ(irq, regs);
-                       }
-               }
-#endif
+               if (irqnr == 1)
+                       armada_370_xp_handle_msi_irq(regs, false);
 
 #ifdef CONFIG_SMP
                /* IPI Handling */
@@ -427,6 +463,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
                                             struct device_node *parent)
 {
        struct resource main_int_res, per_cpu_int_res;
+       int parent_irq;
        u32 control;
 
        BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -455,8 +492,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
        BUG_ON(!armada_370_xp_mpic_domain);
 
-       irq_set_default_host(armada_370_xp_mpic_domain);
-
 #ifdef CONFIG_SMP
        armada_xp_mpic_smp_cpu_init();
 
@@ -472,7 +507,14 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
        armada_370_xp_msi_init(node, main_int_res.start);
 
-       set_handle_irq(armada_370_xp_handle_irq);
+       parent_irq = irq_of_parse_and_map(node, 0);
+       if (parent_irq <= 0) {
+               irq_set_default_host(armada_370_xp_mpic_domain);
+               set_handle_irq(armada_370_xp_handle_irq);
+       } else {
+               irq_set_chained_handler(parent_irq,
+                                       armada_370_xp_mpic_handle_cascade_irq);
+       }
 
        return 0;
 }
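
The hunks above factor the MSI handling into a helper and let the MPIC act either as the root controller or as a cascade behind a parent interrupt. A rough sketch of that root-vs-cascade decision and the chained_irq_enter()/chained_irq_exit() bracket; the demo_* names and the single hwirq lookup are placeholders, not the driver's actual symbols:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/* would be set up with irq_domain_add_linear() during controller init */
static struct irq_domain *demo_domain;

/* Runs in the context of the parent interrupt when cascaded. */
static void demo_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	/* read the hardware cause register here, then for each set bit: */
	generic_handle_irq(irq_find_mapping(demo_domain, 0));
	chained_irq_exit(chip, desc);
}

static void demo_wire_up(struct device_node *node)
{
	int parent_irq = irq_of_parse_and_map(node, 0);

	if (parent_irq <= 0) {
		/* no parent in the DT: register as the root handler
		 * instead, e.g. via set_handle_irq() on ARM
		 */
		return;
	}

	irq_set_chained_handler(parent_irq, demo_cascade_handler);
}
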
index 1693b8e7f26ad83c9686af3af7273e41c5e4af04..5916d6cdafa1c9b8b1909ce0aa77027e2ce2759f 100644 (file)
@@ -95,7 +95,7 @@ struct armctrl_ic {
 };
 
 static struct armctrl_ic intc __read_mostly;
-static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static void __exception_irq_entry bcm2835_handle_irq(
        struct pt_regs *regs);
 
 static void armctrl_mask_irq(struct irq_data *d)
@@ -196,7 +196,7 @@ static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
        handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
 }
 
-static asmlinkage void __exception_irq_entry bcm2835_handle_irq(
+static void __exception_irq_entry bcm2835_handle_irq(
        struct pt_regs *regs)
 {
        u32 stat, irq;
index 341c6016812de0e17fbd4c1601708723409351c5..531769b2433a6dc3c305784e7836b11915f91ff2 100644 (file)
@@ -50,7 +50,7 @@
 
 union gic_base {
        void __iomem *common_base;
-       void __percpu __iomem **percpu_base;
+       void __percpu * __iomem *percpu_base;
 };
 
 struct gic_chip_data {
@@ -279,7 +279,7 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
 #define gic_set_wake   NULL
 #endif
 
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
        u32 irqstat, irqnr;
        struct gic_chip_data *gic = &gic_data[0];
@@ -648,7 +648,7 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 #endif
 
 #ifdef CONFIG_SMP
-void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
        int cpu;
        unsigned long flags, map = 0;
@@ -869,7 +869,7 @@ static struct notifier_block gic_cpu_notifier = {
 };
 #endif
 
-const struct irq_domain_ops gic_irq_domain_ops = {
+static const struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = gic_irq_domain_xlate,
 };
@@ -974,7 +974,8 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 #ifdef CONFIG_OF
 static int gic_cnt __initdata;
 
-int __init gic_of_init(struct device_node *node, struct device_node *parent)
+static int __init
+gic_of_init(struct device_node *node, struct device_node *parent)
 {
        void __iomem *cpu_base;
        void __iomem *dist_base;
index 2cb7cd0bc2f527136d8e430137436480332d2512..3c8827fe83f37544a9a5b1e8a01ae1486c7c4440 100644 (file)
@@ -194,8 +194,7 @@ static struct mmp_intc_conf mmp2_conf = {
        .conf_mask      = 0x7f,
 };
 
-static asmlinkage void __exception_irq_entry
-mmp_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
 {
        int irq, hwirq;
 
@@ -207,8 +206,7 @@ mmp_handle_irq(struct pt_regs *regs)
        handle_IRQ(irq, regs);
 }
 
-static asmlinkage void __exception_irq_entry
-mmp2_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
 {
        int irq, hwirq;
 
index 5552fc2bf28a0147550d4cb22914af9dff12d3e0..00b3cc908f762d58fd92720d73fd2e3f1cb28e3f 100644 (file)
@@ -44,7 +44,7 @@ struct moxart_irq_data {
 
 static struct moxart_irq_data intc;
 
-static asmlinkage void __exception_irq_entry handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry handle_irq(struct pt_regs *regs)
 {
        u32 irqstat;
        int hwirq;
index 8e41be62812e1663df05b58f75c52ecfdc7b844f..e25f246cd2fb9a75d90ab57a4d1d1e12feb494f1 100644 (file)
@@ -30,7 +30,7 @@
 
 static struct irq_domain *orion_irq_domain;
 
-static asmlinkage void
+static void
 __exception_irq_entry orion_handle_irq(struct pt_regs *regs)
 {
        struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
index 3a070c587ed969f426bbaa2d4141a395e1f92424..581eefe331ae44a58512680bc5462137ae6b21d4 100644 (file)
@@ -47,7 +47,7 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
        ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
 }
 
-static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
 {
        void __iomem *base = sirfsoc_irqdomain->host_data;
        u32 irqstat, irqnr;
index a5438d8892454971b07b1f917f849de0e3e56f45..6fcef4a95a18af9462431a1d1ef39cd0f100aaa2 100644 (file)
 static void __iomem *sun4i_irq_base;
 static struct irq_domain *sun4i_irq_domain;
 
-static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
 
 static void sun4i_irq_ack(struct irq_data *irqd)
 {
        unsigned int irq = irqd_to_hwirq(irqd);
-       unsigned int irq_off = irq % 32;
-       int reg = irq / 32;
-       u32 val;
 
-       val = readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
-       writel(val | (1 << irq_off),
-              sun4i_irq_base + SUN4I_IRQ_PENDING_REG(reg));
+       if (irq != 0)
+               return; /* Only IRQ 0 / the ENMI needs to be acked */
+
+       writel(BIT(0), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
 }
 
 static void sun4i_irq_mask(struct irq_data *irqd)
@@ -76,16 +74,16 @@ static void sun4i_irq_unmask(struct irq_data *irqd)
 
 static struct irq_chip sun4i_irq_chip = {
        .name           = "sun4i_irq",
-       .irq_ack        = sun4i_irq_ack,
+       .irq_eoi        = sun4i_irq_ack,
        .irq_mask       = sun4i_irq_mask,
        .irq_unmask     = sun4i_irq_unmask,
+       .flags          = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
 };
 
 static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
                         irq_hw_number_t hw)
 {
-       irq_set_chip_and_handler(virq, &sun4i_irq_chip,
-                                handle_level_irq);
+       irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
        set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
 
        return 0;
@@ -109,7 +107,7 @@ static int __init sun4i_of_init(struct device_node *node,
        writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
        writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));
 
-       /* Mask all the interrupts */
+       /* Unmask all the interrupts, ENABLE_REG(x) is used for masking */
        writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
        writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
        writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));
@@ -134,16 +132,30 @@ static int __init sun4i_of_init(struct device_node *node,
 
        return 0;
 }
-IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-ic", sun4i_of_init);
+IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_of_init);
 
-static asmlinkage void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
 {
        u32 irq, hwirq;
 
+       /*
+        * hwirq == 0 can mean one of 3 things:
+        * 1) no more irqs pending
+        * 2) irq 0 pending
+        * 3) spurious irq
+        * So if we immediately get a reading of 0, check the irq-pending reg
+        * to differentiate between 2 and 3. We only do this once to avoid
+        * the extra check in the common case of 1 happening after having
+        * read the vector-reg once.
+        */
        hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
-       while (hwirq != 0) {
+       if (hwirq == 0 &&
+                 !(readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)) & BIT(0)))
+               return;
+
+       do {
                irq = irq_find_mapping(sun4i_irq_domain, hwirq);
                handle_IRQ(irq, regs);
                hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
-       }
+       } while (hwirq != 0);
 }
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
new file mode 100644 (file)
index 0000000..12f547a
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Allwinner A20/A31 SoCs NMI IRQ chip driver.
+ *
+ * Carlo Caione <carlo.caione@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/irqchip/chained_irq.h>
+#include "irqchip.h"
+
+#define SUNXI_NMI_SRC_TYPE_MASK        0x00000003
+
+enum {
+       SUNXI_SRC_TYPE_LEVEL_LOW = 0,
+       SUNXI_SRC_TYPE_EDGE_FALLING,
+       SUNXI_SRC_TYPE_LEVEL_HIGH,
+       SUNXI_SRC_TYPE_EDGE_RISING,
+};
+
+struct sunxi_sc_nmi_reg_offs {
+       u32 ctrl;
+       u32 pend;
+       u32 enable;
+};
+
+static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
+       .ctrl   = 0x00,
+       .pend   = 0x04,
+       .enable = 0x08,
+};
+
+static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
+       .ctrl   = 0x00,
+       .pend   = 0x04,
+       .enable = 0x34,
+};
+
+static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
+                                     u32 val)
+{
+       irq_reg_writel(val, gc->reg_base + off);
+}
+
+static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
+{
+       return irq_reg_readl(gc->reg_base + off);
+}
+
+static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+       struct irq_domain *domain = irq_desc_get_handler_data(desc);
+       struct irq_chip *chip = irq_get_chip(irq);
+       unsigned int virq = irq_find_mapping(domain, 0);
+
+       chained_irq_enter(chip, desc);
+       generic_handle_irq(virq);
+       chained_irq_exit(chip, desc);
+}
+
+static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+       struct irq_chip_type *ct = gc->chip_types;
+       u32 src_type_reg;
+       u32 ctrl_off = ct->regs.type;
+       unsigned int src_type;
+       unsigned int i;
+
+       irq_gc_lock(gc);
+
+       switch (flow_type & IRQF_TRIGGER_MASK) {
+       case IRQ_TYPE_EDGE_FALLING:
+               src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               src_type = SUNXI_SRC_TYPE_EDGE_RISING;
+               break;
+       case IRQ_TYPE_LEVEL_HIGH:
+               src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
+               break;
+       case IRQ_TYPE_NONE:
+       case IRQ_TYPE_LEVEL_LOW:
+               src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
+               break;
+       default:
+               irq_gc_unlock(gc);
+               pr_err("%s: Cannot assign multiple trigger modes to IRQ %d.\n",
+                       __func__, data->irq);
+               return -EBADR;
+       }
+
+       irqd_set_trigger_type(data, flow_type);
+       irq_setup_alt_chip(data, flow_type);
+
+       for (i = 0; i < gc->num_ct; i++, ct++)
+               if (ct->type & flow_type)
+                       ctrl_off = ct->regs.type;
+
+       src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
+       src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
+       src_type_reg |= src_type;
+       sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
+
+       irq_gc_unlock(gc);
+
+       return IRQ_SET_MASK_OK;
+}
+
+static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
+                                       struct sunxi_sc_nmi_reg_offs *reg_offs)
+{
+       struct irq_domain *domain;
+       struct irq_chip_generic *gc;
+       unsigned int irq;
+       unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+       int ret;
+
+
+       domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
+       if (!domain) {
+               pr_err("%s: Could not register interrupt domain.\n", node->name);
+               return -ENOMEM;
+       }
+
+       ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name,
+                                            handle_fasteoi_irq, clr, 0,
+                                            IRQ_GC_INIT_MASK_CACHE);
+       if (ret) {
+                pr_err("%s: Could not allocate generic interrupt chip.\n",
+                        node->name);
+                goto fail_irqd_remove;
+       }
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (irq <= 0) {
+               pr_err("%s: unable to parse irq\n", node->name);
+               ret = -EINVAL;
+               goto fail_irqd_remove;
+       }
+
+       gc = irq_get_domain_generic_chip(domain, 0);
+       gc->reg_base = of_iomap(node, 0);
+       if (!gc->reg_base) {
+               pr_err("%s: unable to map resource\n", node->name);
+               ret = -ENOMEM;
+               goto fail_irqd_remove;
+       }
+
+       gc->chip_types[0].type                  = IRQ_TYPE_LEVEL_MASK;
+       gc->chip_types[0].chip.irq_mask         = irq_gc_mask_clr_bit;
+       gc->chip_types[0].chip.irq_unmask       = irq_gc_mask_set_bit;
+       gc->chip_types[0].chip.irq_eoi          = irq_gc_ack_set_bit;
+       gc->chip_types[0].chip.irq_set_type     = sunxi_sc_nmi_set_type;
+       gc->chip_types[0].chip.flags            = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+       gc->chip_types[0].regs.ack              = reg_offs->pend;
+       gc->chip_types[0].regs.mask             = reg_offs->enable;
+       gc->chip_types[0].regs.type             = reg_offs->ctrl;
+
+       gc->chip_types[1].type                  = IRQ_TYPE_EDGE_BOTH;
+       gc->chip_types[1].chip.name             = gc->chip_types[0].chip.name;
+       gc->chip_types[1].chip.irq_ack          = irq_gc_ack_set_bit;
+       gc->chip_types[1].chip.irq_mask         = irq_gc_mask_clr_bit;
+       gc->chip_types[1].chip.irq_unmask       = irq_gc_mask_set_bit;
+       gc->chip_types[1].chip.irq_set_type     = sunxi_sc_nmi_set_type;
+       gc->chip_types[1].regs.ack              = reg_offs->pend;
+       gc->chip_types[1].regs.mask             = reg_offs->enable;
+       gc->chip_types[1].regs.type             = reg_offs->ctrl;
+       gc->chip_types[1].handler               = handle_edge_irq;
+
+       sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
+       sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
+
+       irq_set_handler_data(irq, domain);
+       irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);
+
+       return 0;
+
+fail_irqd_remove:
+       irq_domain_remove(domain);
+
+       return ret;
+}
+
+static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
+                                       struct device_node *parent)
+{
+       return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
+}
+IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
+
+static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
+                                       struct device_node *parent)
+{
+       return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
+}
+IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
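
The new driver above exposes the NMI controller through a generic irq chip with level and edge chip types and the IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED flags. A hedged sketch of how a consumer might request that line; the demo driver, handler and trigger choice are invented and only meant to show which path ends up in sunxi_sc_nmi_set_type():

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_nmi_thread(int irq, void *data)
{
	/* handled in thread context; the EOI is issued after the thread
	 * runs because of IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED
	 */
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* the trigger flag is what sunxi_sc_nmi_set_type() receives */
	return devm_request_threaded_irq(&pdev->dev, irq, NULL,
					 demo_nmi_thread,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					 "demo-nmi", pdev);
}
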
index 8e21ae0bab4658a2ee2beb15a07a7a396411261f..473f09a74d4d4c53f80777f3173753e89d82b33e 100644 (file)
@@ -228,7 +228,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
  * Keep iterating over all registered VICs until there are no pending
  * interrupts.
  */
-static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
 {
        int i, handled;
 
index 1846e7d666819ded083bb366ed20bee1e01fafa8..eb6e91efdec8bca0babc0d098e9e11aa7b66f257 100644 (file)
@@ -178,8 +178,7 @@ static struct irq_domain_ops vt8500_irq_domain_ops = {
        .xlate = irq_domain_xlate_onecell,
 };
 
-static asmlinkage
-void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
 {
        u32 stat, i;
        int irqnr, virq;
index f693f1bc1348e1ca178add91c86cde036dda4d7b..e1c2f963289374b38a3a7bce93c930cb6565b376 100644 (file)
@@ -122,7 +122,7 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d)
 static int xtensa_mx_irq_set_affinity(struct irq_data *d,
                const struct cpumask *dest, bool force)
 {
-       unsigned mask = 1u << cpumask_any(dest);
+       unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
 
        set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
        return 0;
index 8ed04c4a43eef7dc2a48710b8a9107d754d757bd..ceb3a4318f73a2be0f8cad589b5e45da00becdf0 100644 (file)
@@ -50,7 +50,7 @@ static void zevio_irq_ack(struct irq_data *irqd)
        readl(gc->reg_base + regs->ack);
 }
 
-static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
 {
        int irqnr;
 
index f496afce29deba24b782c4cc108d39b8312c2f46..cad3e24955526be2c8f745eb7dbaf1f8d5623813 100644 (file)
@@ -10,8 +10,7 @@
 
 #include <linux/init.h>
 #include <linux/of_irq.h>
-
-#include "irqchip.h"
+#include <linux/irqchip.h>
 
 /*
  * This special of_device_id is the sentinel at the end of the
index a45aab9f6bb135e64c0f0a420f75ed2508e17e92..1c3ae57082ed7bc1a2dcd8583a507875f3b7d92b 100644 (file)
@@ -251,8 +251,6 @@ static int arizona_apply_hardware_patch(struct arizona* arizona)
        unsigned int fll, sysclk;
        int ret, err;
 
-       regcache_cache_bypass(arizona->regmap, true);
-
        /* Cache existing FLL and SYSCLK settings */
        ret = regmap_read(arizona->regmap, ARIZONA_FLL1_CONTROL_1, &fll);
        if (ret != 0) {
@@ -322,8 +320,6 @@ err_fll:
                        err);
        }
 
-       regcache_cache_bypass(arizona->regmap, false);
-
        if (ret != 0)
                return ret;
        else
index 714e2135210ec2ddc989efcd44f3e800b80f5bcf..281a827472754ad9b79939e67eb11820d70e3eaa 100644 (file)
@@ -26,7 +26,9 @@
 #include <linux/mfd/samsung/core.h>
 #include <linux/mfd/samsung/irq.h>
 #include <linux/mfd/samsung/rtc.h>
+#include <linux/mfd/samsung/s2mpa01.h>
 #include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps14.h>
 #include <linux/mfd/samsung/s5m8763.h>
 #include <linux/mfd/samsung/s5m8767.h>
 #include <linux/regmap.h>
@@ -69,18 +71,53 @@ static const struct mfd_cell s2mps11_devs[] = {
        }
 };
 
+static const struct mfd_cell s2mps14_devs[] = {
+       {
+               .name = "s2mps14-pmic",
+       }, {
+               .name = "s2mps14-rtc",
+       }, {
+               .name = "s2mps14-clk",
+       }
+};
+
+static const struct mfd_cell s2mpa01_devs[] = {
+       {
+               .name = "s2mpa01-pmic",
+       },
+};
+
 #ifdef CONFIG_OF
 static struct of_device_id sec_dt_match[] = {
        {       .compatible = "samsung,s5m8767-pmic",
                .data = (void *)S5M8767X,
-       },
-       {       .compatible = "samsung,s2mps11-pmic",
+       }, {
+               .compatible = "samsung,s2mps11-pmic",
                .data = (void *)S2MPS11X,
+       }, {
+               .compatible = "samsung,s2mps14-pmic",
+               .data = (void *)S2MPS14X,
+       }, {
+               .compatible = "samsung,s2mpa01-pmic",
+               .data = (void *)S2MPA01,
+       }, {
+               /* Sentinel */
        },
-       {},
 };
 #endif
 
+static bool s2mpa01_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case S2MPA01_REG_INT1M:
+       case S2MPA01_REG_INT2M:
+       case S2MPA01_REG_INT3M:
+               return false;
+       default:
+               return true;
+       }
+}
+
 static bool s2mps11_volatile(struct device *dev, unsigned int reg)
 {
        switch (reg) {
@@ -111,6 +148,15 @@ static const struct regmap_config sec_regmap_config = {
        .val_bits = 8,
 };
 
+static const struct regmap_config s2mpa01_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = S2MPA01_REG_LDO_OVCB4,
+       .volatile_reg = s2mpa01_volatile,
+       .cache_type = REGCACHE_FLAT,
+};
+
 static const struct regmap_config s2mps11_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
@@ -120,6 +166,15 @@ static const struct regmap_config s2mps11_regmap_config = {
        .cache_type = REGCACHE_FLAT,
 };
 
+static const struct regmap_config s2mps14_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = S2MPS14_REG_LDODSCH3,
+       .volatile_reg = s2mps11_volatile,
+       .cache_type = REGCACHE_FLAT,
+};
+
 static const struct regmap_config s5m8763_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
@@ -138,9 +193,18 @@ static const struct regmap_config s5m8767_regmap_config = {
        .cache_type = REGCACHE_FLAT,
 };
 
-static const struct regmap_config sec_rtc_regmap_config = {
+static const struct regmap_config s5m_rtc_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = SEC_RTC_REG_MAX,
+};
+
+static const struct regmap_config s2mps14_rtc_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
+
+       .max_register = S2MPS_RTC_REG_MAX,
 };
 
 #ifdef CONFIG_OF
@@ -180,24 +244,24 @@ static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata(
 }
 #endif
 
-static inline int sec_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long sec_i2c_get_driver_data(struct i2c_client *i2c,
                                                const struct i2c_device_id *id)
 {
 #ifdef CONFIG_OF
        if (i2c->dev.of_node) {
                const struct of_device_id *match;
                match = of_match_node(sec_dt_match, i2c->dev.of_node);
-               return (int)match->data;
+               return (unsigned long)match->data;
        }
 #endif
-       return (int)id->driver_data;
+       return id->driver_data;
 }
 
 static int sec_pmic_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
        struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev);
-       const struct regmap_config *regmap;
+       const struct regmap_config *regmap, *regmap_rtc;
        struct sec_pmic_dev *sec_pmic;
        int ret;
 
@@ -229,17 +293,34 @@ static int sec_pmic_probe(struct i2c_client *i2c,
        }
 
        switch (sec_pmic->device_type) {
+       case S2MPA01:
+               regmap = &s2mpa01_regmap_config;
+               break;
        case S2MPS11X:
                regmap = &s2mps11_regmap_config;
+               /*
+                * The rtc-s5m driver does not support S2MPS11 and there
+                * is no mfd_cell for the S2MPS11 RTC device.
+                * However, we must pass something to devm_regmap_init_i2c(),
+                * so use an S5M-like regmap config even though it wouldn't work.
+                */
+               regmap_rtc = &s5m_rtc_regmap_config;
+               break;
+       case S2MPS14X:
+               regmap = &s2mps14_regmap_config;
+               regmap_rtc = &s2mps14_rtc_regmap_config;
                break;
        case S5M8763X:
                regmap = &s5m8763_regmap_config;
+               regmap_rtc = &s5m_rtc_regmap_config;
                break;
        case S5M8767X:
                regmap = &s5m8767_regmap_config;
+               regmap_rtc = &s5m_rtc_regmap_config;
                break;
        default:
                regmap = &sec_regmap_config;
+               regmap_rtc = &s5m_rtc_regmap_config;
                break;
        }
 
@@ -252,10 +333,13 @@ static int sec_pmic_probe(struct i2c_client *i2c,
        }
 
        sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+       if (!sec_pmic->rtc) {
+               dev_err(&i2c->dev, "Failed to allocate I2C for RTC\n");
+               return -ENODEV;
+       }
        i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
 
-       sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
-                       &sec_rtc_regmap_config);
+       sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc, regmap_rtc);
        if (IS_ERR(sec_pmic->regmap_rtc)) {
                ret = PTR_ERR(sec_pmic->regmap_rtc);
                dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n",
@@ -283,10 +367,18 @@ static int sec_pmic_probe(struct i2c_client *i2c,
                ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs,
                                      ARRAY_SIZE(s5m8767_devs), NULL, 0, NULL);
                break;
+       case S2MPA01:
+               ret = mfd_add_devices(sec_pmic->dev, -1, s2mpa01_devs,
+                                     ARRAY_SIZE(s2mpa01_devs), NULL, 0, NULL);
+               break;
        case S2MPS11X:
                ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs,
                                      ARRAY_SIZE(s2mps11_devs), NULL, 0, NULL);
                break;
+       case S2MPS14X:
+               ret = mfd_add_devices(sec_pmic->dev, -1, s2mps14_devs,
+                                     ARRAY_SIZE(s2mps14_devs), NULL, 0, NULL);
+               break;
        default:
                /* If this happens, the probe function is the problem */
                BUG();
index 4de494f51d401b3d85544633621091d9e7cceff6..64e7913aadc6cf3fbe68389edd8ba986e7a1ec8c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * sec-irq.c
  *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
  *              http://www.samsung.com
  *
  *  This program is free software; you can redistribute  it and/or modify it
@@ -19,6 +19,7 @@
 #include <linux/mfd/samsung/core.h>
 #include <linux/mfd/samsung/irq.h>
 #include <linux/mfd/samsung/s2mps11.h>
+#include <linux/mfd/samsung/s2mps14.h>
 #include <linux/mfd/samsung/s5m8763.h>
 #include <linux/mfd/samsung/s5m8767.h>
 
@@ -59,13 +60,13 @@ static const struct regmap_irq s2mps11_irqs[] = {
                .reg_offset = 1,
                .mask = S2MPS11_IRQ_RTC60S_MASK,
        },
-       [S2MPS11_IRQ_RTCA1] = {
+       [S2MPS11_IRQ_RTCA0] = {
                .reg_offset = 1,
-               .mask = S2MPS11_IRQ_RTCA1_MASK,
+               .mask = S2MPS11_IRQ_RTCA0_MASK,
        },
-       [S2MPS11_IRQ_RTCA2] = {
+       [S2MPS11_IRQ_RTCA1] = {
                .reg_offset = 1,
-               .mask = S2MPS11_IRQ_RTCA2_MASK,
+               .mask = S2MPS11_IRQ_RTCA1_MASK,
        },
        [S2MPS11_IRQ_SMPL] = {
                .reg_offset = 1,
@@ -89,6 +90,76 @@ static const struct regmap_irq s2mps11_irqs[] = {
        },
 };
 
+static const struct regmap_irq s2mps14_irqs[] = {
+       [S2MPS14_IRQ_PWRONF] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_PWRONF_MASK,
+       },
+       [S2MPS14_IRQ_PWRONR] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_PWRONR_MASK,
+       },
+       [S2MPS14_IRQ_JIGONBF] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_JIGONBF_MASK,
+       },
+       [S2MPS14_IRQ_JIGONBR] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_JIGONBR_MASK,
+       },
+       [S2MPS14_IRQ_ACOKBF] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_ACOKBF_MASK,
+       },
+       [S2MPS14_IRQ_ACOKBR] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_ACOKBR_MASK,
+       },
+       [S2MPS14_IRQ_PWRON1S] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_PWRON1S_MASK,
+       },
+       [S2MPS14_IRQ_MRB] = {
+               .reg_offset = 0,
+               .mask = S2MPS11_IRQ_MRB_MASK,
+       },
+       [S2MPS14_IRQ_RTC60S] = {
+               .reg_offset = 1,
+               .mask = S2MPS11_IRQ_RTC60S_MASK,
+       },
+       [S2MPS14_IRQ_RTCA1] = {
+               .reg_offset = 1,
+               .mask = S2MPS11_IRQ_RTCA1_MASK,
+       },
+       [S2MPS14_IRQ_RTCA0] = {
+               .reg_offset = 1,
+               .mask = S2MPS11_IRQ_RTCA0_MASK,
+       },
+       [S2MPS14_IRQ_SMPL] = {
+               .reg_offset = 1,
+               .mask = S2MPS11_IRQ_SMPL_MASK,
+       },
+       [S2MPS14_IRQ_RTC1S] = {
+               .reg_offset = 1,
+               .mask = S2MPS11_IRQ_RTC1S_MASK,
+       },
+       [S2MPS14_IRQ_WTSR] = {
+               .reg_offset = 1,
+               .mask = S2MPS11_IRQ_WTSR_MASK,
+       },
+       [S2MPS14_IRQ_INT120C] = {
+               .reg_offset = 2,
+               .mask = S2MPS11_IRQ_INT120C_MASK,
+       },
+       [S2MPS14_IRQ_INT140C] = {
+               .reg_offset = 2,
+               .mask = S2MPS11_IRQ_INT140C_MASK,
+       },
+       [S2MPS14_IRQ_TSD] = {
+               .reg_offset = 2,
+               .mask = S2MPS14_IRQ_TSD_MASK,
+       },
+};
 
 static const struct regmap_irq s5m8767_irqs[] = {
        [S5M8767_IRQ_PWRR] = {
@@ -246,6 +317,16 @@ static const struct regmap_irq_chip s2mps11_irq_chip = {
        .ack_base = S2MPS11_REG_INT1,
 };
 
+static const struct regmap_irq_chip s2mps14_irq_chip = {
+       .name = "s2mps14",
+       .irqs = s2mps14_irqs,
+       .num_irqs = ARRAY_SIZE(s2mps14_irqs),
+       .num_regs = 3,
+       .status_base = S2MPS14_REG_INT1,
+       .mask_base = S2MPS14_REG_INT1M,
+       .ack_base = S2MPS14_REG_INT1,
+};
+
 static const struct regmap_irq_chip s5m8767_irq_chip = {
        .name = "s5m8767",
        .irqs = s5m8767_irqs,
@@ -297,6 +378,12 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
                                  sec_pmic->irq_base, &s2mps11_irq_chip,
                                  &sec_pmic->irq_data);
                break;
+       case S2MPS14X:
+               ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
+                                 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                 sec_pmic->irq_base, &s2mps14_irq_chip,
+                                 &sec_pmic->irq_data);
+               break;
        default:
                dev_err(sec_pmic->dev, "Unknown device type %d\n",
                        sec_pmic->device_type);
index 1e9a4b2102f9d89c9549e3d226cf7ef0c91866ef..bffc584e4a4355f25f565ae6d0aa77244e556238 100644 (file)
@@ -80,8 +80,7 @@ static const struct reg_default wm5102_revb_patch[] = {
 int wm5102_patch(struct arizona *arizona)
 {
        const struct reg_default *wm5102_patch;
-       int ret = 0;
-       int i, patch_size;
+       int patch_size;
 
        switch (arizona->rev) {
        case 0:
@@ -92,21 +91,9 @@ int wm5102_patch(struct arizona *arizona)
                patch_size = ARRAY_SIZE(wm5102_revb_patch);
        }
 
-       regcache_cache_bypass(arizona->regmap, true);
-
-       for (i = 0; i < patch_size; i++) {
-               ret = regmap_write(arizona->regmap, wm5102_patch[i].reg,
-                                  wm5102_patch[i].def);
-               if (ret != 0) {
-                       dev_err(arizona->dev, "Failed to write %x = %x: %d\n",
-                               wm5102_patch[i].reg, wm5102_patch[i].def, ret);
-                       goto out;
-               }
-       }
-
-out:
-       regcache_cache_bypass(arizona->regmap, false);
-       return ret;
+       return regmap_multi_reg_write_bypassed(arizona->regmap,
+                                              wm5102_patch,
+                                              patch_size);
 }
 
 static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
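
wm5102_patch() above now hands the whole table to regmap_multi_reg_write_bypassed() instead of toggling regcache_cache_bypass() around a hand-rolled loop. A small sketch of that helper with a made-up register table; addresses and values are illustrative:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_default demo_patch[] = {
	{ 0x0080, 0x0003 },
	{ 0x0081, 0xe022 },
};

static int demo_apply_patch(struct regmap *map)
{
	/* the whole table is written with the register cache bypassed */
	return regmap_multi_reg_write_bypassed(map, demo_patch,
					       ARRAY_SIZE(demo_patch));
}
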
index 55cd110a49c4009451b0ca4b35144c8631b8c7d1..c204b7d1532c0f1c910b25a7435d3f12c8435732 100644 (file)
@@ -2607,7 +2607,7 @@ int dw_mci_probe(struct dw_mci *host)
 
        tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
        host->card_workqueue = alloc_workqueue("dw-mci-card",
-                       WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
+                       WQ_MEM_RECLAIM, 1);
        if (!host->card_workqueue) {
                ret = -ENOMEM;
                goto err_dmaunmap;
index d72783dd7b962f798f1256067e3d4d9f5a1c5366..c0670237e7a2ebb1c4b845735ac38c591465e815 100644 (file)
@@ -897,7 +897,7 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
                if (!flctl->qos_request) {
                        ret = dev_pm_qos_add_request(&flctl->pdev->dev,
                                                        &flctl->pm_qos,
-                                                       DEV_PM_QOS_LATENCY,
+                                                       DEV_PM_QOS_RESUME_LATENCY,
                                                        100);
                        if (ret < 0)
                                dev_err(&flctl->pdev->dev,
index 3b6d0ba86c714d34204bd359d61a54ece62cd139..70a225c8df5c846a4d7045f2e7649925a5d8c50c 100644 (file)
@@ -17649,8 +17649,6 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        tg3_init_bufmgr_config(tp);
 
-       features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
@@ -17682,7 +17680,8 @@ static int tg3_init_one(struct pci_dev *pdev,
                        features |= NETIF_F_TSO_ECN;
        }
 
-       dev->features |= features;
+       dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
+                        NETIF_F_HW_VLAN_CTAG_RX;
        dev->vlan_features |= features;
 
        /*
index f418f4f20f94a0f22524a6d5c1c878c2a40f7146..8d76fca7fde75085da8364a621e97e332f1e23bc 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
@@ -88,8 +89,9 @@
 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
-#define MVNETA_SGMII_SERDES_CFG                         0x24A0
+#define MVNETA_SERDES_CFG                       0x24A0
 #define      MVNETA_SGMII_SERDES_PROTO          0x0cc7
+#define      MVNETA_RGMII_SERDES_PROTO          0x0667
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define      MVNETA_FORCE_UNI                    BIT(21)
 #define MVNETA_TXQ_CMD_1                         0x24e4
 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 #define MVNETA_GMAC_CTRL_2                       0x2c08
-#define      MVNETA_GMAC2_PSC_ENABLE             BIT(3)
+#define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 #define MVNETA_GMAC_STATUS                       0x2c10
@@ -710,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 }
 
-
-
-/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
-static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
-{
-       u32  val;
-
-       val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-       if (enable)
-               val |= MVNETA_GMAC2_PORT_RGMII;
-       else
-               val &= ~MVNETA_GMAC2_PORT_RGMII;
-
-       mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-}
-
-/* Config SGMII port */
-static void mvneta_port_sgmii_config(struct mvneta_port *pp)
-{
-       u32 val;
-
-       val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-       val |= MVNETA_GMAC2_PSC_ENABLE;
-       mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
-
-       mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-}
-
 /* Start the Ethernet port RX and TX activity */
 static void mvneta_port_up(struct mvneta_port *pp)
 {
@@ -2756,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
        if (phy_mode == PHY_INTERFACE_MODE_SGMII)
-               mvneta_port_sgmii_config(pp);
+               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+       else
+               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+
+       val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 
-       mvneta_gmac_rgmii_set(pp, 1);
+       val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
 
        /* Cancel Port Reset */
-       val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
        val &= ~MVNETA_GMAC2_PORT_RESET;
        mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
 
@@ -2774,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 static int mvneta_probe(struct platform_device *pdev)
 {
        const struct mbus_dram_target_info *dram_target_info;
+       struct resource *res;
        struct device_node *dn = pdev->dev.of_node;
        struct device_node *phy_node;
        u32 phy_addr;
@@ -2838,9 +2815,15 @@ static int mvneta_probe(struct platform_device *pdev)
 
        clk_prepare_enable(pp->clk);
 
-       pp->base = of_iomap(dn, 0);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               err = -ENODEV;
+               goto err_clk;
+       }
+
+       pp->base = devm_ioremap_resource(&pdev->dev, res);
-       if (pp->base == NULL) {
-               err = -ENOMEM;
+       if (IS_ERR(pp->base)) {
+               err = PTR_ERR(pp->base);
                goto err_clk;
        }
 
@@ -2848,7 +2831,7 @@ static int mvneta_probe(struct platform_device *pdev)
        pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
        if (!pp->stats) {
                err = -ENOMEM;
-               goto err_unmap;
+               goto err_clk;
        }
 
        for_each_possible_cpu(cpu) {
@@ -2913,8 +2896,6 @@ err_deinit:
        mvneta_deinit(pp);
 err_free_stats:
        free_percpu(pp->stats);
-err_unmap:
-       iounmap(pp->base);
 err_clk:
        clk_disable_unprepare(pp->clk);
 err_free_irq:
@@ -2934,7 +2915,6 @@ static int mvneta_remove(struct platform_device *pdev)
        mvneta_deinit(pp);
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->stats);
-       iounmap(pp->base);
        irq_dispose_mapping(dev->irq);
        free_netdev(dev);
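
The probe() conversion above switches from of_iomap() to devm_ioremap_resource(), which hands back an ERR_PTR() rather than NULL on failure and releases the mapping automatically on driver detach. A generic sketch of that pattern with placeholder names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *demo_map_regs(struct platform_device *pdev, int *err)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		/* never NULL on failure, so always check with IS_ERR() */
		*err = PTR_ERR(base);
		return NULL;
	}

	*err = 0;
	return base;
}
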
 
index 936c15364739171993bfcb52598bd03f5be8afb6..d413e60071d47cd7e74d86652f5a0fb813cd5eb0 100644 (file)
@@ -2681,7 +2681,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-       int ret = __mlx4_init_one(pdev, 0);
+       const struct pci_device_id *id;
+       int ret;
+
+       id = pci_match_id(mlx4_pci_table, pdev);
+       ret = __mlx4_init_one(pdev, id->driver_data);
 
        return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
index ce2cfddbed504c1b960e32bc87691ff6965a1fce..656c65ddadb4af03ff032f8f2310ebb91a7f0189 100644 (file)
@@ -4765,7 +4765,9 @@ static int qlge_probe(struct pci_dev *pdev,
        ndev->features = ndev->hw_features;
        ndev->vlan_features = ndev->hw_features;
        /* vlan gets same features (except vlan filter) */
-       ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+       ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
+                                NETIF_F_HW_VLAN_CTAG_TX |
+                                NETIF_F_HW_VLAN_CTAG_RX);
 
        if (test_bit(QL_DMA64, &qdev->flags))
                ndev->features |= NETIF_F_HIGHDMA;
index c14d39bf32d06a6f8d1b42a29b36dcd84b25c8f6..d7b2e947184b549a5034e4a54423ecde9050d721 100644 (file)
@@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
        dev->tx_queue_len = TX_Q_LIMIT;
 
        dev->features |= IFB_FEATURES;
-       dev->vlan_features |= IFB_FEATURES;
+       dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
+                                              NETIF_F_HW_VLAN_STAG_TX);
 
        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
index dd10d5817d2a975b414dc40bf0f4937b46c263be..f9e96c4275589ebc63b3d87432fe50471c287872 100644 (file)
@@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
 // precondition: never called in_interrupt
 static void usbnet_terminate_urbs(struct usbnet *dev)
 {
-       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
        DECLARE_WAITQUEUE(wait, current);
        int temp;
 
        /* ensure there are no more active urbs */
-       add_wait_queue(&unlink_wakeup, &wait);
+       add_wait_queue(&dev->wait, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
-       dev->wait = &unlink_wakeup;
        temp = unlink_urbs(dev, &dev->txq) +
                unlink_urbs(dev, &dev->rxq);
 
@@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
                                  "waited for %d urb completions\n", temp);
        }
        set_current_state(TASK_RUNNING);
-       dev->wait = NULL;
-       remove_wait_queue(&unlink_wakeup, &wait);
+       remove_wait_queue(&dev->wait, &wait);
 }
 
 int usbnet_stop (struct net_device *net)
 {
        struct usbnet           *dev = netdev_priv(net);
        struct driver_info      *info = dev->driver_info;
-       int                     retval;
+       int                     retval, pm;
 
        clear_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_stop_queue (net);
@@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net)
                   net->stats.rx_packets, net->stats.tx_packets,
                   net->stats.rx_errors, net->stats.tx_errors);
 
+       /* do not race against resume */
+       pm = usb_autopm_get_interface(dev->intf);
        /* allow minidriver to stop correctly (wireless devices to turn off
         * radio etc) */
        if (info->stop) {
@@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net)
        dev->flags = 0;
        del_timer_sync (&dev->delay);
        tasklet_kill (&dev->bh);
+       if (!pm)
+               usb_autopm_put_interface(dev->intf);
+
        if (info->manage_power &&
            !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
                info->manage_power(dev, 0);
@@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
        /* restart RX again after disabling due to high error rate */
        clear_bit(EVENT_RX_KILL, &dev->flags);
 
-       // waiting for all pending urbs to complete?
-       if (dev->wait) {
-               if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
-                       wake_up (dev->wait);
-               }
+       /* waiting for all pending urbs to complete?
+        * only then can we forgo submitting anew
+        */
+       if (waitqueue_active(&dev->wait)) {
+               if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
+                       wake_up_all(&dev->wait);
 
        // or are we maybe short a few urbs?
        } else if (netif_running (dev->net) &&
@@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        dev->driver_name = name;
        dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
                                | NETIF_MSG_PROBE | NETIF_MSG_LINK);
+       init_waitqueue_head(&dev->wait);
        skb_queue_head_init (&dev->rxq);
        skb_queue_head_init (&dev->txq);
        skb_queue_head_init (&dev->done);
@@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
                spin_unlock_irq(&dev->txq.lock);
 
                if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
-                       /* handle remote wakeup ASAP */
-                       if (!dev->wait &&
-                               netif_device_present(dev->net) &&
+                       /* handle remote wakeup ASAP
+                        * we cannot race against stop
+                        */
+                       if (netif_device_present(dev->net) &&
                                !timer_pending(&dev->delay) &&
                                !test_bit(EVENT_RX_HALT, &dev->flags))
                                        rx_alloc_submit(dev, GFP_NOIO);
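
The usbnet changes above move from an on-stack wait queue to one embedded in struct usbnet so that stop, resume and the driver bottom half all see the same queue. A stripped-down sketch of the same wake-up pattern, using invented demo_* names and a plain atomic counter standing in for the URB queues:

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static atomic_t demo_pending = ATOMIC_INIT(0);

/* completion path: wake the waiters once nothing is outstanding */
static void demo_complete_one(void)
{
	if (atomic_dec_and_test(&demo_pending) && waitqueue_active(&demo_wait))
		wake_up_all(&demo_wait);
}

/* teardown path: sleep until every outstanding item has completed */
static void demo_drain(void)
{
	wait_event(demo_wait, atomic_read(&demo_pending) == 0);
}
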
index 5b374370f71c702d1eec4153c21038cc882795cd..c0e7c64765abd449070a7bce6826a994c674b8c7 100644 (file)
@@ -286,7 +286,10 @@ static void veth_setup(struct net_device *dev)
        dev->features |= NETIF_F_LLTX;
        dev->features |= VETH_FEATURES;
        dev->vlan_features = dev->features &
-                            ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
+                            ~(NETIF_F_HW_VLAN_CTAG_TX |
+                              NETIF_F_HW_VLAN_STAG_TX |
+                              NETIF_F_HW_VLAN_CTAG_RX |
+                              NETIF_F_HW_VLAN_STAG_RX);
        dev->destructor = veth_dev_free;
 
        dev->hw_features = VETH_FEATURES;
index 5632a99cbbd24fdd059c182e5b28d1d19b3ea4e9..841b60831df1b2e83c12f55ee5e8fc90874b9475 100644 (file)
@@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
                if (err)
                        break;
        } while (rq->vq->num_free);
-       if (unlikely(!virtqueue_kick(rq->vq)))
-               return false;
+       virtqueue_kick(rq->vq);
        return !oom;
 }
 
@@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        err = xmit_skb(sq, skb);
 
        /* This should not happen! */
-       if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
+       if (unlikely(err)) {
                dev->stats.tx_fifo_errors++;
                if (net_ratelimit())
                        dev_warn(&dev->dev,
@@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
+       virtqueue_kick(sq->vq);
 
        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
index caddc1b427a919659200c539552a619af49c5d17..42a2e06512f2f0900ac7f9384b1e247013f2b11d 100644 (file)
@@ -764,7 +764,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
        /*
         * Overwrite TX done handler
         */
-       PREPARE_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
+       INIT_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone);
 
        return 0;
 }
index 89e888a78899e2b61281f7007406e5f937cc28a0..1b95a405628f311bf068891f958324f0b5edd0c8 100644 (file)
@@ -903,6 +903,38 @@ struct device_node *of_find_node_by_phandle(phandle handle)
 }
 EXPORT_SYMBOL(of_find_node_by_phandle);
 
+/**
+ * of_property_count_elems_of_size - Count the number of elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property in a device node and count the number of elements of
+ * size elem_size in it. Returns the number of elements on success, -EINVAL if
+ * the property does not exist or its length is not a multiple of elem_size,
+ * and -ENODATA if the property does not have a value.
+ */
+int of_property_count_elems_of_size(const struct device_node *np,
+                               const char *propname, int elem_size)
+{
+       struct property *prop = of_find_property(np, propname, NULL);
+
+       if (!prop)
+               return -EINVAL;
+       if (!prop->value)
+               return -ENODATA;
+
+       if (prop->length % elem_size != 0) {
+               pr_err("size of %s in node %s is not a multiple of %d\n",
+                      propname, np->full_name, elem_size);
+               return -EINVAL;
+       }
+
+       return prop->length / elem_size;
+}
+EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
+
 /**
  * of_find_property_value_of_size
  *
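
The kernel-doc above describes the new of_property_count_elems_of_size() helper: it returns the element count or a negative errno. A hedged usage sketch; the "demo,values" property name is invented for the example:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/slab.h>

static int demo_read_u32_values(struct device_node *np, u32 **out)
{
	int n = of_property_count_elems_of_size(np, "demo,values",
						sizeof(u32));
	u32 *vals;
	int ret;

	if (n < 0)		/* -EINVAL or -ENODATA, as documented above */
		return n;

	vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return -ENOMEM;

	ret = of_property_read_u32_array(np, "demo,values", vals, n);
	if (ret) {
		kfree(vals);
		return ret;
	}

	*out = vals;
	return n;
}
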
index 17ce88f79d2b1ed3bbd351726f9d0af68be7d9ba..2e48ecf09e2c5a120d4b07879904a0384137982d 100644 (file)
@@ -294,14 +294,12 @@ no_valid_irq:
 static void clear_irq(unsigned int irq)
 {
        unsigned int pos, nvec;
-       struct irq_desc *desc;
        struct msi_desc *msi;
        struct pcie_port *pp;
        struct irq_data *data = irq_get_irq_data(irq);
 
        /* get the port structure */
-       desc = irq_to_desc(irq);
-       msi = irq_desc_get_msi_desc(desc);
+       msi = irq_data_get_msi(data);
        pp = sys_to_pcie(msi->dev->bus->sysdata);
        if (!pp) {
                BUG();
index b6162be4df40a2f6a614d983491037dd865b2c8d..2b859249303b8088352995bc23b57c47f7c36472 100644 (file)
@@ -93,7 +93,6 @@ struct acpiphp_slot {
        struct list_head funcs;         /* one slot may have different
                                           objects (i.e. for each function) */
        struct slot *slot;
-       struct mutex crit_sect;
 
        u8              device;         /* pci device# */
        u32             flags;          /* see below */
@@ -117,20 +116,30 @@ struct acpiphp_func {
 };
 
 struct acpiphp_context {
-       acpi_handle handle;
+       struct acpi_hotplug_context hp;
        struct acpiphp_func func;
        struct acpiphp_bridge *bridge;
        unsigned int refcount;
 };
 
+static inline struct acpiphp_context *to_acpiphp_context(struct acpi_hotplug_context *hp)
+{
+       return container_of(hp, struct acpiphp_context, hp);
+}
+
 static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
 {
        return container_of(func, struct acpiphp_context, func);
 }
 
+static inline struct acpi_device *func_to_acpi_device(struct acpiphp_func *func)
+{
+       return func_to_context(func)->hp.self;
+}
+
 static inline acpi_handle func_to_handle(struct acpiphp_func *func)
 {
-       return func_to_context(func)->handle;
+       return func_to_acpi_device(func)->handle;
 }
 
 /*
@@ -158,7 +167,6 @@ struct acpiphp_attention_info
 
 #define FUNC_HAS_STA           (0x00000001)
 #define FUNC_HAS_EJ0           (0x00000002)
-#define FUNC_HAS_DCK            (0x00000004)
 
 /* function prototypes */
 
index 7c7a388c85ab3679732f7971552790057abec4f5..828acf422c17eca4fe02cab5feca62e41e50908e 100644 (file)
 
 static LIST_HEAD(bridge_list);
 static DEFINE_MUTEX(bridge_mutex);
-static DEFINE_MUTEX(acpiphp_context_lock);
 
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
+static void acpiphp_post_dock_fixup(struct acpi_device *adev);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void hotplug_event(acpi_handle handle, u32 type, void *data);
+static void hotplug_event(u32 type, struct acpiphp_context *context);
 static void free_bridge(struct kref *kref);
 
-static void acpiphp_context_handler(acpi_handle handle, void *context)
-{
-       /* Intentionally empty. */
-}
-
 /**
  * acpiphp_init_context - Create hotplug context and grab a reference to it.
- * @handle: ACPI object handle to create the context for.
+ * @adev: ACPI device object to create the context for.
  *
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
  */
-static struct acpiphp_context *acpiphp_init_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev)
 {
        struct acpiphp_context *context;
-       acpi_status status;
 
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return NULL;
 
-       context->handle = handle;
        context->refcount = 1;
-       status = acpi_attach_data(handle, acpiphp_context_handler, context);
-       if (ACPI_FAILURE(status)) {
-               kfree(context);
-               return NULL;
-       }
+       acpi_set_hp_context(adev, &context->hp, acpiphp_hotplug_notify, NULL,
+                           acpiphp_post_dock_fixup);
        return context;
 }
 
 /**
  * acpiphp_get_context - Get hotplug context and grab a reference to it.
- * @handle: ACPI object handle to get the context for.
+ * @adev: ACPI device object to get the context for.
  *
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
  */
-static struct acpiphp_context *acpiphp_get_context(acpi_handle handle)
+static struct acpiphp_context *acpiphp_get_context(struct acpi_device *adev)
 {
-       struct acpiphp_context *context = NULL;
-       acpi_status status;
-       void *data;
+       struct acpiphp_context *context;
 
-       status = acpi_get_data(handle, acpiphp_context_handler, &data);
-       if (ACPI_SUCCESS(status)) {
-               context = data;
-               context->refcount++;
-       }
+       if (!adev->hp)
+               return NULL;
+
+       context = to_acpiphp_context(adev->hp);
+       context->refcount++;
        return context;
 }
 
 /**
  * acpiphp_put_context - Drop a reference to ACPI hotplug context.
- * @handle: ACPI object handle to put the context for.
+ * @context: ACPI hotplug context to drop a reference to.
  *
  * The context object is removed if there are no more references to it.
  *
- * Call under acpiphp_context_lock.
+ * Call under acpi_hp_context_lock.
  */
 static void acpiphp_put_context(struct acpiphp_context *context)
 {
@@ -130,7 +118,7 @@ static void acpiphp_put_context(struct acpiphp_context *context)
                return;
 
        WARN_ON(context->bridge);
-       acpi_detach_data(context->handle, acpiphp_context_handler);
+       context->hp.self->hp = NULL;
        kfree(context);
 }
 
@@ -144,6 +132,27 @@ static inline void put_bridge(struct acpiphp_bridge *bridge)
        kref_put(&bridge->ref, free_bridge);
 }
 
+static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
+{
+       struct acpiphp_context *context;
+
+       acpi_lock_hp_context();
+       context = acpiphp_get_context(adev);
+       if (!context || context->func.parent->is_going_away) {
+               acpi_unlock_hp_context();
+               return NULL;
+       }
+       get_bridge(context->func.parent);
+       acpiphp_put_context(context);
+       acpi_unlock_hp_context();
+       return context;
+}
+
+static void acpiphp_let_context_go(struct acpiphp_context *context)
+{
+       put_bridge(context->func.parent);
+}
+
 static void free_bridge(struct kref *kref)
 {
        struct acpiphp_context *context;
@@ -151,7 +160,7 @@ static void free_bridge(struct kref *kref)
        struct acpiphp_slot *slot, *next;
        struct acpiphp_func *func, *tmp;
 
-       mutex_lock(&acpiphp_context_lock);
+       acpi_lock_hp_context();
 
        bridge = container_of(kref, struct acpiphp_bridge, ref);
 
@@ -175,31 +184,32 @@ static void free_bridge(struct kref *kref)
        pci_dev_put(bridge->pci_dev);
        kfree(bridge);
 
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 }
 
-/*
- * the _DCK method can do funny things... and sometimes not
- * hah-hah funny.
+/**
+ * acpiphp_post_dock_fixup - Post-dock fixups for PCI devices.
+ * @adev: ACPI device object corresponding to a PCI device.
  *
- * TBD - figure out a way to only call fixups for
- * systems that require them.
+ * TBD - figure out a way to only call fixups for systems that require them.
  */
-static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
+static void acpiphp_post_dock_fixup(struct acpi_device *adev)
 {
-       struct acpiphp_context *context = data;
-       struct pci_bus *bus = context->func.slot->bus;
+       struct acpiphp_context *context = acpiphp_grab_context(adev);
+       struct pci_bus *bus;
        u32 buses;
 
-       if (!bus->self)
+       if (!context)
                return;
 
+       bus = context->func.slot->bus;
+       if (!bus->self)
+               goto out;
+
        /* fixup bad _DCK function that rewrites
         * secondary bridge on slot
         */
-       pci_read_config_dword(bus->self,
-                       PCI_PRIMARY_BUS,
-                       &buses);
+       pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses);
 
        if (((buses >> 8) & 0xff) != bus->busn_res.start) {
                buses = (buses & 0xff000000)
@@ -208,33 +218,11 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
                        | ((unsigned int)(bus->busn_res.end) << 16);
                pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
        }
-}
-
-static void dock_event(acpi_handle handle, u32 type, void *data)
-{
-       struct acpiphp_context *context;
-
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_get_context(handle);
-       if (!context || WARN_ON(context->handle != handle)
-           || context->func.parent->is_going_away) {
-               mutex_unlock(&acpiphp_context_lock);
-               return;
-       }
-       get_bridge(context->func.parent);
-       acpiphp_put_context(context);
-       mutex_unlock(&acpiphp_context_lock);
-
-       hotplug_event(handle, type, data);
 
-       put_bridge(context->func.parent);
+ out:
+       acpiphp_let_context_go(context);
 }
 
-static const struct acpi_dock_ops acpiphp_dock_ops = {
-       .fixup = post_dock_fixups,
-       .handler = dock_event,
-};
-
 /* Check whether the PCI device is managed by native PCIe hotplug driver */
 static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
 {
@@ -264,26 +252,19 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
        return true;
 }
 
-static void acpiphp_dock_init(void *data)
-{
-       struct acpiphp_context *context = data;
-
-       get_bridge(context->func.parent);
-}
-
-static void acpiphp_dock_release(void *data)
-{
-       struct acpiphp_context *context = data;
-
-       put_bridge(context->func.parent);
-}
-
-/* callback routine to register each ACPI PCI slot object */
-static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
-                                void **rv)
+/**
+ * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
+ * @handle: ACPI handle of the object to add a context to.
+ * @lvl: Not used.
+ * @data: The object's parent ACPIPHP bridge.
+ * @rv: Not used.
+ */
+static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
+                                      void **rv)
 {
        struct acpiphp_bridge *bridge = data;
        struct acpiphp_context *context;
+       struct acpi_device *adev;
        struct acpiphp_slot *slot;
        struct acpiphp_func *newfunc;
        acpi_status status = AE_OK;
@@ -293,9 +274,6 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
        struct pci_dev *pdev = bridge->pci_dev;
        u32 val;
 
-       if (pdev && device_is_managed_by_native_pciehp(pdev))
-               return AE_OK;
-
        status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND)
@@ -303,31 +281,34 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                                "can't evaluate _ADR (%#x)\n", status);
                return AE_OK;
        }
+       if (acpi_bus_get_device(handle, &adev))
+               return AE_OK;
 
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
 
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_init_context(handle);
+       acpi_lock_hp_context();
+       context = acpiphp_init_context(adev);
        if (!context) {
-               mutex_unlock(&acpiphp_context_lock);
+               acpi_unlock_hp_context();
                acpi_handle_err(handle, "No hotplug context\n");
                return AE_NOT_EXIST;
        }
        newfunc = &context->func;
        newfunc->function = function;
        newfunc->parent = bridge;
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 
-       if (acpi_has_method(handle, "_EJ0"))
+       /*
+        * If this is a dock device, its _EJ0 should be executed by the dock
+        * notify handler after calling _DCK.
+        */
+       if (!is_dock_device(adev) && acpi_has_method(handle, "_EJ0"))
                newfunc->flags = FUNC_HAS_EJ0;
 
        if (acpi_has_method(handle, "_STA"))
                newfunc->flags |= FUNC_HAS_STA;
 
-       if (acpi_has_method(handle, "_DCK"))
-               newfunc->flags |= FUNC_HAS_DCK;
-
        /* search for objects that share the same slot */
        list_for_each_entry(slot, &bridge->slots, node)
                if (slot->device == device)
@@ -335,19 +316,26 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
 
        slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
        if (!slot) {
-               status = AE_NO_MEMORY;
-               goto err;
+               acpi_lock_hp_context();
+               acpiphp_put_context(context);
+               acpi_unlock_hp_context();
+               return AE_NO_MEMORY;
        }
 
        slot->bus = bridge->pci_bus;
        slot->device = device;
        INIT_LIST_HEAD(&slot->funcs);
-       mutex_init(&slot->crit_sect);
 
        list_add_tail(&slot->node, &bridge->slots);
 
-       /* Register slots for ejectable functions only. */
-       if (acpi_pci_check_ejectable(pbus, handle)  || is_dock_device(handle)) {
+       /*
+        * Expose slots to user space for functions that have _EJ0 or _RMV or
+        * are located in dock stations.  Do not expose them for devices handled
+        * by the native PCIe hotplug (PCIeHP), because that code is supposed to
+        * expose slots to user space in those cases.
+        */
+       if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
+           && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
                unsigned long long sun;
                int retval;
 
@@ -381,44 +369,16 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                                       &val, 60*1000))
                slot->flags |= SLOT_ENABLED;
 
-       if (is_dock_device(handle)) {
-               /* we don't want to call this device's _EJ0
-                * because we want the dock notify handler
-                * to call it after it calls _DCK
-                */
-               newfunc->flags &= ~FUNC_HAS_EJ0;
-               if (register_hotplug_dock_device(handle,
-                       &acpiphp_dock_ops, context,
-                       acpiphp_dock_init, acpiphp_dock_release))
-                       pr_debug("failed to register dock device\n");
-       }
-
-       /* install notify handler */
-       if (!(newfunc->flags & FUNC_HAS_DCK)) {
-               status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-                                                    handle_hotplug_event,
-                                                    context);
-               if (ACPI_FAILURE(status))
-                       acpi_handle_err(handle,
-                                       "failed to install notify handler\n");
-       }
-
        return AE_OK;
-
- err:
-       mutex_lock(&acpiphp_context_lock);
-       acpiphp_put_context(context);
-       mutex_unlock(&acpiphp_context_lock);
-       return status;
 }
 
-static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
+static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
 {
        struct acpiphp_context *context;
        struct acpiphp_bridge *bridge = NULL;
 
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_get_context(handle);
+       acpi_lock_hp_context();
+       context = acpiphp_get_context(adev);
        if (context) {
                bridge = context->bridge;
                if (bridge)
@@ -426,7 +386,7 @@ static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
 
                acpiphp_put_context(context);
        }
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
        return bridge;
 }
 
@@ -434,22 +394,15 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
 {
        struct acpiphp_slot *slot;
        struct acpiphp_func *func;
-       acpi_status status;
 
        list_for_each_entry(slot, &bridge->slots, node) {
                list_for_each_entry(func, &slot->funcs, sibling) {
-                       acpi_handle handle = func_to_handle(func);
-
-                       if (is_dock_device(handle))
-                               unregister_hotplug_dock_device(handle);
+                       struct acpi_device *adev = func_to_acpi_device(func);
 
-                       if (!(func->flags & FUNC_HAS_DCK)) {
-                               status = acpi_remove_notify_handler(handle,
-                                                       ACPI_SYSTEM_NOTIFY,
-                                                       handle_hotplug_event);
-                               if (ACPI_FAILURE(status))
-                                       pr_err("failed to remove notify handler\n");
-                       }
+                       acpi_lock_hp_context();
+                       adev->hp->notify = NULL;
+                       adev->hp->fixup = NULL;
+                       acpi_unlock_hp_context();
                }
                slot->flags |= SLOT_IS_GOING_AWAY;
                if (slot->slot)
@@ -460,9 +413,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
        list_del(&bridge->list);
        mutex_unlock(&bridge_mutex);
 
-       mutex_lock(&acpiphp_context_lock);
+       acpi_lock_hp_context();
        bridge->is_going_away = true;
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 }
 
 /**
@@ -492,33 +445,6 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
        return max;
 }
 
-/**
- * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree.
- * @handle: ACPI device object handle to start from.
- */
-static void acpiphp_bus_trim(acpi_handle handle)
-{
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_get_device(handle, &adev);
-       if (adev)
-               acpi_bus_trim(adev);
-}
-
-/**
- * acpiphp_bus_add - Scan ACPI namespace subtree.
- * @handle: ACPI object handle to start the scan from.
- */
-static void acpiphp_bus_add(acpi_handle handle)
-{
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_scan(handle);
-       acpi_bus_get_device(handle, &adev);
-       if (acpi_device_enumerated(adev))
-               acpi_device_set_power(adev, ACPI_STATE_D0);
-}
-
 static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
 {
        struct acpiphp_func *func;
@@ -558,9 +484,13 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
 {
        struct acpiphp_func *func;
 
-       list_for_each_entry(func, &slot->funcs, sibling)
-               acpiphp_bus_add(func_to_handle(func));
+       list_for_each_entry(func, &slot->funcs, sibling) {
+               struct acpi_device *adev = func_to_acpi_device(func);
 
+               acpi_bus_scan(adev->handle);
+               if (acpi_device_enumerated(adev))
+                       acpi_device_set_power(adev, ACPI_STATE_D0);
+       }
        return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
 }
 
@@ -625,32 +555,15 @@ static void __ref enable_slot(struct acpiphp_slot *slot)
        }
 }
 
-/* return first device in slot, acquiring a reference on it */
-static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
-{
-       struct pci_bus *bus = slot->bus;
-       struct pci_dev *dev;
-       struct pci_dev *ret = NULL;
-
-       down_read(&pci_bus_sem);
-       list_for_each_entry(dev, &bus->devices, bus_list)
-               if (PCI_SLOT(dev->devfn) == slot->device) {
-                       ret = pci_dev_get(dev);
-                       break;
-               }
-       up_read(&pci_bus_sem);
-
-       return ret;
-}
-
 /**
  * disable_slot - disable a slot
  * @slot: ACPI PHP slot
  */
 static void disable_slot(struct acpiphp_slot *slot)
 {
+       struct pci_bus *bus = slot->bus;
+       struct pci_dev *dev, *prev;
        struct acpiphp_func *func;
-       struct pci_dev *pdev;
 
        /*
         * enable_slot() enumerates all functions in this device via
@@ -658,22 +571,18 @@ static void disable_slot(struct acpiphp_slot *slot)
         * methods (_EJ0, etc.) or not.  Therefore, we remove all functions
         * here.
         */
-       while ((pdev = dev_in_slot(slot))) {
-               pci_stop_and_remove_bus_device(pdev);
-               pci_dev_put(pdev);
-       }
+       list_for_each_entry_safe_reverse(dev, prev, &bus->devices, bus_list)
+               if (PCI_SLOT(dev->devfn) == slot->device)
+                       pci_stop_and_remove_bus_device(dev);
 
        list_for_each_entry(func, &slot->funcs, sibling)
-               acpiphp_bus_trim(func_to_handle(func));
+               acpi_bus_trim(func_to_acpi_device(func));
 
        slot->flags &= (~SLOT_ENABLED);
 }
 
-static bool acpiphp_no_hotplug(acpi_handle handle)
+static bool acpiphp_no_hotplug(struct acpi_device *adev)
 {
-       struct acpi_device *adev = NULL;
-
-       acpi_bus_get_device(handle, &adev);
        return adev && adev->flags.no_hotplug;
 }
 
@@ -682,7 +591,7 @@ static bool slot_no_hotplug(struct acpiphp_slot *slot)
        struct acpiphp_func *func;
 
        list_for_each_entry(func, &slot->funcs, sibling)
-               if (acpiphp_no_hotplug(func_to_handle(func)))
+               if (acpiphp_no_hotplug(func_to_acpi_device(func)))
                        return true;
 
        return false;
@@ -747,28 +656,25 @@ static inline bool device_status_valid(unsigned int sta)
  */
 static void trim_stale_devices(struct pci_dev *dev)
 {
-       acpi_handle handle = ACPI_HANDLE(&dev->dev);
+       struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
        struct pci_bus *bus = dev->subordinate;
        bool alive = false;
 
-       if (handle) {
+       if (adev) {
                acpi_status status;
                unsigned long long sta;
 
-               status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+               status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
                alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
-                       || acpiphp_no_hotplug(handle);
+                       || acpiphp_no_hotplug(adev);
        }
-       if (!alive) {
-               u32 v;
+       if (!alive)
+               alive = pci_device_is_present(dev);
 
-               /* Check if the device responds. */
-               alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0);
-       }
        if (!alive) {
                pci_stop_and_remove_bus_device(dev);
-               if (handle)
-                       acpiphp_bus_trim(handle);
+               if (adev)
+                       acpi_bus_trim(adev);
        } else if (bus) {
                struct pci_dev *child, *tmp;
 
@@ -800,7 +706,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                struct pci_bus *bus = slot->bus;
                struct pci_dev *dev, *tmp;
 
-               mutex_lock(&slot->crit_sect);
                if (slot_no_hotplug(slot)) {
                        ; /* do nothing */
                } else if (device_status_valid(get_slot_status(slot))) {
@@ -815,7 +720,6 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                } else {
                        disable_slot(slot);
                }
-               mutex_unlock(&slot->crit_sect);
        }
 }
 
@@ -855,11 +759,11 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
  * ACPI event handlers
  */
 
-void acpiphp_check_host_bridge(acpi_handle handle)
+void acpiphp_check_host_bridge(struct acpi_device *adev)
 {
        struct acpiphp_bridge *bridge;
 
-       bridge = acpiphp_handle_to_bridge(handle);
+       bridge = acpiphp_dev_to_bridge(adev);
        if (bridge) {
                pci_lock_rescan_remove();
 
@@ -872,73 +776,52 @@ void acpiphp_check_host_bridge(acpi_handle handle)
 
 static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
 
-static void hotplug_event(acpi_handle handle, u32 type, void *data)
+static void hotplug_event(u32 type, struct acpiphp_context *context)
 {
-       struct acpiphp_context *context = data;
+       acpi_handle handle = context->hp.self->handle;
        struct acpiphp_func *func = &context->func;
+       struct acpiphp_slot *slot = func->slot;
        struct acpiphp_bridge *bridge;
-       char objname[64];
-       struct acpi_buffer buffer = { .length = sizeof(objname),
-                                     .pointer = objname };
 
-       mutex_lock(&acpiphp_context_lock);
+       acpi_lock_hp_context();
        bridge = context->bridge;
        if (bridge)
                get_bridge(bridge);
 
-       mutex_unlock(&acpiphp_context_lock);
+       acpi_unlock_hp_context();
 
        pci_lock_rescan_remove();
-       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
                /* bus re-enumerate */
-               pr_debug("%s: Bus check notify on %s\n", __func__, objname);
-               pr_debug("%s: re-enumerating slots under %s\n",
-                        __func__, objname);
-               if (bridge) {
+               acpi_handle_debug(handle, "Bus check in %s()\n", __func__);
+               if (bridge)
                        acpiphp_check_bridge(bridge);
-               } else {
-                       struct acpiphp_slot *slot = func->slot;
-
-                       if (slot->flags & SLOT_IS_GOING_AWAY)
-                               break;
-
-                       mutex_lock(&slot->crit_sect);
+               else if (!(slot->flags & SLOT_IS_GOING_AWAY))
                        enable_slot(slot);
-                       mutex_unlock(&slot->crit_sect);
-               }
+
                break;
 
        case ACPI_NOTIFY_DEVICE_CHECK:
                /* device check */
-               pr_debug("%s: Device check notify on %s\n", __func__, objname);
+               acpi_handle_debug(handle, "Device check in %s()\n", __func__);
                if (bridge) {
                        acpiphp_check_bridge(bridge);
-               } else {
-                       struct acpiphp_slot *slot = func->slot;
-                       int ret;
-
-                       if (slot->flags & SLOT_IS_GOING_AWAY)
-                               break;
-
+               } else if (!(slot->flags & SLOT_IS_GOING_AWAY)) {
                        /*
                         * Check if anything has changed in the slot and rescan
                         * from the parent if that's the case.
                         */
-                       mutex_lock(&slot->crit_sect);
-                       ret = acpiphp_rescan_slot(slot);
-                       mutex_unlock(&slot->crit_sect);
-                       if (ret)
+                       if (acpiphp_rescan_slot(slot))
                                acpiphp_check_bridge(func->parent);
                }
                break;
 
        case ACPI_NOTIFY_EJECT_REQUEST:
                /* request device eject */
-               pr_debug("%s: Device eject notify on %s\n", __func__, objname);
-               acpiphp_disable_and_eject_slot(func->slot);
+               acpi_handle_debug(handle, "Eject request in %s()\n", __func__);
+               acpiphp_disable_and_eject_slot(slot);
                break;
        }
 
@@ -947,106 +830,41 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
                put_bridge(bridge);
 }
 
-static void hotplug_event_work(void *data, u32 type)
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type)
 {
-       struct acpiphp_context *context = data;
-       acpi_handle handle = context->handle;
-
-       acpi_scan_lock_acquire();
+       struct acpiphp_context *context;
 
-       hotplug_event(handle, type, context);
+       context = acpiphp_grab_context(adev);
+       if (!context)
+               return -ENODATA;
 
-       acpi_scan_lock_release();
-       acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
-       put_bridge(context->func.parent);
+       hotplug_event(type, context);
+       acpiphp_let_context_go(context);
+       return 0;
 }
 
 /**
- * handle_hotplug_event - handle ACPI hotplug event
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @data: pointer to acpiphp_context structure
+ * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus.
+ * @bus: PCI bus to enumerate the slots for.
  *
- * Handles ACPI event notification on slots.
- */
-static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
-{
-       struct acpiphp_context *context;
-       u32 ost_code = ACPI_OST_SC_SUCCESS;
-       acpi_status status;
-
-       switch (type) {
-       case ACPI_NOTIFY_BUS_CHECK:
-       case ACPI_NOTIFY_DEVICE_CHECK:
-               break;
-       case ACPI_NOTIFY_EJECT_REQUEST:
-               ost_code = ACPI_OST_SC_EJECT_IN_PROGRESS;
-               acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-               break;
-
-       case ACPI_NOTIFY_DEVICE_WAKE:
-               return;
-
-       case ACPI_NOTIFY_FREQUENCY_MISMATCH:
-               acpi_handle_err(handle, "Device cannot be configured due "
-                               "to a frequency mismatch\n");
-               goto out;
-
-       case ACPI_NOTIFY_BUS_MODE_MISMATCH:
-               acpi_handle_err(handle, "Device cannot be configured due "
-                               "to a bus mode mismatch\n");
-               goto out;
-
-       case ACPI_NOTIFY_POWER_FAULT:
-               acpi_handle_err(handle, "Device has suffered a power fault\n");
-               goto out;
-
-       default:
-               acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
-               ost_code = ACPI_OST_SC_UNRECOGNIZED_NOTIFY;
-               goto out;
-       }
-
-       mutex_lock(&acpiphp_context_lock);
-       context = acpiphp_get_context(handle);
-       if (!context || WARN_ON(context->handle != handle)
-           || context->func.parent->is_going_away)
-               goto err_out;
-
-       get_bridge(context->func.parent);
-       acpiphp_put_context(context);
-       status = acpi_hotplug_execute(hotplug_event_work, context, type);
-       if (ACPI_SUCCESS(status)) {
-               mutex_unlock(&acpiphp_context_lock);
-               return;
-       }
-       put_bridge(context->func.parent);
-
- err_out:
-       mutex_unlock(&acpiphp_context_lock);
-       ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-
- out:
-       acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-}
-
-/*
- * Create hotplug slots for the PCI bus.
- * It should always return 0 to avoid skipping following notifiers.
+ * A "slot" is an object associated with a PCI device number.  All functions
+ * (PCI devices) with the same bus and device number belong to the same slot.
  */
 void acpiphp_enumerate_slots(struct pci_bus *bus)
 {
        struct acpiphp_bridge *bridge;
+       struct acpi_device *adev;
        acpi_handle handle;
        acpi_status status;
 
        if (acpiphp_disabled)
                return;
 
-       handle = ACPI_HANDLE(bus->bridge);
-       if (!handle)
+       adev = ACPI_COMPANION(bus->bridge);
+       if (!adev)
                return;
 
+       handle = adev->handle;
        bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
        if (!bridge) {
                acpi_handle_err(handle, "No memory for bridge object\n");
@@ -1074,10 +892,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
                 * parent is going to be handled by pciehp, in which case this
                 * bridge is not interesting to us either.
                 */
-               mutex_lock(&acpiphp_context_lock);
-               context = acpiphp_get_context(handle);
+               acpi_lock_hp_context();
+               context = acpiphp_get_context(adev);
                if (!context) {
-                       mutex_unlock(&acpiphp_context_lock);
+                       acpi_unlock_hp_context();
                        put_device(&bus->dev);
                        pci_dev_put(bridge->pci_dev);
                        kfree(bridge);
@@ -1087,17 +905,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
                context->bridge = bridge;
                /* Get a reference to the parent bridge. */
                get_bridge(context->func.parent);
-               mutex_unlock(&acpiphp_context_lock);
+               acpi_unlock_hp_context();
        }
 
-       /* must be added to the list prior to calling register_slot */
+       /* Must be added to the list prior to calling acpiphp_add_context(). */
        mutex_lock(&bridge_mutex);
        list_add(&bridge->list, &bridge_list);
        mutex_unlock(&bridge_mutex);
 
        /* register all slot objects under this bridge */
        status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
-                                    register_slot, NULL, bridge, NULL);
+                                    acpiphp_add_context, NULL, bridge, NULL);
        if (ACPI_FAILURE(status)) {
                acpi_handle_err(handle, "failed to register slots\n");
                cleanup_bridge(bridge);
@@ -1105,7 +923,10 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
        }
 }
 
-/* Destroy hotplug slots associated with the PCI bus */
+/**
+ * acpiphp_remove_slots - Remove slot objects associated with a given bus.
+ * @bus: PCI bus to remove the slot objects for.
+ */
 void acpiphp_remove_slots(struct pci_bus *bus)
 {
        struct acpiphp_bridge *bridge;
@@ -1136,13 +957,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
        if (slot->flags & SLOT_IS_GOING_AWAY)
                return -ENODEV;
 
-       mutex_lock(&slot->crit_sect);
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
                enable_slot(slot);
 
-       mutex_unlock(&slot->crit_sect);
-
        pci_unlock_rescan_remove();
        return 0;
 }
@@ -1158,8 +976,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
        if (slot->flags & SLOT_IS_GOING_AWAY)
                return -ENODEV;
 
-       mutex_lock(&slot->crit_sect);
-
        /* unconfigure all functions */
        disable_slot(slot);
 
@@ -1173,7 +989,6 @@ static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
                        break;
                }
 
-       mutex_unlock(&slot->crit_sect);
        return 0;
 }
 
@@ -1181,9 +996,15 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
 {
        int ret;
 
+       /*
+        * Acquire acpi_scan_lock to ensure that the execution of _EJ0 in
+        * acpiphp_disable_and_eject_slot() will be synchronized properly.
+        */
+       acpi_scan_lock_acquire();
        pci_lock_rescan_remove();
        ret = acpiphp_disable_and_eject_slot(slot);
        pci_unlock_rescan_remove();
+       acpi_scan_lock_release();
        return ret;
 }
 
index 25f0bc6591645707bb3b452bbaa4c28bee0efc59..d911e0c1f359799ef4f110b59b2f68516868d153 100644 (file)
@@ -616,15 +616,11 @@ static int pci_pm_prepare(struct device *dev)
        int error = 0;
 
        /*
-        * PCI devices suspended at run time need to be resumed at this
-        * point, because in general it is necessary to reconfigure them for
-        * system suspend.  Namely, if the device is supposed to wake up the
-        * system from the sleep state, we may need to reconfigure it for this
-        * purpose.  In turn, if the device is not supposed to wake up the
-        * system from the sleep state, we'll have to prevent it from signaling
-        * wake-up.
+        * Devices having power.ignore_children set may still be necessary for
+        * suspending their children in the next phase of device suspend.
         */
-       pm_runtime_resume(dev);
+       if (dev->power.ignore_children)
+               pm_runtime_resume(dev);
 
        if (drv && drv->pm && drv->pm->prepare)
                error = drv->pm->prepare(dev);
@@ -654,6 +650,16 @@ static int pci_pm_suspend(struct device *dev)
                goto Fixup;
        }
 
+       /*
+        * PCI devices suspended at run time need to be resumed at this point,
+        * because in general it is necessary to reconfigure them for system
+        * suspend.  Namely, if the device is supposed to wake up the system
+        * from the sleep state, we may need to reconfigure it for this purpose.
+        * In turn, if the device is not supposed to wake up the system from the
+        * sleep state, we'll have to prevent it from signaling wake-up.
+        */
+       pm_runtime_resume(dev);
+
        pci_dev->state_saved = false;
        if (pm->suspend) {
                pci_power_t prev = pci_dev->current_state;
@@ -808,6 +814,14 @@ static int pci_pm_freeze(struct device *dev)
                return 0;
        }
 
+       /*
+        * This used to be done in pci_pm_prepare() for all devices and some
+        * drivers may depend on it, so do it here.  Ideally, runtime-suspended
+        * devices should not be touched during freeze/thaw transitions,
+        * however.
+        */
+       pm_runtime_resume(dev);
+
        pci_dev->state_saved = false;
        if (pm->freeze) {
                int error;
@@ -915,6 +929,9 @@ static int pci_pm_poweroff(struct device *dev)
                goto Fixup;
        }
 
+       /* The reason to do that is the same as in pci_pm_suspend(). */
+       pm_runtime_resume(dev);
+
        pci_dev->state_saved = false;
        if (pm->poweroff) {
                int error;
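
The comments added in the hunks above all come down to one rule: a device that was runtime-suspended must be brought back to full power before the system-wide suspend path reconfigures it, for example to arm or disarm wake-up. A hedged sketch of the same pattern on the driver side, for a hypothetical "foo" driver (the foo_* names are invented for illustration):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Ensure the device is runtime-active before touching it for system sleep,
 * mirroring the pm_runtime_resume() calls the PCI core now makes in its
 * ->suspend/->freeze/->poweroff callbacks. */
static int foo_suspend(struct device *dev)
{
        pm_runtime_resume(dev);         /* no-op if the device is already active */
        /* ... arm or disarm wake-up, quiesce the hardware ... */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* ... restore hardware state ... */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};

Moving the resume out of ->prepare and into the later callbacks, as the PCI core does here, means runtime-suspended devices are only woken when the transition actually needs to touch them.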
index 6eecd7cddf5796b681b224e45a52a77476884d39..54d3089d157b628f35705cc15e311ed977894de6 100644 (file)
@@ -125,9 +125,6 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
                if (freqs->new < freqs->old)
                        sa1100_pcmcia_set_mecr(skt, freqs->new);
                break;
-       case CPUFREQ_RESUMECHANGE:
-               sa1100_pcmcia_set_mecr(skt, freqs->new);
-               break;
        }
 
        return 0;
index 1e4e69384baaed11ae859ce6a784eef0d8fa0f9c..06cee0189f3e4134cca8953519b742f999d92634 100644 (file)
@@ -224,7 +224,7 @@ config PINCTRL_MSM
 
 config PINCTRL_MSM8X74
        tristate "Qualcomm 8x74 pin controller driver"
-       depends on GPIOLIB && OF && OF_IRQ
+       depends on GPIOLIB && OF
        select PINCTRL_MSM
        help
          This is the pinctrl, pinmux, pinconf and gpiolib driver for the
index 340fb4e6c600393f0504e6d6cd5f7adc9cbad29e..eda13de2e7c0d110f5e105a84b5b19da93cadc36 100644 (file)
@@ -186,7 +186,9 @@ int pinctrl_dt_to_map(struct pinctrl *p)
 
        /* CONFIG_OF enabled, p->dev not instantiated from DT */
        if (!np) {
-               dev_dbg(p->dev, "no of_node; not parsing pinctrl DT\n");
+               if (of_have_populated_dt())
+                       dev_dbg(p->dev,
+                               "no of_node; not parsing pinctrl DT\n");
                return 0;
        }
 
index 366fa541ee9121591e78f66c7ec4307a5d415203..cc298fade93a79e359eb43c2daac3ef50bc91cc1 100644 (file)
@@ -8,6 +8,7 @@ config PINCTRL_MVEBU
 config PINCTRL_DOVE
        bool
        select PINCTRL_MVEBU
+       select MFD_SYSCON
 
 config PINCTRL_KIRKWOOD
        bool
@@ -17,6 +18,14 @@ config PINCTRL_ARMADA_370
        bool
        select PINCTRL_MVEBU
 
+config PINCTRL_ARMADA_375
+       bool
+       select PINCTRL_MVEBU
+
+config PINCTRL_ARMADA_38X
+       bool
+       select PINCTRL_MVEBU
+
 config PINCTRL_ARMADA_XP
        bool
        select PINCTRL_MVEBU
index 37c253297af004767252ec5c26eb8a4c75f19ef4..bc1b9f14f539a23c4e4a97312f03ae9730ac482a 100644 (file)
@@ -2,4 +2,6 @@ obj-$(CONFIG_PINCTRL_MVEBU)     += pinctrl-mvebu.o
 obj-$(CONFIG_PINCTRL_DOVE)     += pinctrl-dove.o
 obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o
 obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o
+obj-$(CONFIG_PINCTRL_ARMADA_375) += pinctrl-armada-375.o
+obj-$(CONFIG_PINCTRL_ARMADA_38X) += pinctrl-armada-38x.o
 obj-$(CONFIG_PINCTRL_ARMADA_XP)  += pinctrl-armada-xp.o
index ae1f760cbdd2b28cae4c0aa17f2fae0cba0fd50f..670e5b01c6781b3b97245f8b83f72576eeb020af 100644 (file)
 
 #include "pinctrl-mvebu.h"
 
+static void __iomem *mpp_base;
+
+static int armada_370_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+       return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_370_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+       return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
 static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
        MPP_MODE(0,
           MPP_FUNCTION(0x0, "gpio", NULL),
@@ -373,7 +385,7 @@ static struct of_device_id armada_370_pinctrl_of_match[] = {
 };
 
 static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
-       MPP_REG_CTRL(0, 65),
+       MPP_FUNC_CTRL(0, 65, NULL, armada_370_mpp_ctrl),
 };
 
 static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
@@ -385,6 +397,12 @@ static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
 static int armada_370_pinctrl_probe(struct platform_device *pdev)
 {
        struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
+       struct resource *res;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mpp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mpp_base))
+               return PTR_ERR(mpp_base);
 
        soc->variant = 0; /* no variants for Armada 370 */
        soc->controls = mv88f6710_mpp_controls;
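
With this change each mvebu SoC driver keeps its own mpp_base mapping and passes it to the shared default_mpp_ctrl_get()/default_mpp_ctrl_set() helpers instead of relying on state held in the core. As a rough sketch of the register arithmetic such a helper performs, assuming the usual MVEBU layout of eight MPP pins per 32-bit register with a 4-bit function selector each (an assumption about the helper's behaviour, not a copy of it):

#include <linux/io.h>
#include <linux/types.h>

#define MPPS_PER_REG    8       /* assumption: 8 MPP pins packed per register */
#define MPP_BITS        4       /* assumption: 4 selector bits per pin */
#define MPP_MASK        0xf

/* Read the function selector of MPP pin 'pid' from the packed register file. */
static int mpp_ctrl_get_sketch(void __iomem *base, unsigned pid,
                               unsigned long *config)
{
        unsigned off = (pid / MPPS_PER_REG) * sizeof(u32);
        unsigned shift = (pid % MPPS_PER_REG) * MPP_BITS;

        *config = (readl(base + off) >> shift) & MPP_MASK;
        return 0;
}

/* Update the same 4-bit field with a read-modify-write. */
static int mpp_ctrl_set_sketch(void __iomem *base, unsigned pid,
                               unsigned long config)
{
        unsigned off = (pid / MPPS_PER_REG) * sizeof(u32);
        unsigned shift = (pid % MPPS_PER_REG) * MPP_BITS;
        u32 reg = readl(base + off) & ~(MPP_MASK << shift);

        writel(reg | ((config & MPP_MASK) << shift), base + off);
        return 0;
}

The per-SoC armada_370_mpp_ctrl_get()/set() wrappers then only have to bind such a helper to the mpp_base obtained from devm_ioremap_resource() in the probe routine.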
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
new file mode 100644 (file)
index 0000000..db078fe
--- /dev/null
@@ -0,0 +1,459 @@
+/*
+ * Marvell Armada 375 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static void __iomem *mpp_base;
+
+static int armada_375_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+       return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_375_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+       return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
+       MPP_MODE(0,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad2"),
+                MPP_FUNCTION(0x2, "spi0", "cs1"),
+                MPP_FUNCTION(0x3, "spi1", "cs1"),
+                MPP_FUNCTION(0x5, "nand", "io2")),
+       MPP_MODE(1,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad3"),
+                MPP_FUNCTION(0x2, "spi0", "mosi"),
+                MPP_FUNCTION(0x3, "spi1", "mosi"),
+                MPP_FUNCTION(0x5, "nand", "io3")),
+       MPP_MODE(2,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad4"),
+                MPP_FUNCTION(0x2, "ptp", "eventreq"),
+                MPP_FUNCTION(0x3, "led", "c0"),
+                MPP_FUNCTION(0x4, "audio", "sdi"),
+                MPP_FUNCTION(0x5, "nand", "io4"),
+                MPP_FUNCTION(0x6, "spi1", "mosi")),
+       MPP_MODE(3,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad5"),
+                MPP_FUNCTION(0x2, "ptp", "triggen"),
+                MPP_FUNCTION(0x3, "led", "p3"),
+                MPP_FUNCTION(0x4, "audio", "mclk"),
+                MPP_FUNCTION(0x5, "nand", "io5"),
+                MPP_FUNCTION(0x6, "spi1", "miso")),
+       MPP_MODE(4,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad6"),
+                MPP_FUNCTION(0x2, "spi0", "miso"),
+                MPP_FUNCTION(0x3, "spi1", "miso"),
+                MPP_FUNCTION(0x5, "nand", "io6")),
+       MPP_MODE(5,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad7"),
+                MPP_FUNCTION(0x2, "spi0", "cs2"),
+                MPP_FUNCTION(0x3, "spi1", "cs2"),
+                MPP_FUNCTION(0x5, "nand", "io7"),
+                MPP_FUNCTION(0x6, "spi1", "miso")),
+       MPP_MODE(6,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad0"),
+                MPP_FUNCTION(0x3, "led", "p1"),
+                MPP_FUNCTION(0x4, "audio", "rclk"),
+                MPP_FUNCTION(0x5, "nand", "io0")),
+       MPP_MODE(7,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "ad1"),
+                MPP_FUNCTION(0x2, "ptp", "clk"),
+                MPP_FUNCTION(0x3, "led", "p2"),
+                MPP_FUNCTION(0x4, "audio", "extclk"),
+                MPP_FUNCTION(0x5, "nand", "io1")),
+       MPP_MODE(8,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev ", "bootcs"),
+                MPP_FUNCTION(0x2, "spi0", "cs0"),
+                MPP_FUNCTION(0x3, "spi1", "cs0"),
+                MPP_FUNCTION(0x5, "nand", "ce")),
+       MPP_MODE(9,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "nf", "wen"),
+                MPP_FUNCTION(0x2, "spi0", "sck"),
+                MPP_FUNCTION(0x3, "spi1", "sck"),
+                MPP_FUNCTION(0x5, "nand", "we")),
+       MPP_MODE(10,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "nf", "ren"),
+                MPP_FUNCTION(0x2, "dram", "vttctrl"),
+                MPP_FUNCTION(0x3, "led", "c1"),
+                MPP_FUNCTION(0x5, "nand", "re"),
+                MPP_FUNCTION(0x6, "spi1", "sck")),
+       MPP_MODE(11,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "a0"),
+                MPP_FUNCTION(0x3, "led", "c2"),
+                MPP_FUNCTION(0x4, "audio", "sdo"),
+                MPP_FUNCTION(0x5, "nand", "cle")),
+       MPP_MODE(12,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "a1"),
+                MPP_FUNCTION(0x4, "audio", "bclk"),
+                MPP_FUNCTION(0x5, "nand", "ale")),
+       MPP_MODE(13,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "dev", "readyn"),
+                MPP_FUNCTION(0x2, "pcie0", "rstoutn"),
+                MPP_FUNCTION(0x3, "pcie1", "rstoutn"),
+                MPP_FUNCTION(0x5, "nand", "rb"),
+                MPP_FUNCTION(0x6, "spi1", "mosi")),
+       MPP_MODE(14,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "i2c0", "sda"),
+                MPP_FUNCTION(0x3, "uart1", "txd")),
+       MPP_MODE(15,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "i2c0", "sck"),
+                MPP_FUNCTION(0x3, "uart1", "rxd")),
+       MPP_MODE(16,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "uart0", "txd")),
+       MPP_MODE(17,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "uart0", "rxd")),
+       MPP_MODE(18,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "tdm", "intn")),
+       MPP_MODE(19,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "tdm", "rstn")),
+       MPP_MODE(20,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "tdm", "pclk")),
+       MPP_MODE(21,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "tdm", "fsync")),
+       MPP_MODE(22,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "tdm", "drx")),
+       MPP_MODE(23,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "tdm", "dtx")),
+       MPP_MODE(24,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "p0"),
+                MPP_FUNCTION(0x2, "ge1", "rxd0"),
+                MPP_FUNCTION(0x3, "sd", "cmd"),
+                MPP_FUNCTION(0x4, "uart0", "rts"),
+                MPP_FUNCTION(0x5, "spi0", "cs0"),
+                MPP_FUNCTION(0x6, "dev", "cs1")),
+       MPP_MODE(25,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "p2"),
+                MPP_FUNCTION(0x2, "ge1", "rxd1"),
+                MPP_FUNCTION(0x3, "sd", "d0"),
+                MPP_FUNCTION(0x4, "uart0", "cts"),
+                MPP_FUNCTION(0x5, "spi0", "mosi"),
+                MPP_FUNCTION(0x6, "dev", "cs2")),
+       MPP_MODE(26,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+                MPP_FUNCTION(0x2, "ge1", "rxd2"),
+                MPP_FUNCTION(0x3, "sd", "d2"),
+                MPP_FUNCTION(0x4, "uart1", "rts"),
+                MPP_FUNCTION(0x5, "spi0", "cs1"),
+                MPP_FUNCTION(0x6, "led", "c1")),
+       MPP_MODE(27,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+                MPP_FUNCTION(0x2, "ge1", "rxd3"),
+                MPP_FUNCTION(0x3, "sd", "d1"),
+                MPP_FUNCTION(0x4, "uart1", "cts"),
+                MPP_FUNCTION(0x5, "spi0", "miso"),
+                MPP_FUNCTION(0x6, "led", "c2")),
+       MPP_MODE(28,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "p3"),
+                MPP_FUNCTION(0x2, "ge1", "txctl"),
+                MPP_FUNCTION(0x3, "sd", "clk"),
+                MPP_FUNCTION(0x5, "dram", "vttctrl")),
+       MPP_MODE(29,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+                MPP_FUNCTION(0x2, "ge1", "rxclk"),
+                MPP_FUNCTION(0x3, "sd", "d3"),
+                MPP_FUNCTION(0x5, "spi0", "sck"),
+                MPP_FUNCTION(0x6, "pcie0", "rstoutn")),
+       MPP_MODE(30,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge1", "txd0"),
+                MPP_FUNCTION(0x3, "spi1", "cs0"),
+                MPP_FUNCTION(0x5, "led", "p3"),
+                MPP_FUNCTION(0x6, "ptp", "eventreq")),
+       MPP_MODE(31,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge1", "txd1"),
+                MPP_FUNCTION(0x3, "spi1", "mosi"),
+                MPP_FUNCTION(0x5, "led", "p0")),
+       MPP_MODE(32,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge1", "txd2"),
+                MPP_FUNCTION(0x3, "spi1", "sck"),
+                MPP_FUNCTION(0x4, "ptp", "triggen"),
+                MPP_FUNCTION(0x5, "led", "c0")),
+       MPP_MODE(33,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge1", "txd3"),
+                MPP_FUNCTION(0x3, "spi1", "miso"),
+                MPP_FUNCTION(0x5, "led", "p2")),
+       MPP_MODE(34,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge1", "txclkout"),
+                MPP_FUNCTION(0x3, "spi1", "sck"),
+                MPP_FUNCTION(0x5, "led", "c1")),
+       MPP_MODE(35,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge1", "rxctl"),
+                MPP_FUNCTION(0x3, "spi1", "cs1"),
+                MPP_FUNCTION(0x4, "spi0", "cs2"),
+                MPP_FUNCTION(0x5, "led", "p1")),
+       MPP_MODE(36,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+                MPP_FUNCTION(0x5, "led", "c2")),
+       MPP_MODE(37,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+                MPP_FUNCTION(0x2, "tdm", "intn"),
+                MPP_FUNCTION(0x4, "ge", "mdc")),
+       MPP_MODE(38,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+                MPP_FUNCTION(0x4, "ge", "mdio")),
+       MPP_MODE(39,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x4, "ref", "clkout"),
+                MPP_FUNCTION(0x5, "led", "p3")),
+       MPP_MODE(40,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x4, "uart1", "txd"),
+                MPP_FUNCTION(0x5, "led", "p0")),
+       MPP_MODE(41,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x4, "uart1", "rxd"),
+                MPP_FUNCTION(0x5, "led", "p1")),
+       MPP_MODE(42,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x3, "spi1", "cs2"),
+                MPP_FUNCTION(0x4, "led", "c0"),
+                MPP_FUNCTION(0x6, "ptp", "clk")),
+       MPP_MODE(43,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "sata0", "prsnt"),
+                MPP_FUNCTION(0x4, "dram", "vttctrl"),
+                MPP_FUNCTION(0x5, "led", "c1")),
+       MPP_MODE(44,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x4, "sata0", "prsnt")),
+       MPP_MODE(45,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "spi0", "cs2"),
+                MPP_FUNCTION(0x4, "pcie0", "rstoutn"),
+                MPP_FUNCTION(0x5, "led", "c2"),
+                MPP_FUNCTION(0x6, "spi1", "cs2")),
+       MPP_MODE(46,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "p0"),
+                MPP_FUNCTION(0x2, "ge0", "txd0"),
+                MPP_FUNCTION(0x3, "ge1", "txd0"),
+                MPP_FUNCTION(0x6, "dev", "wen1")),
+       MPP_MODE(47,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "p1"),
+                MPP_FUNCTION(0x2, "ge0", "txd1"),
+                MPP_FUNCTION(0x3, "ge1", "txd1"),
+                MPP_FUNCTION(0x5, "ptp", "triggen"),
+                MPP_FUNCTION(0x6, "dev", "ale0")),
+       MPP_MODE(48,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "p2"),
+                MPP_FUNCTION(0x2, "ge0", "txd2"),
+                MPP_FUNCTION(0x3, "ge1", "txd2"),
+                MPP_FUNCTION(0x6, "dev", "ale1")),
+       MPP_MODE(49,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "p3"),
+                MPP_FUNCTION(0x2, "ge0", "txd3"),
+                MPP_FUNCTION(0x3, "ge1", "txd3"),
+                MPP_FUNCTION(0x6, "dev", "a2")),
+       MPP_MODE(50,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "c0"),
+                MPP_FUNCTION(0x2, "ge0", "rxd0"),
+                MPP_FUNCTION(0x3, "ge1", "rxd0"),
+                MPP_FUNCTION(0x5, "ptp", "eventreq"),
+                MPP_FUNCTION(0x6, "dev", "ad12")),
+       MPP_MODE(51,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "c1"),
+                MPP_FUNCTION(0x2, "ge0", "rxd1"),
+                MPP_FUNCTION(0x3, "ge1", "rxd1"),
+                MPP_FUNCTION(0x6, "dev", "ad8")),
+       MPP_MODE(52,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "led", "c2"),
+                MPP_FUNCTION(0x2, "ge0", "rxd2"),
+                MPP_FUNCTION(0x3, "ge1", "rxd2"),
+                MPP_FUNCTION(0x5, "i2c0", "sda"),
+                MPP_FUNCTION(0x6, "dev", "ad9")),
+       MPP_MODE(53,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie1", "rstoutn"),
+                MPP_FUNCTION(0x2, "ge0", "rxd3"),
+                MPP_FUNCTION(0x3, "ge1", "rxd3"),
+                MPP_FUNCTION(0x5, "i2c0", "sck"),
+                MPP_FUNCTION(0x6, "dev", "ad10")),
+       MPP_MODE(54,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "pcie0", "rstoutn"),
+                MPP_FUNCTION(0x2, "ge0", "rxctl"),
+                MPP_FUNCTION(0x3, "ge1", "rxctl"),
+                MPP_FUNCTION(0x6, "dev", "ad11")),
+       MPP_MODE(55,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge0", "rxclk"),
+                MPP_FUNCTION(0x3, "ge1", "rxclk"),
+                MPP_FUNCTION(0x6, "dev", "cs0")),
+       MPP_MODE(56,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge0", "txclkout"),
+                MPP_FUNCTION(0x3, "ge1", "txclkout"),
+                MPP_FUNCTION(0x6, "dev", "oe")),
+       MPP_MODE(57,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ge0", "txctl"),
+                MPP_FUNCTION(0x3, "ge1", "txctl"),
+                MPP_FUNCTION(0x6, "dev", "wen0")),
+       MPP_MODE(58,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x4, "led", "c0")),
+       MPP_MODE(59,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x4, "led", "c1")),
+       MPP_MODE(60,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "uart1", "txd"),
+                MPP_FUNCTION(0x4, "led", "c2"),
+                MPP_FUNCTION(0x6, "dev", "ad13")),
+       MPP_MODE(61,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "i2c1", "sda"),
+                MPP_FUNCTION(0x2, "uart1", "rxd"),
+                MPP_FUNCTION(0x3, "spi1", "cs2"),
+                MPP_FUNCTION(0x4, "led", "p0"),
+                MPP_FUNCTION(0x6, "dev", "ad14")),
+       MPP_MODE(62,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "i2c1", "sck"),
+                MPP_FUNCTION(0x4, "led", "p1"),
+                MPP_FUNCTION(0x6, "dev", "ad15")),
+       MPP_MODE(63,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ptp", "triggen"),
+                MPP_FUNCTION(0x4, "led", "p2"),
+                MPP_FUNCTION(0x6, "dev", "burst")),
+       MPP_MODE(64,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "dram", "vttctrl"),
+                MPP_FUNCTION(0x4, "led", "p3")),
+       MPP_MODE(65,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x1, "sata1", "prsnt")),
+       MPP_MODE(66,
+                MPP_FUNCTION(0x0, "gpio", NULL),
+                MPP_FUNCTION(0x2, "ptp", "eventreq"),
+                MPP_FUNCTION(0x4, "spi1", "cs3"),
+                MPP_FUNCTION(0x5, "pcie0", "rstoutn"),
+                MPP_FUNCTION(0x6, "dev", "cs3")),
+};
+
+static struct mvebu_pinctrl_soc_info armada_375_pinctrl_info;
+
+static struct of_device_id armada_375_pinctrl_of_match[] = {
+       { .compatible = "marvell,mv88f6720-pinctrl" },
+       { },
+};
+
+static struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
+       MPP_FUNC_CTRL(0, 69, NULL, armada_375_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
+       MPP_GPIO_RANGE(0,   0,  0, 32),
+       MPP_GPIO_RANGE(1,  32, 32, 32),
+       MPP_GPIO_RANGE(2,  64, 64,  3),
+};
+
+static int armada_375_pinctrl_probe(struct platform_device *pdev)
+{
+       struct mvebu_pinctrl_soc_info *soc = &armada_375_pinctrl_info;
+       struct resource *res;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mpp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mpp_base))
+               return PTR_ERR(mpp_base);
+
+       soc->variant = 0; /* no variants for Armada 375 */
+       soc->controls = mv88f6720_mpp_controls;
+       soc->ncontrols = ARRAY_SIZE(mv88f6720_mpp_controls);
+       soc->modes = mv88f6720_mpp_modes;
+       soc->nmodes = ARRAY_SIZE(mv88f6720_mpp_modes);
+       soc->gpioranges = mv88f6720_mpp_gpio_ranges;
+       soc->ngpioranges = ARRAY_SIZE(mv88f6720_mpp_gpio_ranges);
+
+       pdev->dev.platform_data = soc;
+
+       return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_375_pinctrl_remove(struct platform_device *pdev)
+{
+       return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_375_pinctrl_driver = {
+       .driver = {
+               .name = "armada-375-pinctrl",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
+       },
+       .probe = armada_375_pinctrl_probe,
+       .remove = armada_375_pinctrl_remove,
+};
+
+module_platform_driver(armada_375_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 375 pinctrl driver");
+MODULE_LICENSE("GPL v2");
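
Each MPP_MODE() entry in the table above maps one pin to the set of selector values it supports, keyed by function name. A toy lookup over such a table might look like the following; the structures are simplified stand-ins, not the mvebu core's actual types:

#include <string.h>
#include <stddef.h>

struct mpp_func {
        unsigned char val;      /* selector written into the pin's control field */
        const char *name;       /* e.g. "gpio", "spi0", "nand" */
};

struct mpp_mode {
        unsigned pid;           /* pin number */
        const struct mpp_func *funcs;
        size_t nfuncs;
};

/* Return the selector for 'name' on this pin, or -1 if the pin lacks it. */
static int mpp_lookup(const struct mpp_mode *mode, const char *name)
{
        size_t i;

        for (i = 0; i < mode->nfuncs; i++)
                if (!strcmp(mode->funcs[i].name, name))
                        return mode->funcs[i].val;
        return -1;
}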
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
new file mode 100644 (file)
index 0000000..1049f82
--- /dev/null
@@ -0,0 +1,462 @@
+/*
+ * Marvell Armada 380/385 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static void __iomem *mpp_base;
+
+static int armada_38x_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+       return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_38x_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+       return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+enum {
+       V_88F6810 = BIT(0),
+       V_88F6820 = BIT(1),
+       V_88F6828 = BIT(2),
+       V_88F6810_PLUS = (V_88F6810 | V_88F6820 | V_88F6828),
+       V_88F6820_PLUS = (V_88F6820 | V_88F6828),
+};
+
+static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+       MPP_MODE(0,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ua0",   "rxd",        V_88F6810_PLUS)),
+       MPP_MODE(1,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ua0",   "txd",        V_88F6810_PLUS)),
+       MPP_MODE(2,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "i2c0",  "sck",        V_88F6810_PLUS)),
+       MPP_MODE(3,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "i2c0",  "sda",        V_88F6810_PLUS)),
+       MPP_MODE(4,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge",    "mdc",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ua1",   "txd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua0",   "rts",        V_88F6810_PLUS)),
+       MPP_MODE(5,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge",    "mdio",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ua1",   "rxd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua0",   "cts",        V_88F6810_PLUS)),
+       MPP_MODE(6,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "txclkout",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge0",   "crs",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "cs3",        V_88F6810_PLUS)),
+       MPP_MODE(7,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "txd0",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad9",        V_88F6810_PLUS)),
+       MPP_MODE(8,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "txd1",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad10",       V_88F6810_PLUS)),
+       MPP_MODE(9,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "txd2",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad11",       V_88F6810_PLUS)),
+       MPP_MODE(10,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "txd3",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad12",       V_88F6810_PLUS)),
+       MPP_MODE(11,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "txctl",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad13",       V_88F6810_PLUS)),
+       MPP_MODE(12,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "rxd0",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "spi0",  "cs1",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad14",       V_88F6810_PLUS)),
+       MPP_MODE(13,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "rxd1",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "pcie0", "clkreq",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie1", "clkreq",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "spi0",  "cs2",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad15",       V_88F6810_PLUS)),
+       MPP_MODE(14,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "rxd2",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi0",  "cs3",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "wen1",       V_88F6810_PLUS)),
+       MPP_MODE(15,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "rxd3",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge",    "mdc slave",  V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi0",  "mosi",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
+       MPP_MODE(16,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "rxctl",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge",    "mdio slave", V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi0",  "miso",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "pcie0", "clkreq",     V_88F6810_PLUS)),
+       MPP_MODE(17,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "rxclk",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua1",   "rxd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi0",  "sck",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sata1", "prsnt",      V_88F6810_PLUS)),
+       MPP_MODE(18,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "rxerr",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ptp",   "trig_gen",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua1",   "txd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi0",  "cs0",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
+       MPP_MODE(19,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "col",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ptp",   "event_req",  V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie0", "clkreq",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sata1", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "ua0",   "cts",        V_88F6810_PLUS)),
+       MPP_MODE(20,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ge0",   "txclk",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "sata0", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "ua0",   "rts",        V_88F6810_PLUS)),
+       MPP_MODE(21,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "spi0",  "cs1",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "rxd0",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "sata0", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "cmd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "bootcs",     V_88F6810_PLUS)),
+       MPP_MODE(22,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "spi0",  "mosi",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad0",        V_88F6810_PLUS)),
+       MPP_MODE(23,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "spi0",  "sck",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad2",        V_88F6810_PLUS)),
+       MPP_MODE(24,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "spi0",  "miso",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ua0",   "cts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua1",   "rxd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d4",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ready",      V_88F6810_PLUS)),
+       MPP_MODE(25,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "spi0",  "cs0",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ua0",   "rts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua1",   "txd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d5",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "cs0",        V_88F6810_PLUS)),
+       MPP_MODE(26,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "spi0",  "cs2",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "i2c1",  "sck",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d6",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "cs1",        V_88F6810_PLUS)),
+       MPP_MODE(27,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "spi0",  "cs3",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "txclkout",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "i2c1",  "sda",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d7",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "cs2",        V_88F6810_PLUS)),
+       MPP_MODE(28,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "txd0",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "clk",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad5",        V_88F6810_PLUS)),
+       MPP_MODE(29,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "txd1",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ale0",       V_88F6810_PLUS)),
+       MPP_MODE(30,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "txd2",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "oen",        V_88F6810_PLUS)),
+       MPP_MODE(31,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "txd3",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ale1",       V_88F6810_PLUS)),
+       MPP_MODE(32,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "txctl",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "wen0",       V_88F6810_PLUS)),
+       MPP_MODE(33,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "m",     "decc_err",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad3",        V_88F6810_PLUS)),
+       MPP_MODE(34,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad1",        V_88F6810_PLUS)),
+       MPP_MODE(35,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ref",   "clk_out1",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "a1",         V_88F6810_PLUS)),
+       MPP_MODE(36,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ptp",   "trig_gen",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "a0",         V_88F6810_PLUS)),
+       MPP_MODE(37,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ptp",   "clk",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "rxclk",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d3",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad8",        V_88F6810_PLUS)),
+       MPP_MODE(38,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ptp",   "event_req",  V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "rxd1",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ref",   "clk_out0",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d0",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad4",        V_88F6810_PLUS)),
+       MPP_MODE(39,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "i2c1",  "sck",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "rxd2",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua0",   "cts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d1",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "a2",         V_88F6810_PLUS)),
+       MPP_MODE(40,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "i2c1",  "sda",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "rxd3",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua0",   "rts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "sd0",   "d2",         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad6",        V_88F6810_PLUS)),
+       MPP_MODE(41,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ua1",   "rxd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge1",   "rxctl",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua0",   "cts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi1",  "cs3",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "burst/last", V_88F6810_PLUS)),
+       MPP_MODE(42,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ua1",   "txd",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "ua0",   "rts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "ad7",        V_88F6810_PLUS)),
+       MPP_MODE(43,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "pcie0", "clkreq",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "dev",   "clkout",     V_88F6810_PLUS)),
+       MPP_MODE(44,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
+                MPP_VAR_FUNCTION(4, "sata3", "prsnt",      V_88F6828),
+                MPP_VAR_FUNCTION(5, "pcie0", "rstout",     V_88F6810_PLUS)),
+       MPP_MODE(45,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ref",   "clk_out0",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
+       MPP_MODE(46,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ref",   "clk_out1",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
+       MPP_MODE(47,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
+                MPP_VAR_FUNCTION(4, "spi1",  "cs2",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sata3", "prsnt",      V_88F6828)),
+       MPP_MODE(48,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "tdm2c", "pclk",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "audio", "mclk",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d4",         V_88F6810_PLUS)),
+       MPP_MODE(49,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "sata2", "prsnt",      V_88F6828),
+                MPP_VAR_FUNCTION(2, "sata3", "prsnt",      V_88F6828),
+                MPP_VAR_FUNCTION(3, "tdm2c", "fsync",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "audio", "lrclk",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d5",         V_88F6810_PLUS)),
+       MPP_MODE(50,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(3, "tdm2c", "drx",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "audio", "extclk",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "cmd",        V_88F6810_PLUS)),
+       MPP_MODE(51,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "tdm2c", "dtx",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "audio", "sdo",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "m",     "decc_err",   V_88F6810_PLUS)),
+       MPP_MODE(52,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(3, "tdm2c", "intn",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "audio", "sdi",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d6",         V_88F6810_PLUS)),
+       MPP_MODE(53,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "sata1", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "sata0", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "tdm2c", "rstn",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "audio", "bclk",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d7",         V_88F6810_PLUS)),
+       MPP_MODE(54,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d3",         V_88F6810_PLUS)),
+       MPP_MODE(55,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ua1",   "cts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge",    "mdio",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie1", "clkreq",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "spi1",  "cs1",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d0",         V_88F6810_PLUS)),
+       MPP_MODE(56,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "ua1",   "rts",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "ge",    "mdc",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi1",  "mosi",       V_88F6810_PLUS)),
+       MPP_MODE(57,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi1",  "sck",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "clk",        V_88F6810_PLUS)),
+       MPP_MODE(58,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "pcie1", "clkreq",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(2, "i2c1",  "sck",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie2", "clkreq",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(4, "spi1",  "miso",       V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d1",         V_88F6810_PLUS)),
+       MPP_MODE(59,
+                MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(2, "i2c1",  "sda",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+                MPP_VAR_FUNCTION(4, "spi1",  "cs0",        V_88F6810_PLUS),
+                MPP_VAR_FUNCTION(5, "sd0",   "d2",         V_88F6810_PLUS)),
+};
+
+static struct mvebu_pinctrl_soc_info armada_38x_pinctrl_info;
+
+static struct of_device_id armada_38x_pinctrl_of_match[] = {
+       {
+               .compatible = "marvell,mv88f6810-pinctrl",
+               .data       = (void *) V_88F6810,
+       },
+       {
+               .compatible = "marvell,mv88f6820-pinctrl",
+               .data       = (void *) V_88F6820,
+       },
+       {
+               .compatible = "marvell,mv88f6828-pinctrl",
+               .data       = (void *) V_88F6828,
+       },
+       { },
+};
+
+static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
+       MPP_FUNC_CTRL(0, 59, NULL, armada_38x_mpp_ctrl),
+};
+
+static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
+       MPP_GPIO_RANGE(0,   0,  0, 32),
+       MPP_GPIO_RANGE(1,  32, 32, 27),
+};
+
+static int armada_38x_pinctrl_probe(struct platform_device *pdev)
+{
+       struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
+       const struct of_device_id *match =
+               of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
+       struct resource *res;
+
+       if (!match)
+               return -ENODEV;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mpp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mpp_base))
+               return PTR_ERR(mpp_base);
+
+       soc->variant = (unsigned) match->data & 0xff;
+       soc->controls = armada_38x_mpp_controls;
+       soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
+       soc->gpioranges = armada_38x_mpp_gpio_ranges;
+       soc->ngpioranges = ARRAY_SIZE(armada_38x_mpp_gpio_ranges);
+       soc->modes = armada_38x_mpp_modes;
+       soc->nmodes = armada_38x_mpp_controls[0].npins;
+
+       pdev->dev.platform_data = soc;
+
+       return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_38x_pinctrl_remove(struct platform_device *pdev)
+{
+       return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_38x_pinctrl_driver = {
+       .driver = {
+               .name = "armada-38x-pinctrl",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(armada_38x_pinctrl_of_match),
+       },
+       .probe = armada_38x_pinctrl_probe,
+       .remove = armada_38x_pinctrl_remove,
+};
+
+module_platform_driver(armada_38x_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 38x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index 843a51f9d129c2d1ac6c95c202a06b07bcd24c06..de311129f7a020473acc3b46ee9ede902df86c9d 100644 (file)
 
 #include "pinctrl-mvebu.h"
 
+static void __iomem *mpp_base;
+
+static int armada_xp_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+       return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int armada_xp_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+       return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
 enum armada_xp_variant {
        V_MV78230       = BIT(0),
        V_MV78260       = BIT(1),
@@ -366,7 +378,7 @@ static struct of_device_id armada_xp_pinctrl_of_match[] = {
 };
 
 static struct mvebu_mpp_ctrl mv78230_mpp_controls[] = {
-       MPP_REG_CTRL(0, 48),
+       MPP_FUNC_CTRL(0, 48, NULL, armada_xp_mpp_ctrl),
 };
 
 static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
@@ -375,7 +387,7 @@ static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = {
 };
 
 static struct mvebu_mpp_ctrl mv78260_mpp_controls[] = {
-       MPP_REG_CTRL(0, 66),
+       MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
 };
 
 static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
@@ -385,7 +397,7 @@ static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = {
 };
 
 static struct mvebu_mpp_ctrl mv78460_mpp_controls[] = {
-       MPP_REG_CTRL(0, 66),
+       MPP_FUNC_CTRL(0, 66, NULL, armada_xp_mpp_ctrl),
 };
 
 static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = {
@@ -399,10 +411,16 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
        struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info;
        const struct of_device_id *match =
                of_match_device(armada_xp_pinctrl_of_match, &pdev->dev);
+       struct resource *res;
 
        if (!match)
                return -ENODEV;
 
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mpp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mpp_base))
+               return PTR_ERR(mpp_base);
+
        soc->variant = (unsigned) match->data & 0xff;
 
        switch (soc->variant) {
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 47268393af34689832fd189bde3c9da27eb6c090..3b022178a566eee5d5568f9b68f3135f405cfdde 100644 (file)
 #include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
 #include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
 
 #include "pinctrl-mvebu.h"
 
-#define DOVE_SB_REGS_VIRT_BASE         IOMEM(0xfde00000)
-#define DOVE_MPP_VIRT_BASE             (DOVE_SB_REGS_VIRT_BASE + 0xd0200)
-#define DOVE_PMU_MPP_GENERAL_CTRL      (DOVE_MPP_VIRT_BASE + 0x10)
-#define  DOVE_AU0_AC97_SEL             BIT(16)
-#define DOVE_PMU_SIGNAL_SELECT_0       (DOVE_SB_REGS_VIRT_BASE + 0xd802C)
-#define DOVE_PMU_SIGNAL_SELECT_1       (DOVE_SB_REGS_VIRT_BASE + 0xd8030)
-#define DOVE_GLOBAL_CONFIG_1           (DOVE_SB_REGS_VIRT_BASE + 0xe802C)
-#define DOVE_GLOBAL_CONFIG_1           (DOVE_SB_REGS_VIRT_BASE + 0xe802C)
-#define  DOVE_TWSI_ENABLE_OPTION1      BIT(7)
-#define DOVE_GLOBAL_CONFIG_2           (DOVE_SB_REGS_VIRT_BASE + 0xe8030)
-#define  DOVE_TWSI_ENABLE_OPTION2      BIT(20)
-#define  DOVE_TWSI_ENABLE_OPTION3      BIT(21)
-#define  DOVE_TWSI_OPTION3_GPIO                BIT(22)
-#define DOVE_SSP_CTRL_STATUS_1         (DOVE_SB_REGS_VIRT_BASE + 0xe8034)
-#define  DOVE_SSP_ON_AU1               BIT(0)
-#define DOVE_MPP_GENERAL_VIRT_BASE     (DOVE_SB_REGS_VIRT_BASE + 0xe803c)
-#define  DOVE_AU1_SPDIFO_GPIO_EN       BIT(1)
-#define  DOVE_NAND_GPIO_EN             BIT(0)
-#define DOVE_GPIO_LO_VIRT_BASE         (DOVE_SB_REGS_VIRT_BASE + 0xd0400)
-#define DOVE_MPP_CTRL4_VIRT_BASE       (DOVE_GPIO_LO_VIRT_BASE + 0x40)
-#define  DOVE_SPI_GPIO_SEL             BIT(5)
-#define  DOVE_UART1_GPIO_SEL           BIT(4)
-#define  DOVE_AU1_GPIO_SEL             BIT(3)
-#define  DOVE_CAM_GPIO_SEL             BIT(2)
-#define  DOVE_SD1_GPIO_SEL             BIT(1)
-#define  DOVE_SD0_GPIO_SEL             BIT(0)
-
-#define MPPS_PER_REG   8
-#define MPP_BITS       4
-#define MPP_MASK       0xf
+/* Internal registers can be configured at any 1 MiB aligned address */
+#define INT_REGS_MASK          ~(SZ_1M - 1)
+#define MPP4_REGS_OFFS         0xd0440
+#define PMU_REGS_OFFS          0xd802c
+#define GC_REGS_OFFS           0xe802c
+
+/* MPP Base registers */
+#define PMU_MPP_GENERAL_CTRL   0x10
+#define  AU0_AC97_SEL          BIT(16)
+
+/* MPP Control 4 register */
+#define SPI_GPIO_SEL           BIT(5)
+#define UART1_GPIO_SEL         BIT(4)
+#define AU1_GPIO_SEL           BIT(3)
+#define CAM_GPIO_SEL           BIT(2)
+#define SD1_GPIO_SEL           BIT(1)
+#define SD0_GPIO_SEL           BIT(0)
+
+/* PMU Signal Select registers */
+#define PMU_SIGNAL_SELECT_0    0x00
+#define PMU_SIGNAL_SELECT_1    0x04
+
+/* Global Config regmap registers */
+#define GLOBAL_CONFIG_1                0x00
+#define  TWSI_ENABLE_OPTION1   BIT(7)
+#define GLOBAL_CONFIG_2                0x04
+#define  TWSI_ENABLE_OPTION2   BIT(20)
+#define  TWSI_ENABLE_OPTION3   BIT(21)
+#define  TWSI_OPTION3_GPIO     BIT(22)
+#define SSP_CTRL_STATUS_1      0x08
+#define  SSP_ON_AU1            BIT(0)
+#define MPP_GENERAL_CONFIG     0x10
+#define  AU1_SPDIFO_GPIO_EN    BIT(1)
+#define  NAND_GPIO_EN          BIT(0)
 
 #define CONFIG_PMU     BIT(4)
 
-static int dove_pmu_mpp_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
-                                unsigned long *config)
+static void __iomem *mpp_base;
+static void __iomem *mpp4_base;
+static void __iomem *pmu_base;
+static struct regmap *gconfmap;
+
+static int dove_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+       return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int dove_mpp_ctrl_set(unsigned pid, unsigned long config)
 {
-       unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
-       unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
-       unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+       return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
+static int dove_pmu_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+       unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+       unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+       unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
        unsigned long func;
 
-       if (pmu & (1 << ctrl->pid)) {
-               func = readl(DOVE_PMU_SIGNAL_SELECT_0 + off);
-               *config = (func >> shift) & MPP_MASK;
-               *config |= CONFIG_PMU;
-       } else {
-               func = readl(DOVE_MPP_VIRT_BASE + off);
-               *config = (func >> shift) & MPP_MASK;
-       }
+       if ((pmu & BIT(pid)) == 0)
+               return default_mpp_ctrl_get(mpp_base, pid, config);
+
+       func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
+       *config = (func >> shift) & MVEBU_MPP_MASK;
+       *config |= CONFIG_PMU;
+
        return 0;
 }
 
-static int dove_pmu_mpp_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
-                                unsigned long config)
+static int dove_pmu_mpp_ctrl_set(unsigned pid, unsigned long config)
 {
-       unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS;
-       unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS;
-       unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+       unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+       unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+       unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
        unsigned long func;
 
-       if (config & CONFIG_PMU) {
-               writel(pmu | (1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
-               func = readl(DOVE_PMU_SIGNAL_SELECT_0 + off);
-               func &= ~(MPP_MASK << shift);
-               func |= (config & MPP_MASK) << shift;
-               writel(func, DOVE_PMU_SIGNAL_SELECT_0 + off);
-       } else {
-               writel(pmu & ~(1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL);
-               func = readl(DOVE_MPP_VIRT_BASE + off);
-               func &= ~(MPP_MASK << shift);
-               func |= (config & MPP_MASK) << shift;
-               writel(func, DOVE_MPP_VIRT_BASE + off);
+       if ((config & CONFIG_PMU) == 0) {
+               writel(pmu & ~BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+               return default_mpp_ctrl_set(mpp_base, pid, config);
        }
+
+       writel(pmu | BIT(pid), mpp_base + PMU_MPP_GENERAL_CTRL);
+       func = readl(pmu_base + PMU_SIGNAL_SELECT_0 + off);
+       func &= ~(MVEBU_MPP_MASK << shift);
+       func |= (config & MVEBU_MPP_MASK) << shift;
+       writel(func, pmu_base + PMU_SIGNAL_SELECT_0 + off);
+
        return 0;
 }
 
-static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
-                             unsigned long *config)
+static int dove_mpp4_ctrl_get(unsigned pid, unsigned long *config)
 {
-       unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+       unsigned long mpp4 = readl(mpp4_base);
        unsigned long mask;
 
-       switch (ctrl->pid) {
+       switch (pid) {
        case 24: /* mpp_camera */
-               mask = DOVE_CAM_GPIO_SEL;
+               mask = CAM_GPIO_SEL;
                break;
        case 40: /* mpp_sdio0 */
-               mask = DOVE_SD0_GPIO_SEL;
+               mask = SD0_GPIO_SEL;
                break;
        case 46: /* mpp_sdio1 */
-               mask = DOVE_SD1_GPIO_SEL;
+               mask = SD1_GPIO_SEL;
                break;
        case 58: /* mpp_spi0 */
-               mask = DOVE_SPI_GPIO_SEL;
+               mask = SPI_GPIO_SEL;
                break;
        case 62: /* mpp_uart1 */
-               mask = DOVE_UART1_GPIO_SEL;
+               mask = UART1_GPIO_SEL;
                break;
        default:
                return -EINVAL;
@@ -129,27 +144,26 @@ static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
        return 0;
 }
 
-static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
-                             unsigned long config)
+static int dove_mpp4_ctrl_set(unsigned pid, unsigned long config)
 {
-       unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
+       unsigned long mpp4 = readl(mpp4_base);
        unsigned long mask;
 
-       switch (ctrl->pid) {
+       switch (pid) {
        case 24: /* mpp_camera */
-               mask = DOVE_CAM_GPIO_SEL;
+               mask = CAM_GPIO_SEL;
                break;
        case 40: /* mpp_sdio0 */
-               mask = DOVE_SD0_GPIO_SEL;
+               mask = SD0_GPIO_SEL;
                break;
        case 46: /* mpp_sdio1 */
-               mask = DOVE_SD1_GPIO_SEL;
+               mask = SD1_GPIO_SEL;
                break;
        case 58: /* mpp_spi0 */
-               mask = DOVE_SPI_GPIO_SEL;
+               mask = SPI_GPIO_SEL;
                break;
        case 62: /* mpp_uart1 */
-               mask = DOVE_UART1_GPIO_SEL;
+               mask = UART1_GPIO_SEL;
                break;
        default:
                return -EINVAL;
@@ -159,74 +173,69 @@ static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
        if (config)
                mpp4 |= mask;
 
-       writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
+       writel(mpp4, mpp4_base);
 
        return 0;
 }
 
-static int dove_nand_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
-                             unsigned long *config)
+static int dove_nand_ctrl_get(unsigned pid, unsigned long *config)
 {
-       unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
+       unsigned int gmpp;
 
-       *config = ((gmpp & DOVE_NAND_GPIO_EN) != 0);
+       regmap_read(gconfmap, MPP_GENERAL_CONFIG, &gmpp);
+       *config = ((gmpp & NAND_GPIO_EN) != 0);
 
        return 0;
 }
 
-static int dove_nand_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
-                             unsigned long config)
+static int dove_nand_ctrl_set(unsigned pid, unsigned long config)
 {
-       unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
-
-       gmpp &= ~DOVE_NAND_GPIO_EN;
-       if (config)
-               gmpp |= DOVE_NAND_GPIO_EN;
-
-       writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
-
+       regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
+                          NAND_GPIO_EN,
+                          (config) ? NAND_GPIO_EN : 0);
        return 0;
 }
 
-static int dove_audio0_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
-                               unsigned long *config)
+static int dove_audio0_ctrl_get(unsigned pid, unsigned long *config)
 {
-       unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+       unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
 
-       *config = ((pmu & DOVE_AU0_AC97_SEL) != 0);
+       *config = ((pmu & AU0_AC97_SEL) != 0);
 
        return 0;
 }
 
-static int dove_audio0_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
-                               unsigned long config)
+static int dove_audio0_ctrl_set(unsigned pid, unsigned long config)
 {
-       unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL);
+       unsigned long pmu = readl(mpp_base + PMU_MPP_GENERAL_CTRL);
 
-       pmu &= ~DOVE_AU0_AC97_SEL;
+       pmu &= ~AU0_AC97_SEL;
        if (config)
-               pmu |= DOVE_AU0_AC97_SEL;
-       writel(pmu, DOVE_PMU_MPP_GENERAL_CTRL);
+               pmu |= AU0_AC97_SEL;
+       writel(pmu, mpp_base + PMU_MPP_GENERAL_CTRL);
 
        return 0;
 }
 
-static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
-                               unsigned long *config)
+static int dove_audio1_ctrl_get(unsigned pid, unsigned long *config)
 {
-       unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
-       unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
-       unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
-       unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+       unsigned int mpp4 = readl(mpp4_base);
+       unsigned int sspc1;
+       unsigned int gmpp;
+       unsigned int gcfg2;
+
+       regmap_read(gconfmap, SSP_CTRL_STATUS_1, &sspc1);
+       regmap_read(gconfmap, MPP_GENERAL_CONFIG, &gmpp);
+       regmap_read(gconfmap, GLOBAL_CONFIG_2, &gcfg2);
 
        *config = 0;
-       if (mpp4 & DOVE_AU1_GPIO_SEL)
+       if (mpp4 & AU1_GPIO_SEL)
                *config |= BIT(3);
-       if (sspc1 & DOVE_SSP_ON_AU1)
+       if (sspc1 & SSP_ON_AU1)
                *config |= BIT(2);
-       if (gmpp & DOVE_AU1_SPDIFO_GPIO_EN)
+       if (gmpp & AU1_SPDIFO_GPIO_EN)
                *config |= BIT(1);
-       if (gcfg2 & DOVE_TWSI_OPTION3_GPIO)
+       if (gcfg2 & TWSI_OPTION3_GPIO)
                *config |= BIT(0);
 
        /* SSP/TWSI only if I2S1 not set*/
@@ -238,35 +247,24 @@ static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
        return 0;
 }
 
-static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
-                               unsigned long config)
+static int dove_audio1_ctrl_set(unsigned pid, unsigned long config)
 {
-       unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE);
-       unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1);
-       unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE);
-       unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+       unsigned int mpp4 = readl(mpp4_base);
 
-       /*
-        * clear all audio1 related bits before configure
-        */
-       gcfg2 &= ~DOVE_TWSI_OPTION3_GPIO;
-       gmpp &= ~DOVE_AU1_SPDIFO_GPIO_EN;
-       sspc1 &= ~DOVE_SSP_ON_AU1;
-       mpp4 &= ~DOVE_AU1_GPIO_SEL;
-
-       if (config & BIT(0))
-               gcfg2 |= DOVE_TWSI_OPTION3_GPIO;
-       if (config & BIT(1))
-               gmpp |= DOVE_AU1_SPDIFO_GPIO_EN;
-       if (config & BIT(2))
-               sspc1 |= DOVE_SSP_ON_AU1;
+       mpp4 &= ~AU1_GPIO_SEL;
        if (config & BIT(3))
-               mpp4 |= DOVE_AU1_GPIO_SEL;
-
-       writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE);
-       writel(sspc1, DOVE_SSP_CTRL_STATUS_1);
-       writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE);
-       writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+               mpp4 |= AU1_GPIO_SEL;
+       writel(mpp4, mpp4_base);
+
+       regmap_update_bits(gconfmap, SSP_CTRL_STATUS_1,
+                          SSP_ON_AU1,
+                          (config & BIT(2)) ? SSP_ON_AU1 : 0);
+       regmap_update_bits(gconfmap, MPP_GENERAL_CONFIG,
+                          AU1_SPDIFO_GPIO_EN,
+                          (config & BIT(1)) ? AU1_SPDIFO_GPIO_EN : 0);
+       regmap_update_bits(gconfmap, GLOBAL_CONFIG_2,
+                          TWSI_OPTION3_GPIO,
+                          (config & BIT(0)) ? TWSI_OPTION3_GPIO : 0);
 
        return 0;
 }
@@ -276,11 +274,11 @@ static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
  * break other functions. If you require all mpps as gpio
  * enforce gpio setting by pinctrl mapping.
  */
-static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid)
+static int dove_audio1_ctrl_gpio_req(unsigned pid)
 {
        unsigned long config;
 
-       dove_audio1_ctrl_get(ctrl, &config);
+       dove_audio1_ctrl_get(pid, &config);
 
        switch (config) {
        case 0x02: /* i2s1 : gpio[56:57] */
@@ -303,76 +301,62 @@ static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid)
 }
 
 /* mpp[52:57] has gpio pins capable of in and out */
-static int dove_audio1_ctrl_gpio_dir(struct mvebu_mpp_ctrl *ctrl, u8 pid,
-                               bool input)
+static int dove_audio1_ctrl_gpio_dir(unsigned pid, bool input)
 {
        if (pid < 52 || pid > 57)
                return -ENOTSUPP;
        return 0;
 }
 
-static int dove_twsi_ctrl_get(struct mvebu_mpp_ctrl *ctrl,
-                             unsigned long *config)
+static int dove_twsi_ctrl_get(unsigned pid, unsigned long *config)
 {
-       unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
-       unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
+       unsigned int gcfg1;
+       unsigned int gcfg2;
+
+       regmap_read(gconfmap, GLOBAL_CONFIG_1, &gcfg1);
+       regmap_read(gconfmap, GLOBAL_CONFIG_2, &gcfg2);
 
        *config = 0;
-       if (gcfg1 & DOVE_TWSI_ENABLE_OPTION1)
+       if (gcfg1 & TWSI_ENABLE_OPTION1)
                *config = 1;
-       else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION2)
+       else if (gcfg2 & TWSI_ENABLE_OPTION2)
                *config = 2;
-       else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION3)
+       else if (gcfg2 & TWSI_ENABLE_OPTION3)
                *config = 3;
 
        return 0;
 }
 
-static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl *ctrl,
-                               unsigned long config)
+static int dove_twsi_ctrl_set(unsigned pid, unsigned long config)
 {
-       unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1);
-       unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2);
-
-       gcfg1 &= ~DOVE_TWSI_ENABLE_OPTION1;
-       gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION3);
+       unsigned int gcfg1 = 0;
+       unsigned int gcfg2 = 0;
 
        switch (config) {
        case 1:
-               gcfg1 |= DOVE_TWSI_ENABLE_OPTION1;
+               gcfg1 = TWSI_ENABLE_OPTION1;
                break;
        case 2:
-               gcfg2 |= DOVE_TWSI_ENABLE_OPTION2;
+               gcfg2 = TWSI_ENABLE_OPTION2;
                break;
        case 3:
-               gcfg2 |= DOVE_TWSI_ENABLE_OPTION3;
+               gcfg2 = TWSI_ENABLE_OPTION3;
                break;
        }
 
-       writel(gcfg1, DOVE_GLOBAL_CONFIG_1);
-       writel(gcfg2, DOVE_GLOBAL_CONFIG_2);
+       regmap_update_bits(gconfmap, GLOBAL_CONFIG_1,
+                          TWSI_ENABLE_OPTION1,
+                          gcfg1);
+       regmap_update_bits(gconfmap, GLOBAL_CONFIG_2,
+                          TWSI_ENABLE_OPTION2 | TWSI_ENABLE_OPTION3,
+                          gcfg2);
 
        return 0;
 }
 
 static struct mvebu_mpp_ctrl dove_mpp_controls[] = {
-       MPP_FUNC_CTRL(0, 0, "mpp0", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(1, 1, "mpp1", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(2, 2, "mpp2", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(3, 3, "mpp3", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(4, 4, "mpp4", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(5, 5, "mpp5", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(6, 6, "mpp6", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(7, 7, "mpp7", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(8, 8, "mpp8", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(9, 9, "mpp9", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(10, 10, "mpp10", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(11, 11, "mpp11", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(12, 12, "mpp12", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(13, 13, "mpp13", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(14, 14, "mpp14", dove_pmu_mpp_ctrl),
-       MPP_FUNC_CTRL(15, 15, "mpp15", dove_pmu_mpp_ctrl),
-       MPP_REG_CTRL(16, 23),
+       MPP_FUNC_CTRL(0, 15, NULL, dove_pmu_mpp_ctrl),
+       MPP_FUNC_CTRL(16, 23, NULL, dove_mpp_ctrl),
        MPP_FUNC_CTRL(24, 39, "mpp_camera", dove_mpp4_ctrl),
        MPP_FUNC_CTRL(40, 45, "mpp_sdio0", dove_mpp4_ctrl),
        MPP_FUNC_CTRL(46, 51, "mpp_sdio1", dove_mpp4_ctrl),
@@ -772,8 +756,17 @@ static struct of_device_id dove_pinctrl_of_match[] = {
        { }
 };
 
+static struct regmap_config gc_regmap_config = {
+       .reg_bits = 32,
+       .val_bits = 32,
+       .reg_stride = 4,
+       .max_register = 5,
+};
+
 static int dove_pinctrl_probe(struct platform_device *pdev)
 {
+       struct resource *res, *mpp_res;
+       struct resource fb_res;
        const struct of_device_id *match =
                of_match_device(dove_pinctrl_of_match, &pdev->dev);
        pdev->dev.platform_data = (void *)match->data;
@@ -789,6 +782,59 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
        }
        clk_prepare_enable(clk);
 
+       mpp_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mpp_base = devm_ioremap_resource(&pdev->dev, mpp_res);
+       if (IS_ERR(mpp_base))
+               return PTR_ERR(mpp_base);
+
+       /* prepare fallback resource */
+       memcpy(&fb_res, mpp_res, sizeof(struct resource));
+       fb_res.start = 0;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res) {
+               dev_warn(&pdev->dev, "falling back to hardcoded MPP4 resource\n");
+               adjust_resource(&fb_res,
+                       (mpp_res->start & INT_REGS_MASK) + MPP4_REGS_OFFS, 0x4);
+               res = &fb_res;
+       }
+
+       mpp4_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mpp4_base))
+               return PTR_ERR(mpp4_base);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+       if (!res) {
+               dev_warn(&pdev->dev, "falling back to hardcoded PMU resource\n");
+               adjust_resource(&fb_res,
+                       (mpp_res->start & INT_REGS_MASK) + PMU_REGS_OFFS, 0x8);
+               res = &fb_res;
+       }
+
+       pmu_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pmu_base))
+               return PTR_ERR(pmu_base);
+
+       gconfmap = syscon_regmap_lookup_by_compatible("marvell,dove-global-config");
+       if (IS_ERR(gconfmap)) {
+               void __iomem *gc_base;
+
+               dev_warn(&pdev->dev, "falling back to hardcoded global registers\n");
+               adjust_resource(&fb_res,
+                       (mpp_res->start & INT_REGS_MASK) + GC_REGS_OFFS, 0x14);
+               gc_base = devm_ioremap_resource(&pdev->dev, &fb_res);
+               if (IS_ERR(gc_base))
+                       return PTR_ERR(gc_base);
+               gconfmap = devm_regmap_init_mmio(&pdev->dev,
+                                                gc_base, &gc_regmap_config);
+               if (IS_ERR(gconfmap))
+                       return PTR_ERR(gconfmap);
+       }
+
+       /* Warn on any missing DT resource */
+       if (fb_res.start)
+               dev_warn(&pdev->dev, FW_BUG "Missing pinctrl regs in DTB. Please update your firmware.\n");
+
        return mvebu_pinctrl_probe(pdev);
 }
 
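The Dove hunks above replace several open-coded read-modify-write sequences with regmap_update_bits() on the new global-config regmap obtained through syscon (with an MMIO regmap as fallback). For reference, a minimal sketch of the two styles; the register and mask names below are placeholders, not Dove's:

/* Old style: read, clear the field, optionally set it, write back. */
static void rmw_bit(void __iomem *reg, u32 mask, bool enable)
{
	u32 val = readl(reg);

	val &= ~mask;
	if (enable)
		val |= mask;
	writel(val, reg);
}

/* New style: the same update as a single call on a regmap, e.g.
 *	regmap_update_bits(map, offset, mask, enable ? mask : 0);
 * which also serializes concurrent updates through the regmap's internal lock. */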
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 6b504b5935a5ec75ecff507a0717e82af4b72982..0d0211a1a0b005659a4ddbe557c46fd8dbe1d521 100644 (file)
 
 #include "pinctrl-mvebu.h"
 
+static void __iomem *mpp_base;
+
+static int kirkwood_mpp_ctrl_get(unsigned pid, unsigned long *config)
+{
+       return default_mpp_ctrl_get(mpp_base, pid, config);
+}
+
+static int kirkwood_mpp_ctrl_set(unsigned pid, unsigned long config)
+{
+       return default_mpp_ctrl_set(mpp_base, pid, config);
+}
+
 #define V(f6180, f6190, f6192, f6281, f6282, dx4122)   \
        ((f6180 << 0) | (f6190 << 1) | (f6192 << 2) |   \
         (f6281 << 3) | (f6282 << 4) | (dx4122 << 5))
@@ -359,7 +371,7 @@ static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = {
 };
 
 static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = {
-       MPP_REG_CTRL(0, 29),
+       MPP_FUNC_CTRL(0, 29, NULL, kirkwood_mpp_ctrl),
 };
 
 static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
@@ -367,7 +379,7 @@ static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = {
 };
 
 static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = {
-       MPP_REG_CTRL(0, 35),
+       MPP_FUNC_CTRL(0, 35, NULL, kirkwood_mpp_ctrl),
 };
 
 static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
@@ -376,7 +388,7 @@ static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = {
 };
 
 static struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = {
-       MPP_REG_CTRL(0, 49),
+       MPP_FUNC_CTRL(0, 49, NULL, kirkwood_mpp_ctrl),
 };
 
 static struct pinctrl_gpio_range mv88f628x_gpio_ranges[] = {
@@ -456,9 +468,16 @@ static struct of_device_id kirkwood_pinctrl_of_match[] = {
 
 static int kirkwood_pinctrl_probe(struct platform_device *pdev)
 {
+       struct resource *res;
        const struct of_device_id *match =
                of_match_device(kirkwood_pinctrl_of_match, &pdev->dev);
        pdev->dev.platform_data = (void *)match->data;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mpp_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mpp_base))
+               return PTR_ERR(mpp_base);
+
        return mvebu_pinctrl_probe(pdev);
 }
 
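The pinctrl-mvebu core changes below are what drive all of the per-driver updates above: the generic register handling moves out of the core (which drops its base pointer) into the per-driver default_mpp_ctrl wrappers, and the mvebu_mpp_ctrl callbacks are now keyed by pin id instead of taking a struct mvebu_mpp_ctrl pointer. A sketch of the callback shape inferred from the call sites in the hunks below; the real prototypes live in pinctrl-mvebu.h, which is not part of this excerpt:

/* Assumed, not quoted: inferred from calls such as
 *	grp->ctrl->mpp_get(grp->pins[0], config);
 *	grp->ctrl->mpp_gpio_dir(offset, input);
 */
typedef int (*mvebu_mpp_get_cb)(unsigned pid, unsigned long *config);
typedef int (*mvebu_mpp_set_cb)(unsigned pid, unsigned long config);
typedef int (*mvebu_mpp_gpio_req_cb)(unsigned pid);
typedef int (*mvebu_mpp_gpio_dir_cb)(unsigned pid, bool input);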
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 0fd1ad31fbf9aa719d19dffb7f18cc61194d2984..9908374f8f9207d0242b5fe1e081d2c066be0de3 100644 (file)
@@ -50,7 +50,6 @@ struct mvebu_pinctrl {
        struct device *dev;
        struct pinctrl_dev *pctldev;
        struct pinctrl_desc desc;
-       void __iomem *base;
        struct mvebu_pinctrl_group *groups;
        unsigned num_groups;
        struct mvebu_pinctrl_function *functions;
@@ -138,43 +137,6 @@ static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name(
        return NULL;
 }
 
-/*
- * Common mpp pin configuration registers on MVEBU are
- * registers of eight 4-bit values for each mpp setting.
- * Register offset and bit mask are calculated accordingly below.
- */
-static int mvebu_common_mpp_get(struct mvebu_pinctrl *pctl,
-                               struct mvebu_pinctrl_group *grp,
-                               unsigned long *config)
-{
-       unsigned pin = grp->gid;
-       unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
-       unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
-
-       *config = readl(pctl->base + off);
-       *config >>= shift;
-       *config &= MPP_MASK;
-
-       return 0;
-}
-
-static int mvebu_common_mpp_set(struct mvebu_pinctrl *pctl,
-                               struct mvebu_pinctrl_group *grp,
-                               unsigned long config)
-{
-       unsigned pin = grp->gid;
-       unsigned off = (pin / MPPS_PER_REG) * MPP_BITS;
-       unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS;
-       unsigned long reg;
-
-       reg = readl(pctl->base + off);
-       reg &= ~(MPP_MASK << shift);
-       reg |= (config << shift);
-       writel(reg, pctl->base + off);
-
-       return 0;
-}
-
 static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
                                unsigned gid, unsigned long *config)
 {
@@ -184,10 +146,7 @@ static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
        if (!grp->ctrl)
                return -EINVAL;
 
-       if (grp->ctrl->mpp_get)
-               return grp->ctrl->mpp_get(grp->ctrl, config);
-
-       return mvebu_common_mpp_get(pctl, grp, config);
+       return grp->ctrl->mpp_get(grp->pins[0], config);
 }
 
 static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
@@ -202,11 +161,7 @@ static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        for (i = 0; i < num_configs; i++) {
-               if (grp->ctrl->mpp_set)
-                       ret = grp->ctrl->mpp_set(grp->ctrl, configs[i]);
-               else
-                       ret = mvebu_common_mpp_set(pctl, grp, configs[i]);
-
+               ret = grp->ctrl->mpp_set(grp->pins[0], configs[i]);
                if (ret)
                        return ret;
        } /* for each config */
@@ -347,7 +302,7 @@ static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        if (grp->ctrl->mpp_gpio_req)
-               return grp->ctrl->mpp_gpio_req(grp->ctrl, offset);
+               return grp->ctrl->mpp_gpio_req(offset);
 
        setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
        if (!setting)
@@ -370,7 +325,7 @@ static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                return -EINVAL;
 
        if (grp->ctrl->mpp_gpio_dir)
-               return grp->ctrl->mpp_gpio_dir(grp->ctrl, offset, input);
+               return grp->ctrl->mpp_gpio_dir(offset, input);
 
        setting = mvebu_pinctrl_find_gpio_setting(pctl, grp);
        if (!setting)
@@ -593,11 +548,12 @@ static int mvebu_pinctrl_build_functions(struct platform_device *pdev,
 int mvebu_pinctrl_probe(struct platform_device *pdev)
 {
        struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
-       struct resource *res;
        struct mvebu_pinctrl *pctl;
-       void __iomem *base;
        struct pinctrl_pin_desc *pdesc;
        unsigned gid, n, k;
+       unsigned size, noname = 0;
+       char *noname_buf;
+       void *p;
        int ret;
 
        if (!soc || !soc->controls || !soc->modes) {
@@ -605,11 +561,6 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
-
        pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl),
                        GFP_KERNEL);
        if (!pctl) {
@@ -623,7 +574,6 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
        pctl->desc.pmxops = &mvebu_pinmux_ops;
        pctl->desc.confops = &mvebu_pinconf_ops;
        pctl->variant = soc->variant;
-       pctl->base = base;
        pctl->dev = &pdev->dev;
        platform_set_drvdata(pdev, pctl);
 
@@ -633,33 +583,23 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
        pctl->desc.npins = 0;
        for (n = 0; n < soc->ncontrols; n++) {
                struct mvebu_mpp_ctrl *ctrl = &soc->controls[n];
-               char *names;
 
                pctl->desc.npins += ctrl->npins;
-               /* initial control pins */
+               /* initialize control's pins[] array */
                for (k = 0; k < ctrl->npins; k++)
                        ctrl->pins[k] = ctrl->pid + k;
 
-               /* special soc specific control */
-               if (ctrl->mpp_get || ctrl->mpp_set) {
-                       if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
-                               dev_err(&pdev->dev, "wrong soc control info\n");
-                               return -EINVAL;
-                       }
+               /*
+                * Controls with a NULL name are allowed and are treated
+                * as a range of one-pin groups with generic mvebu register
+                * controls.
+                */
+               if (!ctrl->name) {
+                       pctl->num_groups += ctrl->npins;
+                       noname += ctrl->npins;
+               } else {
                        pctl->num_groups += 1;
-                       continue;
                }
-
-               /* generic mvebu register control */
-               names = devm_kzalloc(&pdev->dev, ctrl->npins * 8, GFP_KERNEL);
-               if (!names) {
-                       dev_err(&pdev->dev, "failed to alloc mpp names\n");
-                       return -ENOMEM;
-               }
-               for (k = 0; k < ctrl->npins; k++)
-                       sprintf(names + 8*k, "mpp%d", ctrl->pid+k);
-               ctrl->name = names;
-               pctl->num_groups += ctrl->npins;
        }
 
        pdesc = devm_kzalloc(&pdev->dev, pctl->desc.npins *
@@ -673,12 +613,17 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
                pdesc[n].number = n;
        pctl->desc.pins = pdesc;
 
-       pctl->groups = devm_kzalloc(&pdev->dev, pctl->num_groups *
-                            sizeof(struct mvebu_pinctrl_group), GFP_KERNEL);
-       if (!pctl->groups) {
-               dev_err(&pdev->dev, "failed to alloc pinctrl groups\n");
+       /*
+        * allocate groups and name buffers for unnamed groups.
+        */
+       size = pctl->num_groups * sizeof(*pctl->groups) + noname * 8;
+       p = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+       if (!p) {
+               dev_err(&pdev->dev, "failed to alloc group data\n");
                return -ENOMEM;
        }
+       pctl->groups = p;
+       noname_buf = p + pctl->num_groups * sizeof(*pctl->groups);
 
        /* assign mpp controls to groups */
        gid = 0;
@@ -690,17 +635,26 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
                pctl->groups[gid].pins = ctrl->pins;
                pctl->groups[gid].npins = ctrl->npins;
 
-               /* generic mvebu register control maps to a number of groups */
-               if (!ctrl->mpp_get && !ctrl->mpp_set) {
+               /*
+                * We treat unnamed controls as a range of one-pin groups
+                * with generic mvebu register controls. Use one group for
+                * each pin in this range and assign a default group name.
+                */
+               if (!ctrl->name) {
+                       pctl->groups[gid].name = noname_buf;
                        pctl->groups[gid].npins = 1;
+                       sprintf(noname_buf, "mpp%d", ctrl->pid+0);
+                       noname_buf += 8;
 
                        for (k = 1; k < ctrl->npins; k++) {
                                gid++;
                                pctl->groups[gid].gid = gid;
                                pctl->groups[gid].ctrl = ctrl;
-                               pctl->groups[gid].name = &ctrl->name[8*k];
+                               pctl->groups[gid].name = noname_buf;
                                pctl->groups[gid].pins = &ctrl->pins[k];
                                pctl->groups[gid].npins = 1;
+                               sprintf(noname_buf, "mpp%d", ctrl->pid+k);
+                               noname_buf += 8;
                        }
                }
                gid++;
index 90bd3beee860a10dfa0b085d5d9bbb7261cae1ab..65a98e6f72657a3277154bcbe8e8adee82cde20f 100644 (file)
  * between two or more different settings, e.g. assign mpp pin 13 to
  * uart1 or sata.
  *
- * If optional mpp_get/_set functions are set these are used to get/set
- * a specific mode. Otherwise it is assumed that the mpp control is based
- * on 4-bit groups in subsequent registers. The optional mpp_gpio_req/_dir
- * functions can be used to allow pin settings with varying gpio pins.
+ * The mpp_get/_set functions are mandatory and are used to get/set a
+ * specific mode. The optional mpp_gpio_req/_dir functions can be used
+ * to allow pin settings with varying gpio pins.
  */
 struct mvebu_mpp_ctrl {
        const char *name;
        u8 pid;
        u8 npins;
        unsigned *pins;
-       int (*mpp_get)(struct mvebu_mpp_ctrl *ctrl, unsigned long *config);
-       int (*mpp_set)(struct mvebu_mpp_ctrl *ctrl, unsigned long config);
-       int (*mpp_gpio_req)(struct mvebu_mpp_ctrl *ctrl, u8 pid);
-       int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl *ctrl, u8 pid, bool input);
+       int (*mpp_get)(unsigned pid, unsigned long *config);
+       int (*mpp_set)(unsigned pid, unsigned long config);
+       int (*mpp_gpio_req)(unsigned pid);
+       int (*mpp_gpio_dir)(unsigned pid, bool input);
 };
 
 /**
@@ -114,18 +113,6 @@ struct mvebu_pinctrl_soc_info {
        int ngpioranges;
 };
 
-#define MPP_REG_CTRL(_idl, _idh)                               \
-       {                                                       \
-               .name = NULL,                                   \
-               .pid = _idl,                                    \
-               .npins = _idh - _idl + 1,                       \
-               .pins = (unsigned[_idh - _idl + 1]) { },        \
-               .mpp_get = NULL,                                \
-               .mpp_set = NULL,                                \
-               .mpp_gpio_req = NULL,                           \
-               .mpp_gpio_dir = NULL,                           \
-       }
-
 #define MPP_FUNC_CTRL(_idl, _idh, _name, _func)                        \
        {                                                       \
                .name = _name,                                  \
@@ -186,6 +173,34 @@ struct mvebu_pinctrl_soc_info {
                .npins = _npins,                                \
        }
 
+#define MVEBU_MPPS_PER_REG     8
+#define MVEBU_MPP_BITS         4
+#define MVEBU_MPP_MASK         0xf
+
+static inline int default_mpp_ctrl_get(void __iomem *base, unsigned int pid,
+                                      unsigned long *config)
+{
+       unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+       unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+
+       *config = (readl(base + off) >> shift) & MVEBU_MPP_MASK;
+
+       return 0;
+}
+
+static inline int default_mpp_ctrl_set(void __iomem *base, unsigned int pid,
+                                      unsigned long config)
+{
+       unsigned off = (pid / MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+       unsigned shift = (pid % MVEBU_MPPS_PER_REG) * MVEBU_MPP_BITS;
+       unsigned long reg;
+
+       reg = readl(base + off) & ~(MVEBU_MPP_MASK << shift);
+       writel(reg | (config << shift), base + off);
+
+       return 0;
+}
+
 int mvebu_pinctrl_probe(struct platform_device *pdev);
 int mvebu_pinctrl_remove(struct platform_device *pdev);
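With the register access factored out of the core, every mvebu SoC driver must now provide mpp_get/mpp_set callbacks with the new signatures, typically as thin wrappers that pass a driver-private register base to the default_mpp_ctrl_get/_set helpers above. A minimal sketch, assuming a driver that maps its own mpp_base in probe() (the wrapper names and the mpp_base variable are illustrative, not taken from this series):

static void __iomem *mpp_base;  /* ioremapped by the SoC driver's probe() */

static int example_mpp_ctrl_get(unsigned pid, unsigned long *config)
{
        return default_mpp_ctrl_get(mpp_base, pid, config);
}

static int example_mpp_ctrl_set(unsigned pid, unsigned long config)
{
        return default_mpp_ctrl_set(mpp_base, pid, config);
}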
 
index ea9d9ab9cda1dc3c1342f83a00e1e8f40a0dc2df..008a29e92e56dffe8f8151b81aa364267a69ec56 100644 (file)
@@ -309,39 +309,6 @@ static const unsigned keys_8x8_pins[] = {
        GPIO_PE4, GPIO_PE5, GPIO_PE6, GPIO_PE7,
 };
 
-static const struct adi_pin_group adi_pin_groups[] = {
-       ADI_PIN_GROUP("uart0grp", uart0_pins),
-       ADI_PIN_GROUP("uart1grp", uart1_pins),
-       ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
-       ADI_PIN_GROUP("uart2grp", uart2_pins),
-       ADI_PIN_GROUP("uart3grp", uart3_pins),
-       ADI_PIN_GROUP("uart3ctsrtsgrp", uart3_ctsrts_pins),
-       ADI_PIN_GROUP("rsi0grp", rsi0_pins),
-       ADI_PIN_GROUP("spi0grp", spi0_pins),
-       ADI_PIN_GROUP("spi1grp", spi1_pins),
-       ADI_PIN_GROUP("twi0grp", twi0_pins),
-       ADI_PIN_GROUP("twi1grp", twi1_pins),
-       ADI_PIN_GROUP("rotarygrp", rotary_pins),
-       ADI_PIN_GROUP("can0grp", can0_pins),
-       ADI_PIN_GROUP("can1grp", can1_pins),
-       ADI_PIN_GROUP("smc0grp", smc0_pins),
-       ADI_PIN_GROUP("sport0grp", sport0_pins),
-       ADI_PIN_GROUP("sport1grp", sport1_pins),
-       ADI_PIN_GROUP("sport2grp", sport2_pins),
-       ADI_PIN_GROUP("sport3grp", sport3_pins),
-       ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
-       ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
-       ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
-       ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
-       ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
-       ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
-       ADI_PIN_GROUP("atapigrp", atapi_pins),
-       ADI_PIN_GROUP("atapialtergrp", atapi_alter_pins),
-       ADI_PIN_GROUP("nfc0grp", nfc0_pins),
-       ADI_PIN_GROUP("keys_4x4grp", keys_4x4_pins),
-       ADI_PIN_GROUP("keys_8x8grp", keys_8x8_pins),
-};
-
 static const unsigned short uart0_mux[] = {
        P_UART0_TX, P_UART0_RX,
        0
@@ -513,6 +480,39 @@ static const unsigned short keys_8x8_mux[] = {
        0
 };
 
+static const struct adi_pin_group adi_pin_groups[] = {
+       ADI_PIN_GROUP("uart0grp", uart0_pins, uart0_mux),
+       ADI_PIN_GROUP("uart1grp", uart1_pins, uart1_mux),
+       ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins, uart1_ctsrts_mux),
+       ADI_PIN_GROUP("uart2grp", uart2_pins, uart2_mux),
+       ADI_PIN_GROUP("uart3grp", uart3_pins, uart3_mux),
+       ADI_PIN_GROUP("uart3ctsrtsgrp", uart3_ctsrts_pins, uart3_ctsrts_mux),
+       ADI_PIN_GROUP("rsi0grp", rsi0_pins, rsi0_mux),
+       ADI_PIN_GROUP("spi0grp", spi0_pins, spi0_mux),
+       ADI_PIN_GROUP("spi1grp", spi1_pins, spi1_mux),
+       ADI_PIN_GROUP("twi0grp", twi0_pins, twi0_mux),
+       ADI_PIN_GROUP("twi1grp", twi1_pins, twi1_mux),
+       ADI_PIN_GROUP("rotarygrp", rotary_pins, rotary_mux),
+       ADI_PIN_GROUP("can0grp", can0_pins, can0_mux),
+       ADI_PIN_GROUP("can1grp", can1_pins, can1_mux),
+       ADI_PIN_GROUP("smc0grp", smc0_pins, smc0_mux),
+       ADI_PIN_GROUP("sport0grp", sport0_pins, sport0_mux),
+       ADI_PIN_GROUP("sport1grp", sport1_pins, sport1_mux),
+       ADI_PIN_GROUP("sport2grp", sport2_pins, sport2_mux),
+       ADI_PIN_GROUP("sport3grp", sport3_pins, sport3_mux),
+       ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins, ppi0_8b_mux),
+       ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins, ppi0_16b_mux),
+       ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins, ppi0_24b_mux),
+       ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins, ppi1_8b_mux),
+       ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins, ppi1_16b_mux),
+       ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins, ppi2_8b_mux),
+       ADI_PIN_GROUP("atapigrp", atapi_pins, atapi_mux),
+       ADI_PIN_GROUP("atapialtergrp", atapi_alter_pins, atapi_alter_mux),
+       ADI_PIN_GROUP("nfc0grp", nfc0_pins, nfc0_mux),
+       ADI_PIN_GROUP("keys_4x4grp", keys_4x4_pins, keys_4x4_mux),
+       ADI_PIN_GROUP("keys_8x8grp", keys_8x8_pins, keys_8x8_mux),
+};
+
 static const char * const uart0grp[] = { "uart0grp" };
 static const char * const uart1grp[] = { "uart1grp" };
 static const char * const uart1ctsrtsgrp[] = { "uart1ctsrtsgrp" };
@@ -532,49 +532,45 @@ static const char * const sport0grp[] = { "sport0grp" };
 static const char * const sport1grp[] = { "sport1grp" };
 static const char * const sport2grp[] = { "sport2grp" };
 static const char * const sport3grp[] = { "sport3grp" };
-static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
-static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
-static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
-static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
-static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
-static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
+static const char * const ppi0grp[] = { "ppi0_8bgrp",
+                                       "ppi0_16bgrp",
+                                       "ppi0_24bgrp" };
+static const char * const ppi1grp[] = { "ppi1_8bgrp",
+                                       "ppi1_16bgrp" };
+static const char * const ppi2grp[] = { "ppi2_8bgrp" };
 static const char * const atapigrp[] = { "atapigrp" };
 static const char * const atapialtergrp[] = { "atapialtergrp" };
 static const char * const nfc0grp[] = { "nfc0grp" };
-static const char * const keys_4x4grp[] = { "keys_4x4grp" };
-static const char * const keys_8x8grp[] = { "keys_8x8grp" };
+static const char * const keysgrp[] = { "keys_4x4grp",
+                                       "keys_8x8grp" };
 
 static const struct adi_pmx_func adi_pmx_functions[] = {
-       ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
-       ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
-       ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
-       ADI_PMX_FUNCTION("uart2", uart2grp, uart2_mux),
-       ADI_PMX_FUNCTION("uart3", uart3grp, uart3_mux),
-       ADI_PMX_FUNCTION("uart3_ctsrts", uart3ctsrtsgrp, uart3_ctsrts_mux),
-       ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
-       ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
-       ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
-       ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
-       ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
-       ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
-       ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
-       ADI_PMX_FUNCTION("can1", can1grp, can1_mux),
-       ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
-       ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
-       ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
-       ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
-       ADI_PMX_FUNCTION("sport3", sport3grp, sport3_mux),
-       ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
-       ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
-       ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
-       ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
-       ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
-       ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
-       ADI_PMX_FUNCTION("atapi", atapigrp, atapi_mux),
-       ADI_PMX_FUNCTION("atapi_alter", atapialtergrp, atapi_alter_mux),
-       ADI_PMX_FUNCTION("nfc0", nfc0grp, nfc0_mux),
-       ADI_PMX_FUNCTION("keys_4x4", keys_4x4grp, keys_4x4_mux),
-       ADI_PMX_FUNCTION("keys_8x8", keys_8x8grp, keys_8x8_mux),
+       ADI_PMX_FUNCTION("uart0", uart0grp),
+       ADI_PMX_FUNCTION("uart1", uart1grp),
+       ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp),
+       ADI_PMX_FUNCTION("uart2", uart2grp),
+       ADI_PMX_FUNCTION("uart3", uart3grp),
+       ADI_PMX_FUNCTION("uart3_ctsrts", uart3ctsrtsgrp),
+       ADI_PMX_FUNCTION("rsi0", rsi0grp),
+       ADI_PMX_FUNCTION("spi0", spi0grp),
+       ADI_PMX_FUNCTION("spi1", spi1grp),
+       ADI_PMX_FUNCTION("twi0", twi0grp),
+       ADI_PMX_FUNCTION("twi1", twi1grp),
+       ADI_PMX_FUNCTION("rotary", rotarygrp),
+       ADI_PMX_FUNCTION("can0", can0grp),
+       ADI_PMX_FUNCTION("can1", can1grp),
+       ADI_PMX_FUNCTION("smc0", smc0grp),
+       ADI_PMX_FUNCTION("sport0", sport0grp),
+       ADI_PMX_FUNCTION("sport1", sport1grp),
+       ADI_PMX_FUNCTION("sport2", sport2grp),
+       ADI_PMX_FUNCTION("sport3", sport3grp),
+       ADI_PMX_FUNCTION("ppi0", ppi0grp),
+       ADI_PMX_FUNCTION("ppi1", ppi1grp),
+       ADI_PMX_FUNCTION("ppi2", ppi2grp),
+       ADI_PMX_FUNCTION("atapi", atapigrp),
+       ADI_PMX_FUNCTION("atapi_alter", atapialtergrp),
+       ADI_PMX_FUNCTION("nfc0", nfc0grp),
+       ADI_PMX_FUNCTION("keys", keysgrp),
 };
 
 static const struct adi_pinctrl_soc_data adi_bf54x_soc = {
index bf57aea2826c737bcba2482614a2b690d0832b3e..4cb59fe9be7039154079f572af6a24634b9d5ac4 100644 (file)
@@ -259,37 +259,6 @@ static const unsigned lp3_pins[] = {
        GPIO_PF12, GPIO_PF13, GPIO_PF14, GPIO_PF15,
 };
 
-static const struct adi_pin_group adi_pin_groups[] = {
-       ADI_PIN_GROUP("uart0grp", uart0_pins),
-       ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins),
-       ADI_PIN_GROUP("uart1grp", uart1_pins),
-       ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins),
-       ADI_PIN_GROUP("rsi0grp", rsi0_pins),
-       ADI_PIN_GROUP("eth0grp", eth0_pins),
-       ADI_PIN_GROUP("eth1grp", eth1_pins),
-       ADI_PIN_GROUP("spi0grp", spi0_pins),
-       ADI_PIN_GROUP("spi1grp", spi1_pins),
-       ADI_PIN_GROUP("twi0grp", twi0_pins),
-       ADI_PIN_GROUP("twi1grp", twi1_pins),
-       ADI_PIN_GROUP("rotarygrp", rotary_pins),
-       ADI_PIN_GROUP("can0grp", can0_pins),
-       ADI_PIN_GROUP("smc0grp", smc0_pins),
-       ADI_PIN_GROUP("sport0grp", sport0_pins),
-       ADI_PIN_GROUP("sport1grp", sport1_pins),
-       ADI_PIN_GROUP("sport2grp", sport2_pins),
-       ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins),
-       ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins),
-       ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins),
-       ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins),
-       ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins),
-       ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins),
-       ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins),
-       ADI_PIN_GROUP("lp0grp", lp0_pins),
-       ADI_PIN_GROUP("lp1grp", lp1_pins),
-       ADI_PIN_GROUP("lp2grp", lp2_pins),
-       ADI_PIN_GROUP("lp3grp", lp3_pins),
-};
-
 static const unsigned short uart0_mux[] = {
        P_UART0_TX, P_UART0_RX,
        0
@@ -446,6 +415,37 @@ static const unsigned short lp3_mux[] = {
         0
 };
 
+static const struct adi_pin_group adi_pin_groups[] = {
+       ADI_PIN_GROUP("uart0grp", uart0_pins, uart0_mux),
+       ADI_PIN_GROUP("uart0ctsrtsgrp", uart0_ctsrts_pins, uart0_ctsrts_mux),
+       ADI_PIN_GROUP("uart1grp", uart1_pins, uart1_mux),
+       ADI_PIN_GROUP("uart1ctsrtsgrp", uart1_ctsrts_pins, uart1_ctsrts_mux),
+       ADI_PIN_GROUP("rsi0grp", rsi0_pins, rsi0_mux),
+       ADI_PIN_GROUP("eth0grp", eth0_pins, eth0_mux),
+       ADI_PIN_GROUP("eth1grp", eth1_pins, eth1_mux),
+       ADI_PIN_GROUP("spi0grp", spi0_pins, spi0_mux),
+       ADI_PIN_GROUP("spi1grp", spi1_pins, spi1_mux),
+       ADI_PIN_GROUP("twi0grp", twi0_pins, twi0_mux),
+       ADI_PIN_GROUP("twi1grp", twi1_pins, twi1_mux),
+       ADI_PIN_GROUP("rotarygrp", rotary_pins, rotary_mux),
+       ADI_PIN_GROUP("can0grp", can0_pins, can0_mux),
+       ADI_PIN_GROUP("smc0grp", smc0_pins, smc0_mux),
+       ADI_PIN_GROUP("sport0grp", sport0_pins, sport0_mux),
+       ADI_PIN_GROUP("sport1grp", sport1_pins, sport1_mux),
+       ADI_PIN_GROUP("sport2grp", sport2_pins, sport2_mux),
+       ADI_PIN_GROUP("ppi0_8bgrp", ppi0_8b_pins, ppi0_8b_mux),
+       ADI_PIN_GROUP("ppi0_16bgrp", ppi0_16b_pins, ppi0_16b_mux),
+       ADI_PIN_GROUP("ppi0_24bgrp", ppi0_24b_pins, ppi0_24b_mux),
+       ADI_PIN_GROUP("ppi1_8bgrp", ppi1_8b_pins, ppi1_8b_mux),
+       ADI_PIN_GROUP("ppi1_16bgrp", ppi1_16b_pins, ppi1_16b_mux),
+       ADI_PIN_GROUP("ppi2_8bgrp", ppi2_8b_pins, ppi2_8b_mux),
+       ADI_PIN_GROUP("ppi2_16bgrp", ppi2_16b_pins, ppi2_16b_mux),
+       ADI_PIN_GROUP("lp0grp", lp0_pins, lp0_mux),
+       ADI_PIN_GROUP("lp1grp", lp1_pins, lp1_mux),
+       ADI_PIN_GROUP("lp2grp", lp2_pins, lp2_mux),
+       ADI_PIN_GROUP("lp3grp", lp3_pins, lp3_mux),
+};
+
 static const char * const uart0grp[] = { "uart0grp" };
 static const char * const uart0ctsrtsgrp[] = { "uart0ctsrtsgrp" };
 static const char * const uart1grp[] = { "uart1grp" };
@@ -463,47 +463,43 @@ static const char * const smc0grp[] = { "smc0grp" };
 static const char * const sport0grp[] = { "sport0grp" };
 static const char * const sport1grp[] = { "sport1grp" };
 static const char * const sport2grp[] = { "sport2grp" };
-static const char * const ppi0_8bgrp[] = { "ppi0_8bgrp" };
-static const char * const ppi0_16bgrp[] = { "ppi0_16bgrp" };
-static const char * const ppi0_24bgrp[] = { "ppi0_24bgrp" };
-static const char * const ppi1_8bgrp[] = { "ppi1_8bgrp" };
-static const char * const ppi1_16bgrp[] = { "ppi1_16bgrp" };
-static const char * const ppi2_8bgrp[] = { "ppi2_8bgrp" };
-static const char * const ppi2_16bgrp[] = { "ppi2_16bgrp" };
+static const char * const ppi0grp[] = { "ppi0_8bgrp",
+                                       "ppi0_16bgrp",
+                                       "ppi0_24bgrp" };
+static const char * const ppi1grp[] = { "ppi1_8bgrp",
+                                       "ppi1_16bgrp" };
+static const char * const ppi2grp[] = { "ppi2_8bgrp",
+                                       "ppi2_16bgrp" };
 static const char * const lp0grp[] = { "lp0grp" };
 static const char * const lp1grp[] = { "lp1grp" };
 static const char * const lp2grp[] = { "lp2grp" };
 static const char * const lp3grp[] = { "lp3grp" };
 
 static const struct adi_pmx_func adi_pmx_functions[] = {
-       ADI_PMX_FUNCTION("uart0", uart0grp, uart0_mux),
-       ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp, uart0_ctsrts_mux),
-       ADI_PMX_FUNCTION("uart1", uart1grp, uart1_mux),
-       ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp, uart1_ctsrts_mux),
-       ADI_PMX_FUNCTION("rsi0", rsi0grp, rsi0_mux),
-       ADI_PMX_FUNCTION("eth0", eth0grp, eth0_mux),
-       ADI_PMX_FUNCTION("eth1", eth1grp, eth1_mux),
-       ADI_PMX_FUNCTION("spi0", spi0grp, spi0_mux),
-       ADI_PMX_FUNCTION("spi1", spi1grp, spi1_mux),
-       ADI_PMX_FUNCTION("twi0", twi0grp, twi0_mux),
-       ADI_PMX_FUNCTION("twi1", twi1grp, twi1_mux),
-       ADI_PMX_FUNCTION("rotary", rotarygrp, rotary_mux),
-       ADI_PMX_FUNCTION("can0", can0grp, can0_mux),
-       ADI_PMX_FUNCTION("smc0", smc0grp, smc0_mux),
-       ADI_PMX_FUNCTION("sport0", sport0grp, sport0_mux),
-       ADI_PMX_FUNCTION("sport1", sport1grp, sport1_mux),
-       ADI_PMX_FUNCTION("sport2", sport2grp, sport2_mux),
-       ADI_PMX_FUNCTION("ppi0_8b", ppi0_8bgrp, ppi0_8b_mux),
-       ADI_PMX_FUNCTION("ppi0_16b", ppi0_16bgrp, ppi0_16b_mux),
-       ADI_PMX_FUNCTION("ppi0_24b", ppi0_24bgrp, ppi0_24b_mux),
-       ADI_PMX_FUNCTION("ppi1_8b", ppi1_8bgrp, ppi1_8b_mux),
-       ADI_PMX_FUNCTION("ppi1_16b", ppi1_16bgrp, ppi1_16b_mux),
-       ADI_PMX_FUNCTION("ppi2_8b", ppi2_8bgrp, ppi2_8b_mux),
-       ADI_PMX_FUNCTION("ppi2_16b", ppi2_16bgrp, ppi2_16b_mux),
-       ADI_PMX_FUNCTION("lp0", lp0grp, lp0_mux),
-       ADI_PMX_FUNCTION("lp1", lp1grp, lp1_mux),
-       ADI_PMX_FUNCTION("lp2", lp2grp, lp2_mux),
-       ADI_PMX_FUNCTION("lp3", lp3grp, lp3_mux),
+       ADI_PMX_FUNCTION("uart0", uart0grp),
+       ADI_PMX_FUNCTION("uart0_ctsrts", uart0ctsrtsgrp),
+       ADI_PMX_FUNCTION("uart1", uart1grp),
+       ADI_PMX_FUNCTION("uart1_ctsrts", uart1ctsrtsgrp),
+       ADI_PMX_FUNCTION("rsi0", rsi0grp),
+       ADI_PMX_FUNCTION("eth0", eth0grp),
+       ADI_PMX_FUNCTION("eth1", eth1grp),
+       ADI_PMX_FUNCTION("spi0", spi0grp),
+       ADI_PMX_FUNCTION("spi1", spi1grp),
+       ADI_PMX_FUNCTION("twi0", twi0grp),
+       ADI_PMX_FUNCTION("twi1", twi1grp),
+       ADI_PMX_FUNCTION("rotary", rotarygrp),
+       ADI_PMX_FUNCTION("can0", can0grp),
+       ADI_PMX_FUNCTION("smc0", smc0grp),
+       ADI_PMX_FUNCTION("sport0", sport0grp),
+       ADI_PMX_FUNCTION("sport1", sport1grp),
+       ADI_PMX_FUNCTION("sport2", sport2grp),
+       ADI_PMX_FUNCTION("ppi0", ppi0grp),
+       ADI_PMX_FUNCTION("ppi1", ppi1grp),
+       ADI_PMX_FUNCTION("ppi2", ppi2grp),
+       ADI_PMX_FUNCTION("lp0", lp0grp),
+       ADI_PMX_FUNCTION("lp1", lp1grp),
+       ADI_PMX_FUNCTION("lp2", lp2grp),
+       ADI_PMX_FUNCTION("lp3", lp3grp),
 };
 
 static const struct adi_pinctrl_soc_data adi_bf60x_soc = {
index 7a39562c3e42f4f70fff0ffe3924edd6ba4ac609..200ea1e72d4040afb75254beef022948d45b2811 100644 (file)
@@ -89,6 +89,19 @@ struct gpio_port_saved {
        u32 mux;
 };
 
+/*
+ * struct gpio_pint_saved - PINT registers saved in PM operations
+ *
+ * @assign: ASSIGN register
+ * @edge_set: EDGE_SET register
+ * @invert_set: INVERT_SET register
+ */
+struct gpio_pint_saved {
+       u32 assign;
+       u32 edge_set;
+       u32 invert_set;
+};
+
 /**
  * struct gpio_pint - Pin interrupt controller device. Multiple ADI GPIO
  * banks can be mapped into one Pin interrupt controller.
@@ -114,7 +127,7 @@ struct gpio_pint {
        int irq;
        struct irq_domain *domain[2];
        struct gpio_pint_regs *regs;
-       struct adi_pm_pint_save saved_data;
+       struct gpio_pint_saved saved_data;
        int map_count;
        spinlock_t lock;
 
@@ -160,7 +173,7 @@ struct adi_pinctrl {
 struct gpio_port {
        struct list_head node;
        void __iomem *base;
-       unsigned int irq_base;
+       int irq_base;
        unsigned int width;
        struct gpio_port_t *regs;
        struct gpio_port_saved saved_data;
@@ -605,8 +618,8 @@ static struct pinctrl_ops adi_pctrl_ops = {
        .get_group_pins = adi_get_group_pins,
 };
 
-static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
-       unsigned group)
+static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned func_id,
+       unsigned group_id)
 {
        struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
        struct gpio_port *port;
@@ -614,7 +627,7 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
        unsigned long flags;
        unsigned short *mux, pin;
 
-       mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+       mux = (unsigned short *)pinctrl->soc->groups[group_id].mux;
 
        while (*mux) {
                pin = P_IDENT(*mux);
@@ -628,7 +641,7 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
                spin_lock_irqsave(&port->lock, flags);
 
                portmux_setup(port, pin_to_offset(range, pin),
-                                P_FUNCT2MUX(*mux));
+                               P_FUNCT2MUX(*mux));
                port_setup(port, pin_to_offset(range, pin), false);
                mux++;
 
@@ -638,8 +651,8 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
        return 0;
 }
 
-static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned selector,
-       unsigned group)
+static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned func_id,
+       unsigned group_id)
 {
        struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
        struct gpio_port *port;
@@ -647,7 +660,7 @@ static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned selector,
        unsigned long flags;
        unsigned short *mux, pin;
 
-       mux = (unsigned short *)pinctrl->soc->functions[selector].mux;
+       mux = (unsigned short *)pinctrl->soc->groups[group_id].mux;
 
        while (*mux) {
                pin = P_IDENT(*mux);
index 1f06f8df1fa380c6d0e832d24bdf28dd428aed1e..3ca29738213f7ba5895a886dfbf7977195acd043 100644 (file)
@@ -21,13 +21,15 @@ struct adi_pin_group {
        const char *name;
        const unsigned *pins;
        const unsigned num;
+       const unsigned short *mux;
 };
 
-#define ADI_PIN_GROUP(n, p)  \
+#define ADI_PIN_GROUP(n, p, m)  \
        {                       \
                .name = n,      \
                .pins = p,      \
                .num = ARRAY_SIZE(p),   \
+               .mux = m,                       \
        }
 
  /**
@@ -41,15 +43,13 @@ struct adi_pmx_func {
        const char *name;
        const char * const *groups;
        const unsigned num_groups;
-       const unsigned short *mux;
 };
 
-#define ADI_PMX_FUNCTION(n, g, m)              \
+#define ADI_PMX_FUNCTION(n, g)         \
        {                                       \
                .name = n,                      \
                .groups = g,                    \
                .num_groups = ARRAY_SIZE(g),    \
-               .mux = m,                       \
        }
 
 /**
index d990e33d8aa778b9a8cb1a3143db5345122ab584..5d24aaec5dbcba04f5669b4d58cef60d844a18a4 100644 (file)
@@ -1137,6 +1137,17 @@ static void at91_gpio_free(struct gpio_chip *chip, unsigned offset)
        pinctrl_free_gpio(gpio);
 }
 
+static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+       struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
+       void __iomem *pio = at91_gpio->regbase;
+       unsigned mask = 1 << offset;
+       u32 osr;
+
+       osr = readl_relaxed(pio + PIO_OSR);
+       return !(osr & mask);
+}
+
 static int at91_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
        struct at91_gpio_chip *at91_gpio = to_at91_gpio_chip(chip);
@@ -1325,6 +1336,31 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
        return 0;
 }
 
+static unsigned int gpio_irq_startup(struct irq_data *d)
+{
+       struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
+       unsigned        pin = d->hwirq;
+       int ret;
+
+       ret = gpio_lock_as_irq(&at91_gpio->chip, pin);
+       if (ret) {
+               dev_err(at91_gpio->chip.dev, "unable to lock pin %lu IRQ\n",
+                       d->hwirq);
+               return ret;
+       }
+       gpio_irq_unmask(d);
+       return 0;
+}
+
+static void gpio_irq_shutdown(struct irq_data *d)
+{
+       struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
+       unsigned        pin = d->hwirq;
+
+       gpio_irq_mask(d);
+       gpio_unlock_as_irq(&at91_gpio->chip, pin);
+}
+
 #ifdef CONFIG_PM
 
 static u32 wakeups[MAX_GPIO_BANKS];
@@ -1399,6 +1435,8 @@ void at91_pinctrl_gpio_resume(void)
 
 static struct irq_chip gpio_irqchip = {
        .name           = "GPIO",
+       .irq_startup    = gpio_irq_startup,
+       .irq_shutdown   = gpio_irq_shutdown,
        .irq_disable    = gpio_irq_mask,
        .irq_mask       = gpio_irq_mask,
        .irq_unmask     = gpio_irq_unmask,
@@ -1543,6 +1581,7 @@ static int at91_gpio_of_irq_setup(struct device_node *node,
 static struct gpio_chip at91_gpio_template = {
        .request                = at91_gpio_request,
        .free                   = at91_gpio_free,
+       .get_direction          = at91_gpio_get_direction,
        .direction_input        = at91_gpio_direction_input,
        .get                    = at91_gpio_get,
        .direction_output       = at91_gpio_direction_output,
index 665b96bc0c3a19799a6e8e91581b383f2c025360..bf2b3f65546986ac3a9c7b787549d16fc0fcc5b9 100644 (file)
 #define BYT_NGPIO_NCORE                28
 #define BYT_NGPIO_SUS          44
 
+#define BYT_SCORE_ACPI_UID     "1"
+#define BYT_NCORE_ACPI_UID     "2"
+#define BYT_SUS_ACPI_UID       "3"
+
 /*
  * Baytrail gpio controller consist of three separate sub-controllers called
  * SCORE, NCORE and SUS. The sub-controllers are identified by their acpi UID.
@@ -102,17 +106,17 @@ static unsigned const sus_pins[BYT_NGPIO_SUS] = {
 
 static struct pinctrl_gpio_range byt_ranges[] = {
        {
-               .name = "1", /* match with acpi _UID in probe */
+               .name = BYT_SCORE_ACPI_UID, /* match with acpi _UID in probe */
                .npins = BYT_NGPIO_SCORE,
                .pins = score_pins,
        },
        {
-               .name = "2",
+               .name = BYT_NCORE_ACPI_UID,
                .npins = BYT_NGPIO_NCORE,
                .pins = ncore_pins,
        },
        {
-               .name = "3",
+               .name = BYT_SUS_ACPI_UID,
                .npins = BYT_NGPIO_SUS,
                .pins = sus_pins,
        },
@@ -145,9 +149,41 @@ static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
        return vg->reg_base + reg_offset + reg;
 }
 
+static bool is_special_pin(struct byt_gpio *vg, unsigned offset)
+{
+       /* SCORE pin 92-93 */
+       if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
+               offset >= 92 && offset <= 93)
+               return true;
+
+       /* SUS pin 11-21 */
+       if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
+               offset >= 11 && offset <= 21)
+               return true;
+
+       return false;
+}
+
 static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
        struct byt_gpio *vg = to_byt_gpio(chip);
+       void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
+       u32 value;
+       bool special;
+
+       /*
+        * In most cases a func pin mux of 000 selects the GPIO function,
+        * but on some pins the GPIO function is selected by 001 instead.
+        * Only allow a pin to be requested as GPIO when its func pin mux
+        * was already preset to the GPIO function by the BIOS/FW.
+        */
+       value = readl(reg) & BYT_PIN_MUX;
+       special = is_special_pin(vg, offset);
+       if ((special && value != 1) || (!special && value)) {
+               dev_err(&vg->pdev->dev,
+                       "pin %u cannot be used as GPIO.\n", offset);
+               return -EINVAL;
+       }
 
        pm_runtime_get(&vg->pdev->dev);
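The request-time gate added to byt_gpio_request() above can be read as a single predicate; restated here for illustration (the helper below is hypothetical, only is_special_pin() and BYT_PIN_MUX come from the patch):

/* True when the BIOS/FW left the pin muxed as a GPIO. */
static bool byt_pin_preset_as_gpio(struct byt_gpio *vg, unsigned offset,
                                   u32 conf0)
{
        u32 mux = conf0 & BYT_PIN_MUX;

        /* "special" pins (SCORE 92-93, SUS 11-21) use func mux 001 for GPIO */
        if (is_special_pin(vg, offset))
                return mux == 1;

        /* all other pins use func mux 000 for GPIO */
        return mux == 0;
}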
 
index 155b1b3a0e7a71597d26d3c0c1cab2a525a9ee19..07c81306f2f3bd7b7f39b6ae5a19217a6d947398 100644 (file)
@@ -1042,6 +1042,88 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
        },
 };
 
+/* pin banks of exynos5260 pin-controller 0 */
+static struct samsung_pin_bank exynos5260_pin_banks0[] = {
+       EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
+       EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpa1", 0x04),
+       EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpa2", 0x08),
+       EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpb0", 0x0c),
+       EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpb1", 0x10),
+       EXYNOS_PIN_BANK_EINTG(5, 0x0a0, "gpb2", 0x14),
+       EXYNOS_PIN_BANK_EINTG(8, 0x0c0, "gpb3", 0x18),
+       EXYNOS_PIN_BANK_EINTG(8, 0x0e0, "gpb4", 0x1c),
+       EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpb5", 0x20),
+       EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd0", 0x24),
+       EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpd1", 0x28),
+       EXYNOS_PIN_BANK_EINTG(5, 0x160, "gpd2", 0x2c),
+       EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpe0", 0x30),
+       EXYNOS_PIN_BANK_EINTG(5, 0x1a0, "gpe1", 0x34),
+       EXYNOS_PIN_BANK_EINTG(4, 0x1c0, "gpf0", 0x38),
+       EXYNOS_PIN_BANK_EINTG(8, 0x1e0, "gpf1", 0x3c),
+       EXYNOS_PIN_BANK_EINTG(2, 0x200, "gpk0", 0x40),
+       EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gpx0", 0x00),
+       EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gpx1", 0x04),
+       EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gpx2", 0x08),
+       EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gpx3", 0x0c),
+};
+
+/* pin banks of exynos5260 pin-controller 1 */
+static struct samsung_pin_bank exynos5260_pin_banks1[] = {
+       EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpc0", 0x00),
+       EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpc1", 0x04),
+       EXYNOS_PIN_BANK_EINTG(7, 0x040, "gpc2", 0x08),
+       EXYNOS_PIN_BANK_EINTG(4, 0x060, "gpc3", 0x0c),
+       EXYNOS_PIN_BANK_EINTG(4, 0x080, "gpc4", 0x10),
+};
+
+/* pin banks of exynos5260 pin-controller 2 */
+static struct samsung_pin_bank exynos5260_pin_banks2[] = {
+       EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+       EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+};
+
+/*
+ * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
+ * three gpio/pin-mux/pinconfig controllers.
+ */
+struct samsung_pin_ctrl exynos5260_pin_ctrl[] = {
+       {
+               /* pin-controller instance 0 data */
+               .pin_banks      = exynos5260_pin_banks0,
+               .nr_banks       = ARRAY_SIZE(exynos5260_pin_banks0),
+               .geint_con      = EXYNOS_GPIO_ECON_OFFSET,
+               .geint_mask     = EXYNOS_GPIO_EMASK_OFFSET,
+               .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
+               .weint_con      = EXYNOS_WKUP_ECON_OFFSET,
+               .weint_mask     = EXYNOS_WKUP_EMASK_OFFSET,
+               .weint_pend     = EXYNOS_WKUP_EPEND_OFFSET,
+               .svc            = EXYNOS_SVC_OFFSET,
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .eint_wkup_init = exynos_eint_wkup_init,
+               .label          = "exynos5260-gpio-ctrl0",
+       }, {
+               /* pin-controller instance 1 data */
+               .pin_banks      = exynos5260_pin_banks1,
+               .nr_banks       = ARRAY_SIZE(exynos5260_pin_banks1),
+               .geint_con      = EXYNOS_GPIO_ECON_OFFSET,
+               .geint_mask     = EXYNOS_GPIO_EMASK_OFFSET,
+               .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
+               .svc            = EXYNOS_SVC_OFFSET,
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .label          = "exynos5260-gpio-ctrl1",
+       }, {
+               /* pin-controller instance 2 data */
+               .pin_banks      = exynos5260_pin_banks2,
+               .nr_banks       = ARRAY_SIZE(exynos5260_pin_banks2),
+               .geint_con      = EXYNOS_GPIO_ECON_OFFSET,
+               .geint_mask     = EXYNOS_GPIO_EMASK_OFFSET,
+               .geint_pend     = EXYNOS_GPIO_EPEND_OFFSET,
+               .svc            = EXYNOS_SVC_OFFSET,
+               .eint_gpio_init = exynos_eint_gpio_init,
+               .label          = "exynos5260-gpio-ctrl2",
+       },
+};
+
 /* pin banks of exynos5420 pin-controller 0 */
 static struct samsung_pin_bank exynos5420_pin_banks0[] = {
        EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
index 4779b8e0eee8f1cb90037e78fff60a7f53e9a1ad..e118fb121e024772cbad4cd4b73f32210ad0179c 100644 (file)
@@ -491,7 +491,7 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
                        pin->mux_mode |= IOMUXC_CONFIG_SION;
                pin->config = config & ~IMX_PAD_SION;
 
-               dev_dbg(info->dev, "%s: %d 0x%08lx", info->pins[i].name,
+               dev_dbg(info->dev, "%s: %d 0x%08lx", info->pins[pin_id].name,
                                pin->mux_mode, pin->config);
        }
 
index ef2bf3126da6ca0c10de2ef1a1025c1df744afc9..343f421c7696ef19311fe636a606d4f56674e3ff 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
-#include <linux/of_irq.h>
 #include <linux/spinlock.h>
 
 #include "core.h"
@@ -50,7 +49,6 @@
  * @enabled_irqs:   Bitmap of currently enabled irqs.
  * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
  *                  detection.
- * @wake_irqs:      Bitmap of irqs with requested as wakeup source.
  * @soc;            Reference to soc_data of platform specific data.
  * @regs:           Base address for the TLMM register map.
  */
@@ -65,7 +63,6 @@ struct msm_pinctrl {
 
        DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
-       DECLARE_BITMAP(wake_irqs, MAX_NR_GPIO);
 
        const struct msm_pinctrl_soc_data *soc;
        void __iomem *regs;
@@ -203,42 +200,29 @@ static const struct pinmux_ops msm_pinmux_ops = {
 static int msm_config_reg(struct msm_pinctrl *pctrl,
                          const struct msm_pingroup *g,
                          unsigned param,
-                         s16 *reg,
                          unsigned *mask,
                          unsigned *bit)
 {
        switch (param) {
        case PIN_CONFIG_BIAS_DISABLE:
-               *reg = g->ctl_reg;
-               *bit = g->pull_bit;
-               *mask = 3;
-               break;
        case PIN_CONFIG_BIAS_PULL_DOWN:
-               *reg = g->ctl_reg;
-               *bit = g->pull_bit;
-               *mask = 3;
-               break;
        case PIN_CONFIG_BIAS_PULL_UP:
-               *reg = g->ctl_reg;
                *bit = g->pull_bit;
                *mask = 3;
                break;
        case PIN_CONFIG_DRIVE_STRENGTH:
-               *reg = g->ctl_reg;
                *bit = g->drv_bit;
                *mask = 7;
                break;
+       case PIN_CONFIG_OUTPUT:
+               *bit = g->oe_bit;
+               *mask = 1;
+               break;
        default:
                dev_err(pctrl->dev, "Invalid config param %04x\n", param);
                return -ENOTSUPP;
        }
 
-       if (*reg < 0) {
-               dev_err(pctrl->dev, "Config param %04x not supported on group %s\n",
-                       param, g->name);
-               return -ENOTSUPP;
-       }
-
        return 0;
 }
 
@@ -261,8 +245,10 @@ static int msm_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
 #define MSM_PULL_DOWN  1
 #define MSM_PULL_UP    3
 
-static const unsigned msm_regval_to_drive[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
-static const unsigned msm_drive_to_regval[] = { -1, -1, 0, -1, 1, -1, 2, -1, 3, -1, 4, -1, 5, -1, 6, -1, 7 };
+static unsigned msm_regval_to_drive(u32 val)
+{
+       return (val + 1) * 2;
+}
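/*
 * Illustrative round trip, not part of the patch: the 3-bit DRV field
 * encodes 2, 4, ..., 16 mA, so an 8 mA request is stored by the set path
 * as (8 / 2) - 1 == 3, and msm_regval_to_drive(3) == (3 + 1) * 2 == 8
 * reads it back.
 */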
 
 static int msm_config_group_get(struct pinctrl_dev *pctldev,
                                unsigned int group,
@@ -274,17 +260,16 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
        unsigned mask;
        unsigned arg;
        unsigned bit;
-       s16 reg;
        int ret;
        u32 val;
 
        g = &pctrl->soc->groups[group];
 
-       ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+       ret = msm_config_reg(pctrl, g, param, &mask, &bit);
        if (ret < 0)
                return ret;
 
-       val = readl(pctrl->regs + reg);
+       val = readl(pctrl->regs + g->ctl_reg);
        arg = (val >> bit) & mask;
 
        /* Convert register value to pinconf value */
@@ -299,7 +284,15 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
                arg = arg == MSM_PULL_UP;
                break;
        case PIN_CONFIG_DRIVE_STRENGTH:
-               arg = msm_regval_to_drive[arg];
+               arg = msm_regval_to_drive(arg);
+               break;
+       case PIN_CONFIG_OUTPUT:
+               /* Pin is not output */
+               if (!arg)
+                       return -EINVAL;
+
+               val = readl(pctrl->regs + g->io_reg);
+               arg = !!(val & BIT(g->in_bit));
                break;
        default:
                dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
@@ -324,7 +317,6 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
        unsigned mask;
        unsigned arg;
        unsigned bit;
-       s16 reg;
        int ret;
        u32 val;
        int i;
@@ -335,7 +327,7 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
                param = pinconf_to_config_param(configs[i]);
                arg = pinconf_to_config_argument(configs[i]);
 
-               ret = msm_config_reg(pctrl, g, param, &reg, &mask, &bit);
+               ret = msm_config_reg(pctrl, g, param, &mask, &bit);
                if (ret < 0)
                        return ret;
 
@@ -352,10 +344,24 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
                        break;
                case PIN_CONFIG_DRIVE_STRENGTH:
                        /* Check for invalid values */
-                       if (arg >= ARRAY_SIZE(msm_drive_to_regval))
+                       if (arg > 16 || arg < 2 || (arg % 2) != 0)
                                arg = -1;
                        else
-                               arg = msm_drive_to_regval[arg];
+                               arg = (arg / 2) - 1;
+                       break;
+               case PIN_CONFIG_OUTPUT:
+                       /* set output value */
+                       spin_lock_irqsave(&pctrl->lock, flags);
+                       val = readl(pctrl->regs + g->io_reg);
+                       if (arg)
+                               val |= BIT(g->out_bit);
+                       else
+                               val &= ~BIT(g->out_bit);
+                       writel(val, pctrl->regs + g->io_reg);
+                       spin_unlock_irqrestore(&pctrl->lock, flags);
+
+                       /* enable output */
+                       arg = 1;
                        break;
                default:
                        dev_err(pctrl->dev, "Unsupported config parameter: %x\n",
@@ -370,10 +376,10 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
                }
 
                spin_lock_irqsave(&pctrl->lock, flags);
-               val = readl(pctrl->regs + reg);
+               val = readl(pctrl->regs + g->ctl_reg);
                val &= ~(mask << bit);
                val |= arg << bit;
-               writel(val, pctrl->regs + reg);
+               writel(val, pctrl->regs + g->ctl_reg);
                spin_unlock_irqrestore(&pctrl->lock, flags);
        }
 
@@ -402,8 +408,6 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        u32 val;
 
        g = &pctrl->soc->groups[offset];
-       if (WARN_ON(g->io_reg < 0))
-               return -EINVAL;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -424,8 +428,6 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
        u32 val;
 
        g = &pctrl->soc->groups[offset];
-       if (WARN_ON(g->io_reg < 0))
-               return -EINVAL;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -452,8 +454,6 @@ static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
        u32 val;
 
        g = &pctrl->soc->groups[offset];
-       if (WARN_ON(g->io_reg < 0))
-               return -EINVAL;
 
        val = readl(pctrl->regs + g->io_reg);
        return !!(val & BIT(g->in_bit));
@@ -467,8 +467,6 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        u32 val;
 
        g = &pctrl->soc->groups[offset];
-       if (WARN_ON(g->io_reg < 0))
-               return;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -534,7 +532,7 @@ static void msm_gpio_dbg_show_one(struct seq_file *s,
        pull = (ctl_reg >> g->pull_bit) & 3;
 
        seq_printf(s, " %-8s: %-3s %d", g->name, is_out ? "out" : "in", func);
-       seq_printf(s, " %dmA", msm_regval_to_drive[drive]);
+       seq_printf(s, " %dmA", msm_regval_to_drive(drive));
        seq_printf(s, " %s", pulls[pull]);
 }
 
@@ -617,8 +615,6 @@ static void msm_gpio_irq_mask(struct irq_data *d)
 
        pctrl = irq_data_get_irq_chip_data(d);
        g = &pctrl->soc->groups[d->hwirq];
-       if (WARN_ON(g->intr_cfg_reg < 0))
-               return;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -640,8 +636,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
 
        pctrl = irq_data_get_irq_chip_data(d);
        g = &pctrl->soc->groups[d->hwirq];
-       if (WARN_ON(g->intr_status_reg < 0))
-               return;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -667,8 +661,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
 
        pctrl = irq_data_get_irq_chip_data(d);
        g = &pctrl->soc->groups[d->hwirq];
-       if (WARN_ON(g->intr_status_reg < 0))
-               return;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -693,8 +685,6 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 
        pctrl = irq_data_get_irq_chip_data(d);
        g = &pctrl->soc->groups[d->hwirq];
-       if (WARN_ON(g->intr_cfg_reg < 0))
-               return -EINVAL;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
@@ -783,22 +773,12 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
        struct msm_pinctrl *pctrl;
        unsigned long flags;
-       unsigned ngpio;
 
        pctrl = irq_data_get_irq_chip_data(d);
-       ngpio = pctrl->chip.ngpio;
 
        spin_lock_irqsave(&pctrl->lock, flags);
 
-       if (on) {
-               if (bitmap_empty(pctrl->wake_irqs, ngpio))
-                       enable_irq_wake(pctrl->irq);
-               set_bit(d->hwirq, pctrl->wake_irqs);
-       } else {
-               clear_bit(d->hwirq, pctrl->wake_irqs);
-               if (bitmap_empty(pctrl->wake_irqs, ngpio))
-                       disable_irq_wake(pctrl->irq);
-       }
+       irq_set_irq_wake(pctrl->irq, on);
 
        spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -869,6 +849,12 @@ static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
 static int msm_gpio_init(struct msm_pinctrl *pctrl)
 {
        struct gpio_chip *chip;
@@ -876,10 +862,14 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
        int ret;
        int i;
        int r;
+       unsigned ngpio = pctrl->soc->ngpios;
+
+       if (WARN_ON(ngpio > MAX_NR_GPIO))
+               return -EINVAL;
 
        chip = &pctrl->chip;
        chip->base = 0;
-       chip->ngpio = pctrl->soc->ngpios;
+       chip->ngpio = ngpio;
        chip->label = dev_name(pctrl->dev);
        chip->dev = pctrl->dev;
        chip->owner = THIS_MODULE;
@@ -907,6 +897,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
 
        for (i = 0; i < chip->ngpio; i++) {
                irq = irq_create_mapping(pctrl->domain, i);
+               irq_set_lockdep_class(irq, &gpio_lock_class);
                irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, handle_edge_irq);
                irq_set_chip_data(irq, pctrl);
        }
index 206e782e2daaf5c9bfc0964169a408a3ed92aed1..8fbe9fb19f36e993e34d712c02a47a530a3b915b 100644 (file)
 #ifndef __PINCTRL_MSM_H__
 #define __PINCTRL_MSM_H__
 
-#include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/machine.h>
+struct pinctrl_pin_desc;
 
 /**
  * struct msm_function - a pinmux function
index f944bf2172ef50860c9a18722190adb73fe63928..dde5529807aab71cc78729b8ef0d3e98544385f3 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pinctrl/pinctrl.h>
-#include <linux/pinctrl/pinmux.h>
 
 #include "pinctrl-msm.h"
 
@@ -406,6 +405,7 @@ enum msm8x74_functions {
        MSM_MUX_blsp_i2c6,
        MSM_MUX_blsp_i2c11,
        MSM_MUX_blsp_spi1,
+       MSM_MUX_blsp_spi8,
        MSM_MUX_blsp_uart2,
        MSM_MUX_blsp_uart8,
        MSM_MUX_slimbus,
@@ -416,6 +416,9 @@ static const char * const blsp_i2c2_groups[] = { "gpio6", "gpio7" };
 static const char * const blsp_i2c6_groups[] = { "gpio29", "gpio30" };
 static const char * const blsp_i2c11_groups[] = { "gpio83", "gpio84" };
 static const char * const blsp_spi1_groups[] = { "gpio0", "gpio1", "gpio2", "gpio3" };
+static const char * const blsp_spi8_groups[] = {
+       "gpio45", "gpio46", "gpio47", "gpio48"
+};
 static const char * const blsp_uart2_groups[] = { "gpio4", "gpio5" };
 static const char * const blsp_uart8_groups[] = { "gpio45", "gpio46" };
 static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
@@ -425,6 +428,7 @@ static const struct msm_function msm8x74_functions[] = {
        FUNCTION(blsp_i2c6),
        FUNCTION(blsp_i2c11),
        FUNCTION(blsp_spi1),
+       FUNCTION(blsp_spi8),
        FUNCTION(blsp_uart2),
        FUNCTION(blsp_uart8),
        FUNCTION(slimbus),
@@ -476,10 +480,10 @@ static const struct msm_pingroup msm8x74_groups[] = {
        PINGROUP(42,  NA, NA, NA, NA, NA, NA, NA),
        PINGROUP(43,  NA, NA, NA, NA, NA, NA, NA),
        PINGROUP(44,  NA, NA, NA, NA, NA, NA, NA),
-       PINGROUP(45,  NA, blsp_uart8, NA, NA, NA, NA, NA),
-       PINGROUP(46,  NA, blsp_uart8, NA, NA, NA, NA, NA),
-       PINGROUP(47,  NA, NA, NA, NA, NA, NA, NA),
-       PINGROUP(48,  NA, NA, NA, NA, NA, NA, NA),
+       PINGROUP(45,  blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA),
+       PINGROUP(46,  blsp_spi8, blsp_uart8, NA, NA, NA, NA, NA),
+       PINGROUP(47,  blsp_spi8, NA, NA, NA, NA, NA, NA),
+       PINGROUP(48,  blsp_spi8, NA, NA, NA, NA, NA, NA),
        PINGROUP(49,  NA, NA, NA, NA, NA, NA, NA),
        PINGROUP(50,  NA, NA, NA, NA, NA, NA, NA),
        PINGROUP(51,  NA, NA, NA, NA, NA, NA, NA),
index 53a11114927fc2278456444dc22ae6ac8f1af964..cec7762cf3354592d75d7cc913dfd2a6c0cfbcdf 100644 (file)
@@ -2035,27 +2035,29 @@ static const struct of_device_id nmk_pinctrl_match[] = {
        {},
 };
 
-static int nmk_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int nmk_pinctrl_suspend(struct device *dev)
 {
        struct nmk_pinctrl *npct;
 
-       npct = platform_get_drvdata(pdev);
+       npct = dev_get_drvdata(dev);
        if (!npct)
                return -EINVAL;
 
        return pinctrl_force_sleep(npct->pctl);
 }
 
-static int nmk_pinctrl_resume(struct platform_device *pdev)
+static int nmk_pinctrl_resume(struct device *dev)
 {
        struct nmk_pinctrl *npct;
 
-       npct = platform_get_drvdata(pdev);
+       npct = dev_get_drvdata(dev);
        if (!npct)
                return -EINVAL;
 
        return pinctrl_force_default(npct->pctl);
 }
+#endif
 
 static int nmk_pinctrl_probe(struct platform_device *pdev)
 {
@@ -2144,17 +2146,18 @@ static struct platform_driver nmk_gpio_driver = {
        .probe = nmk_gpio_probe,
 };
 
+static SIMPLE_DEV_PM_OPS(nmk_pinctrl_pm_ops,
+                       nmk_pinctrl_suspend,
+                       nmk_pinctrl_resume);
+
 static struct platform_driver nmk_pinctrl_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "pinctrl-nomadik",
                .of_match_table = nmk_pinctrl_match,
+               .pm = &nmk_pinctrl_pm_ops,
        },
        .probe = nmk_pinctrl_probe,
-#ifdef CONFIG_PM
-       .suspend = nmk_pinctrl_suspend,
-       .resume = nmk_pinctrl_resume,
-#endif
 };
 
 static int __init nmk_gpio_init(void)
index 47ec2e8741e4221ba3dc03a4d1ce6cdba5184bb2..0324d4cb19b22d3172a32aadb12aae5cd4f8c03c 100644 (file)
@@ -1120,6 +1120,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
                .data = (void *)exynos4x12_pin_ctrl },
        { .compatible = "samsung,exynos5250-pinctrl",
                .data = (void *)exynos5250_pin_ctrl },
+       { .compatible = "samsung,exynos5260-pinctrl",
+               .data = (void *)exynos5260_pin_ctrl },
        { .compatible = "samsung,exynos5420-pinctrl",
                .data = (void *)exynos5420_pin_ctrl },
        { .compatible = "samsung,s5pv210-pinctrl",
index 30622d9afa2ed897451944a6d2b8838fcf77ac0d..bab9c21225562fe2c2dbf13ece67aea645c37d76 100644 (file)
@@ -254,6 +254,7 @@ struct samsung_pmx_func {
 extern struct samsung_pin_ctrl exynos4210_pin_ctrl[];
 extern struct samsung_pin_ctrl exynos4x12_pin_ctrl[];
 extern struct samsung_pin_ctrl exynos5250_pin_ctrl[];
+extern struct samsung_pin_ctrl exynos5260_pin_ctrl[];
 extern struct samsung_pin_ctrl exynos5420_pin_ctrl[];
 extern struct samsung_pin_ctrl s3c64xx_pin_ctrl[];
 extern struct samsung_pin_ctrl s3c2412_pin_ctrl[];
index de6459628b4fe9af7d7838975bdc6a92de042bca..81075f2a1d3f87d9ac9d2cf4d62edf94e21bf75f 100644 (file)
@@ -662,6 +662,7 @@ static int pcs_pinconf_get(struct pinctrl_dev *pctldev,
                        break;
                case PIN_CONFIG_DRIVE_STRENGTH:
                case PIN_CONFIG_SLEW_RATE:
+               case PIN_CONFIG_LOW_POWER_MODE:
                default:
                        *config = data;
                        break;
@@ -699,6 +700,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
                        case PIN_CONFIG_INPUT_SCHMITT:
                        case PIN_CONFIG_DRIVE_STRENGTH:
                        case PIN_CONFIG_SLEW_RATE:
+                       case PIN_CONFIG_LOW_POWER_MODE:
                                shift = ffs(func->conf[i].mask) - 1;
                                data &= ~func->conf[i].mask;
                                data |= (arg << shift) & func->conf[i].mask;
@@ -1101,6 +1103,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
                { "pinctrl-single,drive-strength", PIN_CONFIG_DRIVE_STRENGTH, },
                { "pinctrl-single,slew-rate", PIN_CONFIG_SLEW_RATE, },
                { "pinctrl-single,input-schmitt", PIN_CONFIG_INPUT_SCHMITT, },
+               { "pinctrl-single,low-power-mode", PIN_CONFIG_LOW_POWER_MODE, },
        };
        struct pcs_conf_type prop4[] = {
                { "pinctrl-single,bias-pullup", PIN_CONFIG_BIAS_PULL_UP, },
index 320c27363cc87293ed0448cc0054b3b13c884995..bd725b0a43414b77ec068003ac5ddf995824cf21 100644 (file)
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/of_gpio.h>
 #include <linux/of_address.h>
 #include <linux/regmap.h>
@@ -266,11 +271,59 @@ struct st_pctl_group {
        struct st_pinconf       *pin_conf;
 };
 
+/*
+ * Edge triggers are not supported at the hardware level; they are emulated
+ * in software on top of the hardware's level-trigger support.
+ * Software keeps a virtual register (EDGE_CONF) holding the edge-trigger
+ * configuration of each GPIO pin in a bank.
+ *
+ * Each bank has a 32-bit EDGE_CONF register, divided into 8 fields of
+ * 4 bits, one field per pin of the bank.
+ *
+ * Bit allocation per pin:
+ * Bits:  [0 - 3] | [4 - 7] | [8 - 11] | ... ... ... ... | [28 - 31]
+ *       --------------------------------------------------------
+ *       |  pin-0  |  pin-1 |  pin-2  | ... ... ... ... |  pin-7 |
+ *       --------------------------------------------------------
+ *
+ *  A pin can have one of the following values in its edge configuration field:
+ *
+ *     -------   ----------------------------
+ *     [0-3]   - Description
+ *     -------   ----------------------------
+ *     0000    - No edge IRQ.
+ *     0001    - Falling edge IRQ.
+ *     0010    - Rising edge IRQ.
+ *     0011    - Rising and falling edge IRQ.
+ *     -------   ----------------------------
+ */
+
+#define ST_IRQ_EDGE_CONF_BITS_PER_PIN  4
+#define ST_IRQ_EDGE_MASK               0xf
+#define ST_IRQ_EDGE_FALLING            BIT(0)
+#define ST_IRQ_EDGE_RISING             BIT(1)
+#define ST_IRQ_EDGE_BOTH               (BIT(0) | BIT(1))
+
+#define ST_IRQ_RISING_EDGE_CONF(pin) \
+       (ST_IRQ_EDGE_RISING << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_FALLING_EDGE_CONF(pin) \
+       (ST_IRQ_EDGE_FALLING << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_BOTH_EDGE_CONF(pin) \
+       (ST_IRQ_EDGE_BOTH << (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN))
+
+#define ST_IRQ_EDGE_CONF(conf, pin) \
+       (conf >> (pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN) & ST_IRQ_EDGE_MASK)
+
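The EDGE_CONF bookkeeping above packs a 4-bit edge configuration per pin into one 32-bit value per bank (bank->irq_edge_conf below). A standalone sketch of that encoding, using local macro and function names that only mirror the ST_IRQ_* definitions:

#include <stdio.h>

/* Mirrors the layout above: 4 bits per pin, 8 pins per bank. */
#define EDGE_BITS_PER_PIN       4
#define EDGE_MASK               0xfUL
#define EDGE_FALLING            0x1UL
#define EDGE_RISING             0x2UL

static unsigned long edge_conf_set(unsigned long conf, int pin, unsigned long val)
{
        conf &= ~(EDGE_MASK << (pin * EDGE_BITS_PER_PIN));
        conf |= val << (pin * EDGE_BITS_PER_PIN);
        return conf;
}

static unsigned long edge_conf_get(unsigned long conf, int pin)
{
        return (conf >> (pin * EDGE_BITS_PER_PIN)) & EDGE_MASK;
}

int main(void)
{
        unsigned long conf = 0;

        conf = edge_conf_set(conf, 3, EDGE_RISING);                 /* pin 3: rising */
        conf = edge_conf_set(conf, 5, EDGE_RISING | EDGE_FALLING);  /* pin 5: both   */

        /* prints conf = 0x302000, pin3 = 2, pin5 = 3 */
        printf("conf = %#lx, pin3 = %lu, pin5 = %lu\n",
               conf, edge_conf_get(conf, 3), edge_conf_get(conf, 5));
        return 0;
}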
 struct st_gpio_bank {
        struct gpio_chip                gpio_chip;
        struct pinctrl_gpio_range       range;
        void __iomem                    *base;
        struct st_pio_control           pc;
+       struct  irq_domain              *domain;
+       unsigned long                   irq_edge_conf;
+       spinlock_t                      lock;
 };
 
 struct st_pinctrl {
@@ -284,6 +337,7 @@ struct st_pinctrl {
        int                             ngroups;
        struct regmap                   *regmap;
        const struct st_pctl_data       *data;
+       void __iomem                    *irqmux_base;
 };
 
 /* SOC specific data */
@@ -330,12 +384,25 @@ static unsigned int stih416_delays[] = {0, 300, 500, 750, 1000, 1250, 1500,
 static const struct st_pctl_data  stih416_data = {
        .rt_style       = st_retime_style_dedicated,
        .input_delays   = stih416_delays,
-       .ninput_delays  = 14,
+       .ninput_delays  = ARRAY_SIZE(stih416_delays),
        .output_delays  = stih416_delays,
-       .noutput_delays = 14,
+       .noutput_delays = ARRAY_SIZE(stih416_delays),
        .alt = 0, .oe = 40, .pu = 50, .od = 60, .rt = 100,
 };
 
+static const struct st_pctl_data stih407_flashdata = {
+       .rt_style       = st_retime_style_none,
+       .input_delays   = stih416_delays,
+       .ninput_delays  = ARRAY_SIZE(stih416_delays),
+       .output_delays  = stih416_delays,
+       .noutput_delays = ARRAY_SIZE(stih416_delays),
+       .alt = 0,
+       .oe = -1, /* Not Available */
+       .pu = -1, /* Not Available */
+       .od = 60,
+       .rt = 100,
+};
+
 /* Low level functions.. */
 static inline int st_gpio_bank(int gpio)
 {
@@ -356,25 +423,29 @@ static void st_pinconf_set_config(struct st_pio_control *pc,
        unsigned int oe_value, pu_value, od_value;
        unsigned long mask = BIT(pin);
 
-       regmap_field_read(output_enable, &oe_value);
-       regmap_field_read(pull_up, &pu_value);
-       regmap_field_read(open_drain, &od_value);
-
-       /* Clear old values */
-       oe_value &= ~mask;
-       pu_value &= ~mask;
-       od_value &= ~mask;
-
-       if (config & ST_PINCONF_OE)
-               oe_value |= mask;
-       if (config & ST_PINCONF_PU)
-               pu_value |= mask;
-       if (config & ST_PINCONF_OD)
-               od_value |= mask;
-
-       regmap_field_write(output_enable, oe_value);
-       regmap_field_write(pull_up, pu_value);
-       regmap_field_write(open_drain, od_value);
+       if (output_enable) {
+               regmap_field_read(output_enable, &oe_value);
+               oe_value &= ~mask;
+               if (config & ST_PINCONF_OE)
+                       oe_value |= mask;
+               regmap_field_write(output_enable, oe_value);
+       }
+
+       if (pull_up) {
+               regmap_field_read(pull_up, &pu_value);
+               pu_value &= ~mask;
+               if (config & ST_PINCONF_PU)
+                       pu_value |= mask;
+               regmap_field_write(pull_up, pu_value);
+       }
+
+       if (open_drain) {
+               regmap_field_read(open_drain, &od_value);
+               od_value &= ~mask;
+               if (config & ST_PINCONF_OD)
+                       od_value |= mask;
+               regmap_field_write(open_drain, od_value);
+       }
 }
 
 static void st_pctl_set_function(struct st_pio_control *pc,
@@ -385,6 +456,9 @@ static void st_pctl_set_function(struct st_pio_control *pc,
        int pin = st_gpio_pin(pin_id);
        int offset = pin * 4;
 
+       if (!alt)
+               return;
+
        regmap_field_read(alt, &val);
        val &= ~(0xf << offset);
        val |= function << offset;
@@ -522,17 +596,23 @@ static void st_pinconf_get_direction(struct st_pio_control *pc,
 {
        unsigned int oe_value, pu_value, od_value;
 
-       regmap_field_read(pc->oe, &oe_value);
-       regmap_field_read(pc->pu, &pu_value);
-       regmap_field_read(pc->od, &od_value);
+       if (pc->oe) {
+               regmap_field_read(pc->oe, &oe_value);
+               if (oe_value & BIT(pin))
+                       ST_PINCONF_PACK_OE(*config);
+       }
 
-       if (oe_value & BIT(pin))
-               ST_PINCONF_PACK_OE(*config);
-       if (pu_value & BIT(pin))
-               ST_PINCONF_PACK_PU(*config);
-       if (od_value & BIT(pin))
-               ST_PINCONF_PACK_OD(*config);
+       if (pc->pu) {
+               regmap_field_read(pc->pu, &pu_value);
+               if (pu_value & BIT(pin))
+                       ST_PINCONF_PACK_PU(*config);
+       }
 
+       if (pc->od) {
+               regmap_field_read(pc->od, &od_value);
+               if (od_value & BIT(pin))
+                       ST_PINCONF_PACK_OD(*config);
+       }
 }
 
 static int st_pinconf_get_retime_packed(struct st_pinctrl *info,
@@ -1051,8 +1131,21 @@ static int st_pctl_dt_setup_retime(struct st_pinctrl *info,
        return -EINVAL;
 }
 
-static int st_parse_syscfgs(struct st_pinctrl *info,
-               int bank, struct device_node *np)
+
+static struct regmap_field *st_pc_get_value(struct device *dev,
+                                           struct regmap *regmap, int bank,
+                                           int data, int lsb, int msb)
+{
+       struct reg_field reg = REG_FIELD((data + bank) * 4, lsb, msb);
+
+       if (data < 0)
+               return NULL;
+
+       return devm_regmap_field_alloc(dev, regmap, reg);
+}
+
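st_pc_get_value() above turns a negative per-SoC register index (such as the .oe and .pu fields of stih407_flashdata) into a NULL regmap_field, and the earlier st_pinconf_set_config()/st_pinconf_get_direction() hunks skip any field that is NULL. A kernel-style sketch of that "optional field" read-modify-write pattern; the helper name and its caller-supplied bit are illustrative only:

#include <linux/bitops.h>
#include <linux/regmap.h>

/*
 * Set or clear one bit in an optional syscfg field; a NULL field means
 * the SoC simply has no such register and the request is silently skipped.
 */
static void optional_field_update_bit(struct regmap_field *field,
                                      unsigned int bit, bool set)
{
        unsigned int val;

        if (!field)
                return;

        regmap_field_read(field, &val);
        if (set)
                val |= BIT(bit);
        else
                val &= ~BIT(bit);
        regmap_field_write(field, val);
}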
+static void st_parse_syscfgs(struct st_pinctrl *info, int bank,
+                            struct device_node *np)
 {
        const struct st_pctl_data *data = info->data;
        /**
@@ -1062,29 +1155,21 @@ static int st_parse_syscfgs(struct st_pinctrl *info,
         */
        int lsb = (bank%4) * ST_GPIO_PINS_PER_BANK;
        int msb = lsb + ST_GPIO_PINS_PER_BANK - 1;
-       struct reg_field alt_reg = REG_FIELD((data->alt + bank) * 4, 0, 31);
-       struct reg_field oe_reg = REG_FIELD((data->oe + bank/4) * 4, lsb, msb);
-       struct reg_field pu_reg = REG_FIELD((data->pu + bank/4) * 4, lsb, msb);
-       struct reg_field od_reg = REG_FIELD((data->od + bank/4) * 4, lsb, msb);
        struct st_pio_control *pc = &info->banks[bank].pc;
        struct device *dev = info->dev;
        struct regmap *regmap  = info->regmap;
 
-       pc->alt = devm_regmap_field_alloc(dev, regmap, alt_reg);
-       pc->oe = devm_regmap_field_alloc(dev, regmap, oe_reg);
-       pc->pu = devm_regmap_field_alloc(dev, regmap, pu_reg);
-       pc->od = devm_regmap_field_alloc(dev, regmap, od_reg);
-
-       if (IS_ERR(pc->alt) || IS_ERR(pc->oe) ||
-                       IS_ERR(pc->pu) || IS_ERR(pc->od))
-               return -EINVAL;
+       pc->alt = st_pc_get_value(dev, regmap, bank, data->alt, 0, 31);
+       pc->oe = st_pc_get_value(dev, regmap, bank/4, data->oe, lsb, msb);
+       pc->pu = st_pc_get_value(dev, regmap, bank/4, data->pu, lsb, msb);
+       pc->od = st_pc_get_value(dev, regmap, bank/4, data->od, lsb, msb);
 
        /* retime available for all pins by default */
        pc->rt_pin_mask = 0xff;
        of_property_read_u32(np, "st,retime-pin-mask", &pc->rt_pin_mask);
        st_pctl_dt_setup_retime(info, bank, pc);
 
-       return 0;
+       return;
 }
 
 /*
@@ -1200,6 +1285,194 @@ static int st_pctl_parse_functions(struct device_node *np,
        return 0;
 }
 
+static int st_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+       struct st_gpio_bank *bank = gpio_chip_to_bank(chip);
+       int irq = -ENXIO;
+
+       if (offset < chip->ngpio)
+               irq = irq_find_mapping(bank->domain, offset);
+
+       dev_info(chip->dev, "%s: request IRQ for GPIO %d, return %d\n",
+                               chip->label, offset + chip->base, irq);
+       return irq;
+}
+
+static void st_gpio_irq_mask(struct irq_data *d)
+{
+       struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+       writel(BIT(d->hwirq), bank->base + REG_PIO_CLR_PMASK);
+}
+
+static void st_gpio_irq_unmask(struct irq_data *d)
+{
+       struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+       writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
+}
+
+static unsigned int st_gpio_irq_startup(struct irq_data *d)
+{
+       struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+       if (gpio_lock_as_irq(&bank->gpio_chip, d->hwirq))
+               dev_err(bank->gpio_chip.dev,
+                       "unable to lock HW IRQ %lu for IRQ\n",
+                       d->hwirq);
+
+       st_gpio_irq_unmask(d);
+
+       return 0;
+}
+
+static void st_gpio_irq_shutdown(struct irq_data *d)
+{
+       struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+       st_gpio_irq_mask(d);
+       gpio_unlock_as_irq(&bank->gpio_chip, d->hwirq);
+}
+
+static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
+{
+       struct st_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+       unsigned long flags;
+       int comp, pin = d->hwirq;
+       u32 val;
+       u32 pin_edge_conf = 0;
+
+       switch (type) {
+       case IRQ_TYPE_LEVEL_HIGH:
+               comp = 0;
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               comp = 0;
+               pin_edge_conf = ST_IRQ_FALLING_EDGE_CONF(pin);
+               break;
+       case IRQ_TYPE_LEVEL_LOW:
+               comp = 1;
+               break;
+       case IRQ_TYPE_EDGE_RISING:
+               comp = 1;
+               pin_edge_conf = ST_IRQ_RISING_EDGE_CONF(pin);
+               break;
+       case IRQ_TYPE_EDGE_BOTH:
+               comp = st_gpio_get(&bank->gpio_chip, pin);
+               pin_edge_conf = ST_IRQ_BOTH_EDGE_CONF(pin);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       spin_lock_irqsave(&bank->lock, flags);
+       bank->irq_edge_conf &=  ~(ST_IRQ_EDGE_MASK << (
+                               pin * ST_IRQ_EDGE_CONF_BITS_PER_PIN));
+       bank->irq_edge_conf |= pin_edge_conf;
+       spin_unlock_irqrestore(&bank->lock, flags);
+
+       val = readl(bank->base + REG_PIO_PCOMP);
+       val &= ~BIT(pin);
+       val |= (comp << pin);
+       writel(val, bank->base + REG_PIO_PCOMP);
+
+       return 0;
+}
+
+/*
+ * As edge triggers are not supported at the hardware level, they are
+ * emulated in software on top of the hardware's level-trigger support.
+ *
+ * Steps for detecting a rising edge interrupt in software:
+ *
+ * Step 1: CONFIGURE the pin to detect level LOW interrupts.
+ *
+ * Step 2: DETECT the level LOW interrupt; in the irqmux/gpio-bank interrupt
+ * handler, if the value of the pin is low, CONFIGURE the pin for a level HIGH
+ * interrupt. Do NOT call the actual interrupt handler for the pin at this
+ * stage.
+ *
+ * Step 3: DETECT the level HIGH interrupt; in the irqmux/gpio-bank interrupt
+ * handler, if the value of the pin is HIGH, CONFIGURE the pin for a level LOW
+ * interrupt and then DISPATCH the interrupt to the pin's interrupt handler.
+ *
+ *              step-1  ________     __________
+ *                             |     | step - 3
+ *                             |     |
+ *                     step -2 |_____|
+ *
+ * A falling edge is detected in the same way.
+ */
+static void __gpio_irq_handler(struct st_gpio_bank *bank)
+{
+       unsigned long port_in, port_mask, port_comp, active_irqs;
+       unsigned long bank_edge_mask, flags;
+       int n, val, ecfg;
+
+       spin_lock_irqsave(&bank->lock, flags);
+       bank_edge_mask = bank->irq_edge_conf;
+       spin_unlock_irqrestore(&bank->lock, flags);
+
+       for (;;) {
+               port_in = readl(bank->base + REG_PIO_PIN);
+               port_comp = readl(bank->base + REG_PIO_PCOMP);
+               port_mask = readl(bank->base + REG_PIO_PMASK);
+
+               active_irqs = (port_in ^ port_comp) & port_mask;
+
+               if (active_irqs == 0)
+                       break;
+
+               for_each_set_bit(n, &active_irqs, BITS_PER_LONG) {
+                       /* check if we are detecting fake edges ... */
+                       ecfg = ST_IRQ_EDGE_CONF(bank_edge_mask, n);
+
+                       if (ecfg) {
+                               /* edge detection. */
+                               val = st_gpio_get(&bank->gpio_chip, n);
+
+                               writel(BIT(n),
+                                       val ? bank->base + REG_PIO_SET_PCOMP :
+                                       bank->base + REG_PIO_CLR_PCOMP);
+
+                               if (ecfg != ST_IRQ_EDGE_BOTH &&
+                                       !((ecfg & ST_IRQ_EDGE_FALLING) ^ val))
+                                       continue;
+                       }
+
+                       generic_handle_irq(irq_find_mapping(bank->domain, n));
+               }
+       }
+}
+
+static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+       /* interrupt dedicated per bank */
+       struct irq_chip *chip = irq_get_chip(irq);
+       struct st_gpio_bank *bank = irq_get_handler_data(irq);
+
+       chained_irq_enter(chip, desc);
+       __gpio_irq_handler(bank);
+       chained_irq_exit(chip, desc);
+}
+
+static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_get_chip(irq);
+       struct st_pinctrl *info = irq_get_handler_data(irq);
+       unsigned long status;
+       int n;
+
+       chained_irq_enter(chip, desc);
+
+       status = readl(info->irqmux_base);
+
+       for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+               __gpio_irq_handler(&info->banks[n]);
+
+       chained_irq_exit(chip, desc);
+}
+
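With the .to_irq hook and per-bank irq_domain added below, a consumer can translate a GPIO number into a Linux IRQ and request an edge trigger, which this driver then emulates on top of the level-only hardware as described above. A hedged consumer-side sketch; the GPIO number, label and handler below are made up for illustration:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_setup(void)
{
        unsigned gpio = 42;             /* hypothetical PIO line */
        int irq, ret;

        ret = gpio_request_one(gpio, GPIOF_IN, "example");
        if (ret)
                return ret;

        irq = gpio_to_irq(gpio);        /* ends up in st_gpio_to_irq() */
        if (irq < 0)
                return irq;

        /* The falling edge is emulated via the EDGE_CONF machinery above. */
        return request_irq(irq, example_isr, IRQF_TRIGGER_FALLING,
                           "example", NULL);
}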
 static struct gpio_chip st_gpio_template = {
        .request                = st_gpio_request,
        .free                   = st_gpio_free,
@@ -1210,6 +1483,34 @@ static struct gpio_chip st_gpio_template = {
        .ngpio                  = ST_GPIO_PINS_PER_BANK,
        .of_gpio_n_cells        = 1,
        .of_xlate               = st_gpio_xlate,
+       .to_irq                 = st_gpio_to_irq,
+};
+
+static struct irq_chip st_gpio_irqchip = {
+       .name           = "GPIO",
+       .irq_mask       = st_gpio_irq_mask,
+       .irq_unmask     = st_gpio_irq_unmask,
+       .irq_set_type   = st_gpio_irq_set_type,
+       .irq_startup    = st_gpio_irq_startup,
+       .irq_shutdown   = st_gpio_irq_shutdown,
+};
+
+static int st_gpio_irq_domain_map(struct irq_domain *h,
+                       unsigned int virq, irq_hw_number_t hw)
+{
+       struct st_gpio_bank *bank = h->host_data;
+
+       irq_set_chip(virq, &st_gpio_irqchip);
+       irq_set_handler(virq, handle_simple_irq);
+       set_irq_flags(virq, IRQF_VALID);
+       irq_set_chip_data(virq, bank);
+
+       return 0;
+}
+
+static struct irq_domain_ops st_gpio_irq_ops = {
+       .map    = st_gpio_irq_domain_map,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 static int st_gpiolib_register_bank(struct st_pinctrl *info,
@@ -1219,8 +1520,8 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
        struct pinctrl_gpio_range *range = &bank->range;
        struct device *dev = info->dev;
        int bank_num = of_alias_get_id(np, "gpio");
-       struct resource res;
-       int err;
+       struct resource res, irq_res;
+       int gpio_irq = 0, err, i;
 
        if (of_address_to_resource(np, 0, &res))
                return -ENODEV;
@@ -1233,6 +1534,7 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
        bank->gpio_chip.base = bank_num * ST_GPIO_PINS_PER_BANK;
        bank->gpio_chip.ngpio = ST_GPIO_PINS_PER_BANK;
        bank->gpio_chip.of_node = np;
+       spin_lock_init(&bank->lock);
 
        of_property_read_string(np, "st,bank-name", &range->name);
        bank->gpio_chip.label = range->name;
@@ -1248,6 +1550,51 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
        }
        dev_info(dev, "%s bank added.\n", range->name);
 
+       /*
+        * A GPIO bank can have one of two possible interrupt wirings.
+        *
+        * The first type is via an irqmux: a single interrupt is shared by
+        * multiple GPIO banks, which reduces the overall number of interrupt
+        * lines required. All these banks belong to a single pin controller.
+        *                _________
+        *               |         |----> [gpio-bank (n)    ]
+        *               |         |----> [gpio-bank (n + 1)]
+        *      [irqN]-- | irq-mux |----> [gpio-bank (n + 2)]
+        *               |         |----> [gpio-bank (...  )]
+        *               |_________|----> [gpio-bank (n + 7)]
+        *
+        * The second type has a dedicated interrupt per GPIO bank.
+        *
+        *      [irqN]----> [gpio-bank (n)]
+        */
+
+       if (of_irq_to_resource(np, 0, &irq_res)) {
+               gpio_irq = irq_res.start;
+               irq_set_chained_handler(gpio_irq, st_gpio_irq_handler);
+               irq_set_handler_data(gpio_irq, bank);
+       }
+
+       if (info->irqmux_base > 0 || gpio_irq > 0) {
+               /* Setup IRQ domain */
+               bank->domain  = irq_domain_add_linear(np,
+                                               ST_GPIO_PINS_PER_BANK,
+                                               &st_gpio_irq_ops, bank);
+               if (!bank->domain) {
+                       dev_err(dev, "Failed to add irq domain for %s\n",
+                               np->full_name);
+               } else  {
+                       for (i = 0; i < ST_GPIO_PINS_PER_BANK; i++) {
+                               if (irq_create_mapping(bank->domain, i) < 0)
+                                       dev_err(dev,
+                                               "Failed to map IRQ %i\n", i);
+                       }
+               }
+
+       } else {
+               dev_info(dev, "No IRQ support for %s bank\n", np->full_name);
+       }
+
        return 0;
 }
 
@@ -1264,6 +1611,10 @@ static struct of_device_id st_pctl_of_match[] = {
        { .compatible = "st,stih416-rear-pinctrl", .data = &stih416_data},
        { .compatible = "st,stih416-fvdp-fe-pinctrl", .data = &stih416_data},
        { .compatible = "st,stih416-fvdp-lite-pinctrl", .data = &stih416_data},
+       { .compatible = "st,stih407-sbc-pinctrl", .data = &stih416_data},
+       { .compatible = "st,stih407-front-pinctrl", .data = &stih416_data},
+       { .compatible = "st,stih407-rear-pinctrl", .data = &stih416_data},
+       { .compatible = "st,stih407-flash-pinctrl", .data = &stih407_flashdata},
        { /* sentinel */ }
 };
 
@@ -1276,6 +1627,8 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
        struct device_node *np = pdev->dev.of_node;
        struct device_node *child;
        int grp_index = 0;
+       int irq = 0;
+       struct resource *res;
 
        st_pctl_dt_child_count(info, np);
        if (!info->nbanks) {
@@ -1306,6 +1659,21 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
        }
        info->data = of_match_node(st_pctl_of_match, np)->data;
 
+       irq = platform_get_irq(pdev, 0);
+
+       if (irq > 0) {
+               res = platform_get_resource_byname(pdev,
+                                       IORESOURCE_MEM, "irqmux");
+               info->irqmux_base = devm_ioremap_resource(&pdev->dev, res);
+
+               if (IS_ERR(info->irqmux_base))
+                       return PTR_ERR(info->irqmux_base);
+
+               irq_set_chained_handler(irq, st_gpio_irqmux_handler);
+               irq_set_handler_data(irq, info);
+
+       }
+
        pctl_desc->npins = info->nbanks * ST_GPIO_PINS_PER_BANK;
        pdesc = devm_kzalloc(&pdev->dev,
                        sizeof(*pdesc) * pctl_desc->npins, GFP_KERNEL);
index 6fd8d4d951406aec5f01c021c2c80b84ebf4f4b0..3d6066988a7251cc0e234c8033dde50aff79ae0f 100644 (file)
@@ -1932,27 +1932,27 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = {
        SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x4, "mmc0")),         /* D1 */
+                 SUNXI_FUNCTION(0x2, "mmc0")),         /* D1 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x4, "mmc0")),         /* D0 */
+                 SUNXI_FUNCTION(0x2, "mmc0")),         /* D0 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x4, "mmc0")),         /* CLK */
+                 SUNXI_FUNCTION(0x2, "mmc0")),         /* CLK */
        SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x4, "mmc0")),         /* CMD */
+                 SUNXI_FUNCTION(0x2, "mmc0")),         /* CMD */
        SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x4, "mmc0")),         /* D3 */
+                 SUNXI_FUNCTION(0x2, "mmc0")),         /* D3 */
        SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
                  SUNXI_FUNCTION(0x0, "gpio_in"),
                  SUNXI_FUNCTION(0x1, "gpio_out"),
-                 SUNXI_FUNCTION(0x4, "mmc0")),         /* D2 */
+                 SUNXI_FUNCTION(0x2, "mmc0")),         /* D2 */
        /* Hole */
        SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
                  SUNXI_FUNCTION(0x0, "gpio_in"),
index e767355ab0ad7a02fc5a1ff8cb62ad0cd943f698..65458096f41e2e3c7b998489311f573629523a2c 100644 (file)
@@ -39,6 +39,7 @@ struct tegra_pmx {
        struct pinctrl_dev *pctl;
 
        const struct tegra_pinctrl_soc_data *soc;
+       const char **group_pins;
 
        int nbanks;
        void __iomem **regs;
@@ -620,6 +621,8 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
        struct tegra_pmx *pmx;
        struct resource *res;
        int i;
+       const char **group_pins;
+       int fn, gn, gfn;
 
        pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
        if (!pmx) {
@@ -629,6 +632,41 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
        pmx->dev = &pdev->dev;
        pmx->soc = soc_data;
 
+       /*
+        * Each mux group will appear in the group lists of 4 functions.
+        * This over-allocates slightly, since not all groups are mux groups.
+        */
+       pmx->group_pins = devm_kzalloc(&pdev->dev,
+               soc_data->ngroups * 4 * sizeof(*pmx->group_pins),
+               GFP_KERNEL);
+       if (!pmx->group_pins)
+               return -ENOMEM;
+
+       group_pins = pmx->group_pins;
+       for (fn = 0; fn < soc_data->nfunctions; fn++) {
+               struct tegra_function *func = &soc_data->functions[fn];
+
+               func->groups = group_pins;
+
+               for (gn = 0; gn < soc_data->ngroups; gn++) {
+                       const struct tegra_pingroup *g = &soc_data->groups[gn];
+
+                       if (g->mux_reg == -1)
+                               continue;
+
+                       for (gfn = 0; gfn < 4; gfn++)
+                               if (g->funcs[gfn] == fn)
+                                       break;
+                       if (gfn == 4)
+                               continue;
+
+                       BUG_ON(group_pins - pmx->group_pins >=
+                               soc_data->ngroups * 4);
+                       *group_pins++ = g->name;
+                       func->ngroups++;
+               }
+       }
+
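The loop above builds, at probe time, the list of group names behind each function by scanning every mux group's four-entry funcs[] table; the shared group_pins array is sized at ngroups * 4 because each mux group lands in the lists of four functions. A reduced standalone sketch of that reverse mapping; the data shapes are simplified stand-ins, not the real tegra_pingroup layout:

#include <stdio.h>

#define NFUNCS  3
#define NGROUPS 4

struct group    { const char *name; int mux_reg; int funcs[4]; };
struct function { const char *name; const char **groups; unsigned ngroups; };

int main(void)
{
        static const struct group groups[NGROUPS] = {
                { "pin_a", 0x00, { 0, 1, 2, 2 } },
                { "pin_b", 0x04, { 1, 1, 2, 0 } },
                { "pin_c",   -1, { 0, 0, 0, 0 } },      /* not a mux group */
                { "pin_d", 0x08, { 2, 0, 1, 1 } },
        };
        struct function funcs[NFUNCS] = { { "uart" }, { "spi" }, { "i2c" } };
        const char *group_pins[NGROUPS * 4];            /* over-allocated, as above */
        const char **next = group_pins;

        for (int f = 0; f < NFUNCS; f++) {
                funcs[f].groups = next;
                for (int g = 0; g < NGROUPS; g++) {
                        int s;

                        if (groups[g].mux_reg == -1)
                                continue;
                        for (s = 0; s < 4; s++)
                                if (groups[g].funcs[s] == f)
                                        break;
                        if (s == 4)
                                continue;
                        *next++ = groups[g].name;
                        funcs[f].ngroups++;
                }
        }

        for (int f = 0; f < NFUNCS; f++) {
                printf("%s:", funcs[f].name);
                for (unsigned i = 0; i < funcs[f].ngroups; i++)
                        printf(" %s", funcs[f].groups[i]);
                printf("\n");
        }
        return 0;
}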
        tegra_pinctrl_gpio_range.npins = pmx->soc->ngpios;
        tegra_pinctrl_desc.name = dev_name(&pdev->dev);
        tegra_pinctrl_desc.pins = pmx->soc->pins;
index 817f7061dc4cd0b332488dc92c2c647cd19bc149..6053832d433e7b9667c8a8f97bf732990412896f 100644 (file)
@@ -72,7 +72,7 @@ enum tegra_pinconf_tristate {
  */
 struct tegra_function {
        const char *name;
-       const char * const *groups;
+       const char **groups;
        unsigned ngroups;
 };
 
@@ -193,7 +193,7 @@ struct tegra_pinctrl_soc_data {
        unsigned ngpios;
        const struct pinctrl_pin_desc *pins;
        unsigned npins;
-       const struct tegra_function *functions;
+       struct tegra_function *functions;
        unsigned nfunctions;
        const struct tegra_pingroup *groups;
        unsigned ngroups;
index 93c9e3899d5e4a0230e91a7269c68b7d7d8433f4..63fe7619d3ff9d96c0978765fe2de63608e53e77 100644 (file)
@@ -1,10 +1,8 @@
 /*
- * Pinctrl data and driver for the NVIDIA Tegra114 pinmux
+ * Pinctrl data for the NVIDIA Tegra114 pinmux
  *
  * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
  *
- * Author:  Pritesh Raithatha <praithatha@nvidia.com>
- *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
@@ -13,9 +11,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/module.h>
 #define TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5                _GPIO(245)
 
 /* All non-GPIO pins follow */
-#define NUM_GPIOS      (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
-#define _PIN(offset)   (NUM_GPIOS + (offset))
+#define NUM_GPIOS                              (TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5 + 1)
+#define _PIN(offset)                           (NUM_GPIOS + (offset))
 
 /* Non-GPIO pins */
 #define TEGRA_PIN_CORE_PWR_REQ                 _PIN(0)
 #define TEGRA_PIN_PWR_INT_N                    _PIN(2)
 #define TEGRA_PIN_RESET_OUT_N                  _PIN(3)
 #define TEGRA_PIN_OWR                          _PIN(4)
+#define TEGRA_PIN_JTAG_RTCK                    _PIN(5)
+#define TEGRA_PIN_CLK_32K_IN                   _PIN(6)
+#define TEGRA_PIN_GMI_CLK_LB                   _PIN(7)
 
-static const struct pinctrl_pin_desc  tegra114_pins[] = {
+static const struct pinctrl_pin_desc tegra114_pins[] = {
        PINCTRL_PIN(TEGRA_PIN_CLK_32K_OUT_PA0, "CLK_32K_OUT PA0"),
        PINCTRL_PIN(TEGRA_PIN_UART3_CTS_N_PA1, "UART3_CTS_N PA1"),
        PINCTRL_PIN(TEGRA_PIN_DAP2_FS_PA2, "DAP2_FS PA2"),
@@ -385,9 +383,12 @@ static const struct pinctrl_pin_desc  tegra114_pins[] = {
        PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
        PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
        PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
-       PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
        PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
        PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
+       PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
+       PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
+       PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
 };
 
 static const unsigned clk_32k_out_pa0_pins[] = {
@@ -1074,10 +1075,6 @@ static const unsigned cpu_pwr_req_pins[] = {
        TEGRA_PIN_CPU_PWR_REQ,
 };
 
-static const unsigned owr_pins[] = {
-       TEGRA_PIN_OWR,
-};
-
 static const unsigned pwr_int_n_pins[] = {
        TEGRA_PIN_PWR_INT_N,
 };
@@ -1086,6 +1083,22 @@ static const unsigned reset_out_n_pins[] = {
        TEGRA_PIN_RESET_OUT_N,
 };
 
+static const unsigned owr_pins[] = {
+       TEGRA_PIN_OWR,
+};
+
+static const unsigned jtag_rtck_pins[] = {
+       TEGRA_PIN_JTAG_RTCK,
+};
+
+static const unsigned clk_32k_in_pins[] = {
+       TEGRA_PIN_CLK_32K_IN,
+};
+
+static const unsigned gmi_clk_lb_pins[] = {
+       TEGRA_PIN_GMI_CLK_LB,
+};
+
 static const unsigned drive_ao1_pins[] = {
        TEGRA_PIN_KB_ROW0_PR0,
        TEGRA_PIN_KB_ROW1_PR1,
@@ -1127,7 +1140,6 @@ static const unsigned drive_at1_pins[] = {
        TEGRA_PIN_GMI_AD13_PH5,
        TEGRA_PIN_GMI_AD14_PH6,
        TEGRA_PIN_GMI_AD15_PH7,
-
        TEGRA_PIN_GMI_IORDY_PI5,
        TEGRA_PIN_GMI_CS7_N_PI6,
 };
@@ -1141,15 +1153,12 @@ static const unsigned drive_at2_pins[] = {
        TEGRA_PIN_GMI_AD5_PG5,
        TEGRA_PIN_GMI_AD6_PG6,
        TEGRA_PIN_GMI_AD7_PG7,
-
        TEGRA_PIN_GMI_WR_N_PI0,
        TEGRA_PIN_GMI_OE_N_PI1,
        TEGRA_PIN_GMI_CS6_N_PI3,
        TEGRA_PIN_GMI_RST_N_PI4,
        TEGRA_PIN_GMI_WAIT_PI7,
-
        TEGRA_PIN_GMI_DQS_P_PJ3,
-
        TEGRA_PIN_GMI_ADV_N_PK0,
        TEGRA_PIN_GMI_CLK_PK1,
        TEGRA_PIN_GMI_CS4_N_PK2,
@@ -1342,14 +1351,37 @@ static const unsigned drive_uda_pins[] = {
 };
 
 static const unsigned drive_dev3_pins[] = {
-       TEGRA_PIN_CLK3_OUT_PEE0,
-       TEGRA_PIN_CLK3_REQ_PEE1,
+};
+
+static const unsigned drive_cec_pins[] = {
+};
+
+static const unsigned drive_at6_pins[] = {
+};
+
+static const unsigned drive_dap5_pins[] = {
+};
+
+static const unsigned drive_usb_vbus_en_pins[] = {
+};
+
+static const unsigned drive_ao3_pins[] = {
+};
+
+static const unsigned drive_hv0_pins[] = {
+};
+
+static const unsigned drive_sdio4_pins[] = {
+};
+
+static const unsigned drive_ao0_pins[] = {
 };
 
 enum tegra_mux {
        TEGRA_MUX_BLINK,
        TEGRA_MUX_CEC,
        TEGRA_MUX_CLDVFS,
+       TEGRA_MUX_CLK,
        TEGRA_MUX_CLK12,
        TEGRA_MUX_CPU,
        TEGRA_MUX_DAP,
@@ -1394,6 +1426,7 @@ enum tegra_mux {
        TEGRA_MUX_RSVD2,
        TEGRA_MUX_RSVD3,
        TEGRA_MUX_RSVD4,
+       TEGRA_MUX_RTCK,
        TEGRA_MUX_SDMMC1,
        TEGRA_MUX_SDMMC2,
        TEGRA_MUX_SDMMC3,
@@ -1425,944 +1458,16 @@ enum tegra_mux {
        TEGRA_MUX_VI_ALT3,
 };
 
-static const char * const blink_groups[] = {
-       "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
-       "hdmi_cec_pee3",
-};
-
-static const char * const cldvfs_groups[] = {
-       "gmi_ad9_ph1",
-       "gmi_ad10_ph2",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "dvfs_pwm_px0",
-       "dvfs_clk_px2",
-};
-
-static const char * const clk12_groups[] = {
-       "sdmmc1_wp_n_pv3",
-       "sdmmc1_clk_pz0",
-};
-
-static const char * const cpu_groups[] = {
-       "cpu_pwr_req",
-};
-
-static const char * const dap_groups[] = {
-       "clk1_req_pee2",
-       "clk2_req_pcc5",
-};
-
-static const char * const dap1_groups[] = {
-       "clk1_req_pee2",
-};
-
-static const char * const dap2_groups[] = {
-       "clk1_out_pw4",
-       "gpio_x4_aud_px4",
-};
-
-static const char * const dev3_groups[] = {
-       "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_sclk_pp3",
-       "uart3_rts_n_pc0",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "pbb6",
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-       "kb_col3_pq3",
-       "sdmmc3_dat2_pb5",
-};
-
-static const char * const displaya_alt_groups[] = {
-       "kb_row6_pr6",
-};
-
-static const char * const displayb_groups[] = {
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_sclk_pp3",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pu6",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "pbb6",
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-       "sdmmc3_dat3_pb4",
-};
-
-static const char * const dtv_groups[] = {
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-       "dap4_fs_pp4",
-       "dap4_dout_pp6",
-       "gmi_wait_pi7",
-       "gmi_ad8_ph0",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-};
-
-static const char * const emc_dll_groups[] = {
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-};
-
-static const char * const extperiph1_groups[] = {
-       "clk1_out_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
-       "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
-       "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
-       "gmi_wp_n_pc7",
-
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_ad8_ph0",
-       "gmi_ad9_ph1",
-       "gmi_ad10_ph2",
-       "gmi_ad11_ph3",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-       "gmi_wr_n_pi0",
-       "gmi_oe_n_pi1",
-       "gmi_cs6_n_pi3",
-       "gmi_rst_n_pi4",
-       "gmi_iordy_pi5",
-       "gmi_cs7_n_pi6",
-       "gmi_wait_pi7",
-       "gmi_cs0_n_pj0",
-       "gmi_cs1_n_pj2",
-       "gmi_dqs_p_pj3",
-       "gmi_adv_n_pk0",
-       "gmi_clk_pk1",
-       "gmi_cs4_n_pk2",
-       "gmi_cs2_n_pk3",
-       "gmi_cs3_n_pk4",
-       "gmi_a16_pj7",
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gmi_a19_pk7",
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_sclk_pn3",
-};
-
-static const char * const gmi_alt_groups[] = {
-       "gmi_wp_n_pc7",
-       "gmi_cs3_n_pk4",
-       "gmi_a16_pj7",
-};
-
-static const char * const hda_groups[] = {
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_sclk_pn3",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-};
-
-static const char * const hsi_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "gpio_w2_aud_pw2",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const i2c2_groups[] = {
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-};
-
-static const char * const i2c4_groups[] = {
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-};
-
-static const char * const i2s2_groups[] = {
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
-       "dap4_fs_pp4",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
-       "pcc1",
-       "pbb0",
-       "pbb7",
-       "pcc2",
-};
-
-static const char * const irda_groups[] = {
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-};
-
-static const char * const kbc_groups[] = {
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-};
-
-static const char * const nand_groups[] = {
-       "gmi_wp_n_pc7",
-       "gmi_wait_pi7",
-       "gmi_adv_n_pk0",
-       "gmi_clk_pk1",
-       "gmi_cs0_n_pj0",
-       "gmi_cs1_n_pj2",
-       "gmi_cs2_n_pk3",
-       "gmi_cs3_n_pk4",
-       "gmi_cs4_n_pk2",
-       "gmi_cs6_n_pi3",
-       "gmi_cs7_n_pi6",
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_ad8_ph0",
-       "gmi_ad9_ph1",
-       "gmi_ad10_ph2",
-       "gmi_ad11_ph3",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-       "gmi_wr_n_pi0",
-       "gmi_oe_n_pi1",
-       "gmi_dqs_p_pj3",
-       "gmi_rst_n_pi4",
-};
-
-static const char * const nand_alt_groups[] = {
-       "gmi_cs6_n_pi3",
-       "gmi_cs7_n_pi6",
-       "gmi_rst_n_pi4",
-};
-
-static const char * const owr_groups[] = {
-       "pu0",
-       "kb_col4_pq4",
-       "owr",
-       "sdmmc3_cd_n_pv2",
-};
-
-static const char * const pmi_groups[] = {
-       "pwr_int_n",
-};
-
-static const char * const pwm0_groups[] = {
-       "sdmmc1_dat2_py5",
-       "uart3_rts_n_pc0",
-       "pu3",
-       "gmi_ad8_ph0",
-       "sdmmc3_dat3_pb4",
-};
-
-static const char * const pwm1_groups[] = {
-       "sdmmc1_dat1_py6",
-       "pu4",
-       "gmi_ad9_ph1",
-       "sdmmc3_dat2_pb5",
-};
-
-static const char * const pwm2_groups[] = {
-       "pu5",
-       "gmi_ad10_ph2",
-       "kb_col3_pq3",
-       "sdmmc3_dat1_pb6",
-};
-
-static const char * const pwm3_groups[] = {
-       "pu6",
-       "gmi_ad11_ph3",
-       "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwron_groups[] = {
-       "core_pwr_req",
-};
-
-static const char * const reset_out_n_groups[] = {
-       "reset_out_n",
-};
-
-static const char * const rsvd1_groups[] = {
-       "pv1",
-       "hdmi_int_pn7",
-       "pu1",
-       "pu2",
-       "gmi_wp_n_pc7",
-       "gmi_adv_n_pk0",
-       "gmi_cs0_n_pj0",
-       "gmi_cs1_n_pj2",
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_wr_n_pi0",
-       "gmi_oe_n_pi1",
-       "gpio_x4_aud_px4",
-       "gpio_x5_aud_px5",
-       "gpio_x7_aud_px7",
-
-       "reset_out_n",
-};
-
-static const char * const rsvd2_groups[] = {
-       "pv0",
-       "pv1",
-       "sdmmc1_dat0_py7",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "hdmi_int_pn7",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "dap4_fs_pp4",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_sclk_pp7",
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-       "gmi_iordy_pi5",
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat7_paa7",
-       "pcc1",
-       "pbb7",
-       "pcc2",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "sys_clk_req_pz5",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "pwr_int_n",
-       "owr",
-       "spdif_out_pk5",
-       "gpio_x1_aud_px1",
-       "sdmmc3_clk_pa6",
-       "sdmmc3_dat0_pb7",
-       "gpio_w2_aud_pw2",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-       "sdmmc3_clk_lb_out_pee4",
-       "sdmmc3_clk_lb_in_pee5",
-       "reset_out_n",
-};
-
-static const char * const rsvd3_groups[] = {
-       "pv0",
-       "pv1",
-       "sdmmc1_clk_pz0",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "hdmi_int_pn7",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-       "pu0",
-       "pu1",
-       "pu2",
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "dap4_din_pp5",
-       "dap4_sclk_pp7",
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-       "pcc1",
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-       "pbb7",
-       "pcc2",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row3_pr3",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "clk_32k_out_pa0",
-       "sys_clk_req_pz5",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "pwr_int_n",
-       "owr",
-       "clk1_req_pee2",
-       "clk1_out_pw4",
-       "spdif_out_pk5",
-       "spdif_in_pk6",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dvfs_pwm_px0",
-       "gpio_x1_aud_px1",
-       "gpio_x3_aud_px3",
-       "dvfs_clk_px2",
-       "sdmmc3_clk_pa6",
-       "sdmmc3_dat0_pb7",
-       "hdmi_cec_pee3",
-       "sdmmc3_cd_n_pv2",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-       "sdmmc3_clk_lb_out_pee4",
-       "sdmmc3_clk_lb_in_pee5",
-       "reset_out_n",
-};
-
-static const char * const rsvd4_groups[] = {
-       "pv0",
-       "pv1",
-       "sdmmc1_clk_pz0",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "hdmi_int_pn7",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-       "pu0",
-       "pu1",
-       "pu2",
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "dap4_fs_pp4",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_sclk_pp7",
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_rst_n_pi4",
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-       "cam_mclk_pcc0",
-       "pcc1",
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "pbb6",
-       "pbb7",
-       "pcc2",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_col2_pq2",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "clk_32k_out_pa0",
-       "sys_clk_req_pz5",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "pwr_int_n",
-       "owr",
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_sclk_pn3",
-       "clk1_req_pee2",
-       "clk1_out_pw4",
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dvfs_pwm_px0",
-       "gpio_x1_aud_px1",
-       "gpio_x3_aud_px3",
-       "dvfs_clk_px2",
-       "gpio_x5_aud_px5",
-       "gpio_x6_aud_px6",
-       "gpio_x7_aud_px7",
-       "sdmmc3_cd_n_pv2",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-       "sdmmc3_clk_lb_in_pee5",
-       "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc1_groups[] = {
-
-       "sdmmc1_clk_pz0",
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat0_py7",
-       "uart3_cts_n_pa1",
-       "kb_col5_pq5",
-       "sdmmc1_wp_n_pv3",
-};
-
-static const char * const sdmmc2_groups[] = {
-       "gmi_iordy_pi5",
-       "gmi_clk_pk1",
-       "gmi_cs2_n_pk3",
-       "gmi_cs3_n_pk4",
-       "gmi_cs7_n_pi6",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-       "gmi_dqs_p_pj3",
-};
-
-static const char * const sdmmc3_groups[] = {
-       "kb_col4_pq4",
-       "sdmmc3_clk_pa6",
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat3_pb4",
-       "hdmi_cec_pee3",
-       "sdmmc3_cd_n_pv2",
-       "sdmmc3_clk_lb_in_pee5",
-       "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc4_groups[] = {
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-};
-
-static const char * const soc_groups[] = {
-       "gmi_cs1_n_pj2",
-       "gmi_oe_n_pi1",
-       "clk_32k_out_pa0",
-       "hdmi_cec_pee3",
-};
-
-static const char * const spdif_groups[] = {
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-};
-
-static const char * const spi1_groups[] = {
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-       "gpio_x3_aud_px3",
-       "gpio_x4_aud_px4",
-       "gpio_x5_aud_px5",
-       "gpio_x6_aud_px6",
-       "gpio_x7_aud_px7",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const spi2_groups[] = {
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "gpio_x4_aud_px4",
-       "gpio_x5_aud_px5",
-       "gpio_x6_aud_px6",
-       "gpio_x7_aud_px7",
-       "gpio_w2_aud_pw2",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const spi3_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc3_clk_pa6",
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat3_pb4",
-};
-
-static const char * const spi4_groups[] = {
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat0_py7",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-       "uart3_cts_n_pa1",
-       "gmi_wait_pi7",
-       "gmi_cs6_n_pi3",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_a19_pk7",
-       "gmi_wr_n_pi0",
-       "sdmmc1_wp_n_pv3",
-};
-
-static const char * const spi5_groups[] = {
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_sclk_pp3",
-};
-
-static const char * const spi6_groups[] = {
-       "dvfs_pwm_px0",
-       "gpio_x1_aud_px1",
-       "gpio_x3_aud_px3",
-       "dvfs_clk_px2",
-       "gpio_x6_aud_px6",
-       "gpio_w2_aud_pw2",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const sysclk_groups[] = {
-       "sys_clk_req_pz5",
-};
-
-static const char * const trace_groups[] = {
-       "gmi_iordy_pi5",
-       "gmi_adv_n_pk0",
-       "gmi_clk_pk1",
-       "gmi_cs2_n_pk3",
-       "gmi_cs4_n_pk2",
-       "gmi_a16_pj7",
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gmi_a19_pk7",
-       "gmi_dqs_p_pj3",
-};
-
-static const char * const uarta_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat0_py7",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-       "pu0",
-       "pu1",
-       "pu2",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pu6",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc1_wp_n_pv3",
-};
-
-static const char * const uartb_groups[] = {
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-};
-
-static const char * const uartc_groups[] = {
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-};
-
-static const char * const uartd_groups[] = {
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-       "gmi_a16_pj7",
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gmi_a19_pk7",
-};
-
-static const char * const ulpi_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-};
-
-static const char * const usb_groups[] = {
-       "pv0",
-       "pu6",
-       "gmi_cs0_n_pj0",
-       "gmi_cs4_n_pk2",
-       "gmi_ad11_ph3",
-       "kb_col0_pq0",
-       "spdif_in_pk6",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-};
-
-static const char * const vgp1_groups[] = {
-       "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
-       "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
-       "pbb3",
-};
-
-static const char * const vgp4_groups[] = {
-       "pbb4",
-};
-
-static const char * const vgp5_groups[] = {
-       "pbb5",
-};
-
-static const char * const vgp6_groups[] = {
-       "pbb6",
-};
-
-static const char * const vi_groups[] = {
-       "cam_mclk_pcc0",
-       "pbb0",
-};
-
-static const char * const vi_alt1_groups[] = {
-       "cam_mclk_pcc0",
-       "pbb0",
-};
-
-static const char * const vi_alt3_groups[] = {
-       "cam_mclk_pcc0",
-       "pbb0",
-};
-
 #define FUNCTION(fname)                                        \
        {                                               \
                .name = #fname,                         \
-               .groups = fname##_groups,               \
-               .ngroups = ARRAY_SIZE(fname##_groups),  \
        }
 
-static const struct tegra_function  tegra114_functions[] = {
+static struct tegra_function tegra114_functions[] = {
        FUNCTION(blink),
        FUNCTION(cec),
        FUNCTION(cldvfs),
+       FUNCTION(clk),
        FUNCTION(clk12),
        FUNCTION(cpu),
        FUNCTION(dap),
@@ -2407,6 +1512,7 @@ static const struct tegra_function  tegra114_functions[] = {
        FUNCTION(rsvd2),
        FUNCTION(rsvd3),
        FUNCTION(rsvd4),
+       FUNCTION(rtck),
        FUNCTION(sdmmc1),
        FUNCTION(sdmmc2),
        FUNCTION(sdmmc3),
@@ -2438,11 +1544,11 @@ static const struct tegra_function  tegra114_functions[] = {
        FUNCTION(vi_alt3),
 };
 
-#define DRV_PINGROUP_REG_START                 0x868   /* bank 0 */
-#define PINGROUP_REG_START                     0x3000  /* bank 1 */
+#define DRV_PINGROUP_REG_A             0x868   /* bank 0 */
+#define PINGROUP_REG_A                 0x3000  /* bank 1 */
 
-#define PINGROUP_REG_Y(r)                      ((r) - PINGROUP_REG_START)
-#define PINGROUP_REG_N(r)                      -1
+#define PINGROUP_REG_Y(r)              ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r)              -1
 
 #define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
        {                                                               \
@@ -2484,13 +1590,14 @@ static const struct tegra_function  tegra114_functions[] = {
                .drvtype_reg = -1,                                      \
        }
 
-#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_START)
-#define DRV_PINGROUP_DVRTYPE_N(r) -1
+#define DRV_PINGROUP_REG_Y(r)          ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r)          -1
+
 
 #define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b,             \
-                       drvdn_b, drvdn_w, drvup_b, drvup_w,             \
-                       slwr_b, slwr_w, slwf_b, slwf_w,                 \
-                       drvtype)                                        \
+                    drvdn_b, drvdn_w, drvup_b, drvup_w,                \
+                    slwr_b, slwr_w, slwf_b, slwf_w,                    \
+                    drvtype)                                           \
        {                                                               \
                .name = "drive_" #pg_name,                              \
                .pins = drive_##pg_name##_pins,                         \
@@ -2503,7 +1610,7 @@ static const struct tegra_function  tegra114_functions[] = {
                .lock_reg = -1,                                         \
                .ioreset_reg = -1,                                      \
                .rcv_sel_reg = -1,                                      \
-               .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r),                   \
+               .drv_reg = DRV_PINGROUP_REG_Y(r),                       \
                .drv_bank = 0,                                          \
                .hsm_bit = hsm_b,                                       \
                .schmitt_bit = schmitt_b,                               \
@@ -2516,14 +1623,13 @@ static const struct tegra_function  tegra114_functions[] = {
                .slwr_width = slwr_w,                                   \
                .slwf_bit = slwf_b,                                     \
                .slwf_width = slwf_w,                                   \
-               .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r),       \
+               .drvtype_reg = DRV_PINGROUP_REG_##drvtype(r),           \
                .drvtype_bank = 0,                                      \
                .drvtype_bit = 6,                                       \
        }
 
 static const struct tegra_pingroup tegra114_groups[] = {
        /*       pg_name,                f0,         f1,         f2,           f3,          safe,     r,      od, ior, rcv_sel */
-       /* FIXME: Fill in correct data in safe column */
        PINGROUP(ulpi_data0_po1,         SPI3,       HSI,        UARTA,        ULPI,        ULPI,     0x3000,  N,  N,  N),
        PINGROUP(ulpi_data1_po2,         SPI3,       HSI,        UARTA,        ULPI,        ULPI,     0x3004,  N,  N,  N),
        PINGROUP(ulpi_data2_po3,         SPI3,       HSI,        UARTA,        ULPI,        ULPI,     0x3008,  N,  N,  N),
@@ -2635,6 +1741,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
        PINGROUP(pbb6,                   VGP6,       DISPLAYA,   DISPLAYB,     RSVD4,       RSVD4,    0x32a4,  N,  N,  N),
        PINGROUP(pbb7,                   I2S4,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32a8,  N,  N,  N),
        PINGROUP(pcc2,                   I2S4,       RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32ac,  N,  N,  N),
+       PINGROUP(jtag_rtck,              RTCK,       RSVD2,      RSVD3,        RSVD4,       RTCK,     0x32b0,  N,  N,  N),
        PINGROUP(pwr_i2c_scl_pz6,        I2CPWR,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32b4,  Y,  N,  N),
        PINGROUP(pwr_i2c_sda_pz7,        I2CPWR,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32b8,  Y,  N,  N),
        PINGROUP(kb_row0_pr0,            KBC,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x32bc,  N,  N,  N),
@@ -2661,6 +1768,7 @@ static const struct tegra_pingroup tegra114_groups[] = {
        PINGROUP(core_pwr_req,           PWRON,      RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3324,  N,  N,  N),
        PINGROUP(cpu_pwr_req,            CPU,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3328,  N,  N,  N),
        PINGROUP(pwr_int_n,              PMI,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x332c,  N,  N,  N),
+       PINGROUP(clk_32k_in,             CLK,        RSVD2,      RSVD3,        RSVD4,       CLK,      0x3330,  N,  N,  N),
        PINGROUP(owr,                    OWR,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3334,  N,  N,  Y),
        PINGROUP(dap1_fs_pn0,            I2S0,       HDA,        GMI,          RSVD4,       RSVD4,    0x3338,  N,  N,  N),
        PINGROUP(dap1_din_pn1,           I2S0,       HDA,        GMI,          RSVD4,       RSVD4,    0x333c,  N,  N,  N),
@@ -2697,38 +1805,48 @@ static const struct tegra_pingroup tegra114_groups[] = {
        PINGROUP(usb_vbus_en1_pn5,       USB,        RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x33f8,  Y,  N,  N),
        PINGROUP(sdmmc3_clk_lb_in_pee5,  SDMMC3,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x33fc,  N,  N,  N),
        PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3,     RSVD2,      RSVD3,        RSVD4,       RSVD4,    0x3400,  N,  N,  N),
+       PINGROUP(gmi_clk_lb,             SDMMC2,     NAND,       GMI,          RSVD4,       GMI,      0x3404,  N,  N,  N),
        PINGROUP(reset_out_n,            RSVD1,      RSVD2,      RSVD3,        RESET_OUT_N, RSVD3,    0x3408,  N,  N,  N),
 
        /* pg_name, r, hsm_b, schmitt_b, lpmd_b, drvdn_b, drvdn_w, drvup_b, drvup_w, slwr_b, slwr_w, slwf_b, slwf_w, drvtype */
-       DRV_PINGROUP(ao1,   0x868,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(ao2,   0x86c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(at1,   0x870,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
-       DRV_PINGROUP(at2,   0x874,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
-       DRV_PINGROUP(at3,   0x878,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
-       DRV_PINGROUP(at4,   0x87c,  2,  3,  4,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
-       DRV_PINGROUP(at5,   0x880,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(cdev1, 0x884,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(cdev2, 0x888,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(dap1,  0x890,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(dap2,  0x894,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(dap3,  0x898,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(dap4,  0x89c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(dbg,   0x8a0,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(sdio3, 0x8b0,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(spi,   0x8b4,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(uaa,   0x8b8,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(uab,   0x8bc,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(uart2, 0x8c0,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(uart3, 0x8c4,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(sdio1, 0x8ec,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(ddc,   0x8fc,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(gma,   0x900,  2,  3,  4,  14,  5,  20,  5,  28,  2,  30,  2,  Y),
-       DRV_PINGROUP(gme,   0x910,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(gmf,   0x914,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(gmg,   0x918,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(gmh,   0x91c,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(owr,   0x920,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
-       DRV_PINGROUP(uda,   0x924,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(ao1,         0x868,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(ao2,         0x86c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(at1,         0x870,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at2,         0x874,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at3,         0x878,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at4,         0x87c,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(at5,         0x880,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(cdev1,       0x884,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(cdev2,       0x888,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap1,        0x890,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap2,        0x894,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap3,        0x898,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dap4,        0x89c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dbg,         0x8a0,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(sdio3,       0x8b0,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(spi,         0x8b4,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uaa,         0x8b8,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uab,         0x8bc,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uart2,       0x8c0,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uart3,       0x8c4,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(sdio1,       0x8ec,  2,  3, -1,  12,  7,  20,  7,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(ddc,         0x8fc,  2,  3, -1,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gma,         0x900,  2,  3, -1,  14,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gme,         0x910,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gmf,         0x914,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gmg,         0x918,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(gmh,         0x91c,  2,  3,  4,  14,  5,  19,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(owr,         0x920,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(uda,         0x924,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(dev3,        0x92c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(cec,         0x938,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(at6,         0x994,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  Y),
+       DRV_PINGROUP(dap5,        0x998,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(usb_vbus_en, 0x99c,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(ao3,         0x9a0,  2,  3,  4,  12,  5,  -1, -1,  28,  2,  -1, -1,  N),
+       DRV_PINGROUP(hv0,         0x9a4,  2,  3,  4,  12,  5,  -1, -1,  28,  2,  -1, -1,  N),
+       DRV_PINGROUP(sdio4,       0x9a8,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
+       DRV_PINGROUP(ao0,         0x9ac,  2,  3,  4,  12,  5,  20,  5,  28,  2,  30,  2,  N),
 };
 
 static const struct tegra_pinctrl_soc_data tegra114_pinctrl = {
index c20e0e1dda83cc1bb0e6567b4d9bd10e2609b271..73773706755b6f91ff9779c22debaaef624f2dac 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Pinctrl data for the NVIDIA Tegra124 pinmux
  *
- * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #define TEGRA_PIN_PFF2                         _GPIO(250)
 
 /* All non-GPIO pins follow */
-#define NUM_GPIOS      (TEGRA_PIN_PFF2 + 1)
-#define _PIN(offset)   (NUM_GPIOS + (offset))
+#define NUM_GPIOS                              (TEGRA_PIN_PFF2 + 1)
+#define _PIN(offset)                           (NUM_GPIOS + (offset))
 
 /* Non-GPIO pins */
 #define TEGRA_PIN_CORE_PWR_REQ                 _PIN(0)
@@ -325,13 +325,13 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
        PINCTRL_PIN(TEGRA_PIN_KB_ROW8_PS0, "KB_ROW8 PS0"),
        PINCTRL_PIN(TEGRA_PIN_KB_ROW9_PS1, "KB_ROW9 PS1"),
        PINCTRL_PIN(TEGRA_PIN_KB_ROW10_PS2, "KB_ROW10 PS2"),
-       PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW10 PS3"),
-       PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW10 PS4"),
-       PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW10 PS5"),
-       PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW10 PS6"),
-       PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW10 PS7"),
-       PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW10 PT0"),
-       PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW10 PT1"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW11_PS3, "KB_ROW11 PS3"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW12_PS4, "KB_ROW12 PS4"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW13_PS5, "KB_ROW13 PS5"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW14_PS6, "KB_ROW14 PS6"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW15_PS7, "KB_ROW15 PS7"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW16_PT0, "KB_ROW16 PT0"),
+       PINCTRL_PIN(TEGRA_PIN_KB_ROW17_PT1, "KB_ROW17 PT1"),
        PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SCL_PT5, "GEN2_I2C_SCL PT5"),
        PINCTRL_PIN(TEGRA_PIN_GEN2_I2C_SDA_PT6, "GEN2_I2C_SDA PT6"),
        PINCTRL_PIN(TEGRA_PIN_SDMMC4_CMD_PT7, "SDMMC4_CMD PT7"),
@@ -406,16 +406,16 @@ static const struct pinctrl_pin_desc tegra124_pins[] = {
        PINCTRL_PIN(TEGRA_PIN_HDMI_CEC_PEE3, "HDMI_CEC PEE3"),
        PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_OUT_PEE4, "SDMMC3_CLK_LB_OUT PEE4"),
        PINCTRL_PIN(TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5, "SDMMC3_CLK_LB_IN PEE5"),
+       PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
+       PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
+       PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
        PINCTRL_PIN(TEGRA_PIN_CORE_PWR_REQ, "CORE_PWR_REQ"),
        PINCTRL_PIN(TEGRA_PIN_CPU_PWR_REQ, "CPU_PWR_REQ"),
-       PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
        PINCTRL_PIN(TEGRA_PIN_PWR_INT_N, "PWR_INT_N"),
+       PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
        PINCTRL_PIN(TEGRA_PIN_RESET_OUT_N, "RESET_OUT_N"),
-       PINCTRL_PIN(TEGRA_PIN_DP_HPD_PFF0, "DP_HPD PFF0"),
-       PINCTRL_PIN(TEGRA_PIN_USB_VBUS_EN2_PFF1, "USB_VBUS_EN2 PFF1"),
-       PINCTRL_PIN(TEGRA_PIN_PFF2, "PFF2"),
+       PINCTRL_PIN(TEGRA_PIN_OWR, "OWR"),
        PINCTRL_PIN(TEGRA_PIN_CLK_32K_IN, "CLK_32K_IN"),
-       PINCTRL_PIN(TEGRA_PIN_GMI_CLK_LB, "GMI_CLK_LB"),
        PINCTRL_PIN(TEGRA_PIN_JTAG_RTCK, "JTAG_RTCK"),
 };
 
@@ -1138,6 +1138,7 @@ static const unsigned sdmmc3_clk_lb_out_pee4_pins[] = {
 static const unsigned sdmmc3_clk_lb_in_pee5_pins[] = {
        TEGRA_PIN_SDMMC3_CLK_LB_IN_PEE5,
 };
+
 static const unsigned dp_hpd_pff0_pins[] = {
        TEGRA_PIN_DP_HPD_PFF0,
 };
@@ -1158,24 +1159,24 @@ static const unsigned cpu_pwr_req_pins[] = {
        TEGRA_PIN_CPU_PWR_REQ,
 };
 
-static const unsigned owr_pins[] = {
-       TEGRA_PIN_OWR,
-};
-
 static const unsigned pwr_int_n_pins[] = {
        TEGRA_PIN_PWR_INT_N,
 };
 
+static const unsigned gmi_clk_lb_pins[] = {
+       TEGRA_PIN_GMI_CLK_LB,
+};
+
 static const unsigned reset_out_n_pins[] = {
        TEGRA_PIN_RESET_OUT_N,
 };
 
-static const unsigned clk_32k_in_pins[] = {
-       TEGRA_PIN_CLK_32K_IN,
+static const unsigned owr_pins[] = {
+       TEGRA_PIN_OWR,
 };
 
-static const unsigned gmi_clk_lb_pins[] = {
-       TEGRA_PIN_GMI_CLK_LB,
+static const unsigned clk_32k_in_pins[] = {
+       TEGRA_PIN_CLK_32K_IN,
 };
 
 static const unsigned jtag_rtck_pins[] = {
@@ -1441,15 +1442,15 @@ static const unsigned drive_gpv_pins[] = {
        TEGRA_PIN_PFF2,
 };
 
-static const unsigned drive_cec_pins[] = {
-       TEGRA_PIN_HDMI_CEC_PEE3,
-};
-
 static const unsigned drive_dev3_pins[] = {
        TEGRA_PIN_CLK3_OUT_PEE0,
        TEGRA_PIN_CLK3_REQ_PEE1,
 };
 
+static const unsigned drive_cec_pins[] = {
+       TEGRA_PIN_HDMI_CEC_PEE3,
+};
+
 static const unsigned drive_at6_pins[] = {
        TEGRA_PIN_PK1,
        TEGRA_PIN_PK3,
@@ -1496,8 +1497,10 @@ static const unsigned drive_ao4_pins[] = {
 
 enum tegra_mux {
        TEGRA_MUX_BLINK,
+       TEGRA_MUX_CCLA,
        TEGRA_MUX_CEC,
        TEGRA_MUX_CLDVFS,
+       TEGRA_MUX_CLK,
        TEGRA_MUX_CLK12,
        TEGRA_MUX_CPU,
        TEGRA_MUX_DAP,
@@ -1507,6 +1510,7 @@ enum tegra_mux {
        TEGRA_MUX_DISPLAYA,
        TEGRA_MUX_DISPLAYA_ALT,
        TEGRA_MUX_DISPLAYB,
+       TEGRA_MUX_DP,
        TEGRA_MUX_DTV,
        TEGRA_MUX_EXTPERIPH1,
        TEGRA_MUX_EXTPERIPH2,
@@ -1528,6 +1532,9 @@ enum tegra_mux {
        TEGRA_MUX_IRDA,
        TEGRA_MUX_KBC,
        TEGRA_MUX_OWR,
+       TEGRA_MUX_PE,
+       TEGRA_MUX_PE0,
+       TEGRA_MUX_PE1,
        TEGRA_MUX_PMI,
        TEGRA_MUX_PWM0,
        TEGRA_MUX_PWM1,
@@ -1539,6 +1546,8 @@ enum tegra_mux {
        TEGRA_MUX_RSVD2,
        TEGRA_MUX_RSVD3,
        TEGRA_MUX_RSVD4,
+       TEGRA_MUX_RTCK,
+       TEGRA_MUX_SATA,
        TEGRA_MUX_SDMMC1,
        TEGRA_MUX_SDMMC2,
        TEGRA_MUX_SDMMC3,
@@ -1551,6 +1560,8 @@ enum tegra_mux {
        TEGRA_MUX_SPI4,
        TEGRA_MUX_SPI5,
        TEGRA_MUX_SPI6,
+       TEGRA_MUX_SYS,
+       TEGRA_MUX_TMDS,
        TEGRA_MUX_TRACE,
        TEGRA_MUX_UARTA,
        TEGRA_MUX_UARTB,
@@ -1569,1134 +1580,19 @@ enum tegra_mux {
        TEGRA_MUX_VI_ALT3,
        TEGRA_MUX_VIMCLK2,
        TEGRA_MUX_VIMCLK2_ALT,
-       TEGRA_MUX_SATA,
-       TEGRA_MUX_CCLA,
-       TEGRA_MUX_PE0,
-       TEGRA_MUX_PE,
-       TEGRA_MUX_PE1,
-       TEGRA_MUX_DP,
-       TEGRA_MUX_RTCK,
-       TEGRA_MUX_SYS,
-       TEGRA_MUX_CLK,
-       TEGRA_MUX_TMDS,
-};
-
-static const char * const blink_groups[] = {
-       "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
-       "hdmi_cec_pee3",
-};
-
-static const char * const cldvfs_groups[] = {
-       "ph2",
-       "ph3",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "dvfs_pwm_px0",
-       "dvfs_clk_px2",
-};
-
-static const char * const clk12_groups[] = {
-       "sdmmc1_wp_n_pv3",
-       "sdmmc1_clk_pz0",
-};
-
-static const char * const cpu_groups[] = {
-       "cpu_pwr_req",
-};
-
-static const char * const dap_groups[] = {
-       "dap_mclk1_pee2",
-       "clk2_req_pcc5",
-};
-
-static const char * const dap1_groups[] = {
-       "dap_mclk1_pee2",
-};
-
-static const char * const dap2_groups[] = {
-       "dap_mclk1_pw4",
-       "gpio_x4_aud_px4",
-};
-
-static const char * const dev3_groups[] = {
-       "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "ph1",
-       "pi4",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-       "kb_col3_pq3",
-       "sdmmc3_dat2_pb5",
-};
-
-static const char * const displaya_alt_groups[] = {
-       "kb_row6_pr6",
-};
-
-static const char * const displayb_groups[] = {
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_sclk_pp3",
-
-       "pu3",
-       "pu4",
-       "pu5",
-
-       "pbb3",
-       "pbb4",
-       "pbb6",
-
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-
-       "sdmmc3_dat3_pb4",
-};
-
-static const char * const dtv_groups[] = {
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-       "dap4_fs_pp4",
-       "dap4_dout_pp6",
-       "pi7",
-       "ph0",
-       "ph6",
-       "ph7",
-};
-
-static const char * const extperiph1_groups[] = {
-       "dap_mclk1_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
-       "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
-       "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
-       "uart2_cts_n_pj5",
-       "uart2_rts_n_pj6",
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-
-       "pu0",
-       "pu1",
-       "pu2",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pu6",
-
-       "dap4_fs_pp4",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_sclk_pp7",
-
-       "pc7",
-
-       "pg0",
-       "pg1",
-       "pg2",
-       "pg3",
-       "pg4",
-       "pg5",
-       "pg6",
-       "pg7",
-
-       "ph0",
-       "ph1",
-       "ph2",
-       "ph3",
-       "ph4",
-       "ph5",
-       "ph6",
-       "ph7",
-
-       "pi0",
-       "pi1",
-       "pi2",
-       "pi3",
-       "pi4",
-       "pi5",
-       "pi6",
-       "pi7",
-
-       "pj0",
-       "pj2",
-
-       "pk0",
-       "pk1",
-       "pk2",
-       "pk3",
-       "pk4",
-
-       "pj7",
-       "pb0",
-       "pb1",
-       "pk7",
-
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "gmi_clk_lb",
-
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_sclk_pn3",
-
-       "dap2_fs_pa2",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dap2_sclk_pa3",
-
-       "dvfs_pwm_px0",
-       "dvfs_clk_px2",
-       "gpio_x1_aud_px1",
-       "gpio_x3_aud_px3",
-       "gpio_x4_aud_px4",
-       "gpio_x5_aud_px5",
-       "gpio_x6_aud_px6",
-};
-
-static const char * const gmi_alt_groups[] = {
-       "pc7",
-       "pk4",
-       "pj7",
-};
-
-static const char * const hda_groups[] = {
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_sclk_pn3",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-};
-
-static const char * const hsi_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "gpio_w2_aud_pw2",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const i2c2_groups[] = {
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-};
-
-static const char * const i2c4_groups[] = {
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-};
-
-static const char * const i2s2_groups[] = {
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
-       "dap4_fs_pp4",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
-       "pcc1",
-       "pbb6",
-       "pbb7",
-       "pcc2",
-};
-
-static const char * const irda_groups[] = {
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-};
-
-static const char * const kbc_groups[] = {
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-       "kb_row15_ps7",
-       "kb_row16_pt0",
-       "kb_row17_pt1",
-
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-};
-
-static const char * const owr_groups[] = {
-       "pu0",
-       "kb_col4_pq4",
-       "owr",
-       "sdmmc3_cd_n_pv2",
-};
-
-static const char * const pmi_groups[] = {
-       "pwr_int_n",
-};
-
-static const char * const pwm0_groups[] = {
-       "sdmmc1_dat2_py5",
-       "uart3_rts_n_pc0",
-       "pu3",
-       "ph0",
-       "sdmmc3_dat3_pb4",
-};
-
-static const char * const pwm1_groups[] = {
-       "sdmmc1_dat1_py6",
-       "pu4",
-       "ph1",
-       "sdmmc3_dat2_pb5",
-};
-
-static const char * const pwm2_groups[] = {
-       "pu5",
-       "ph2",
-       "kb_col3_pq3",
-       "sdmmc3_dat1_pb6",
-};
-
-static const char * const pwm3_groups[] = {
-       "pu6",
-       "ph3",
-       "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwron_groups[] = {
-       "core_pwr_req",
-};
-
-static const char * const reset_out_n_groups[] = {
-       "reset_out_n",
-};
-
-static const char * const rsvd1_groups[] = {
-       "pv0",
-       "pv1",
-
-       "hdmi_int_pn7",
-       "pu1",
-       "pu2",
-       "pc7",
-       "pi7",
-       "pk0",
-       "pj0",
-       "pj2",
-       "pk2",
-       "pi3",
-       "pi6",
-
-       "pg0",
-       "pg1",
-       "pg2",
-       "pg3",
-       "pg4",
-       "pg5",
-       "pg6",
-       "pg7",
-
-       "pi0",
-       "pi1",
-
-       "gpio_x7_aud_px7",
-
-       "reset_out_n",
-};
-
-static const char * const rsvd2_groups[] = {
-       "pv0",
-       "pv1",
-
-       "sdmmc1_dat0_py7",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "hdmi_int_pn7",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-
-       "clk2_out_pee0",
-       "clk2_req_pee1",
-       "pc7",
-       "pi5",
-       "pj0",
-       "pj2",
-
-       "pk4",
-       "pk2",
-       "pi3",
-       "pi6",
-       "pg0",
-       "pg1",
-       "pg5",
-       "pg6",
-       "pg7",
-
-       "ph4",
-       "ph5",
-       "pj7",
-       "pb0",
-       "pb1",
-       "pk7",
-       "pi0",
-       "pi1",
-
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat7_paa7",
-       "pcc1",
-       "pbb6",
-       "pbb7",
-       "pcc2",
-       "jtag_rtck",
-
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "pwr_int_n",
-       "clk_32k_in",
-       "owr",
-
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-       "gpio_x1_aud_px1",
-
-       "sdmmc3_clk_pa6",
-       "sdmmc3_dat0_pb7",
-
-       "pex_l0_rst_n_pdd1",
-       "pex_l0_clkreq_n_pdd2",
-       "pex_wake_n_pdd3",
-       "pex_l1_rst_n_pdd5",
-       "pex_l1_clkreq_n_pdd6",
-       "hdmi_cec_pee3",
-
-       "gpio_w2_aud_pw2",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-       "sdmmc3_clk_lb_out_pee4",
-       "sdmmc3_clk_lb_in_pee5",
-       "gmi_clk_lb",
-       "reset_out_n",
-       "kb_row16_pt0",
-       "kb_row17_pt1",
-       "dp_hpd_pff0",
-       "usb_vbus_en2_pff1",
-       "pff2",
-};
-
-static const char * const rsvd3_groups[] = {
-       "dap3_sclk_pp3",
-       "pv0",
-       "pv1",
-       "sdmmc1_clk_pz0",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "hdmi_int_pn7",
-
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-
-       "pu6",
-
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-
-       "dap4_din_pp5",
-       "dap4_sclk_pp7",
-
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-
-       "sdmmc4_dat5_paa5",
-       "gpio_pcc1",
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-       "pbb5",
-       "pbb7",
-       "jtag_rtck",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-       "kb_row15_ps7",
-
-       "clk_32k_out_pa0",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "pwr_int_n",
-       "clk_32k_in",
-       "owr",
-
-       "dap_mclk1_pw4",
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-       "sdmmc3_clk_pa6",
-       "sdmmc3_dat0_pb7",
-
-       "pex_l0_rst_n_pdd1",
-       "pex_l0_clkreq_n_pdd2",
-       "pex_wake_n_pdd3",
-       "pex_l1_rst_n_pdd5",
-       "pex_l1_clkreq_n_pdd6",
-       "hdmi_cec_pee3",
-
-       "sdmmc3_cd_n_pv2",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-       "sdmmc3_clk_lb_out_pee4",
-       "sdmmc3_clk_lb_in_pee5",
-       "reset_out_n",
-       "kb_row16_pt0",
-       "kb_row17_pt1",
-       "dp_hpd_pff0",
-       "usb_vbus_en2_pff1",
-       "pff2",
-};
-
-static const char * const rsvd4_groups[] = {
-       "dap3_dout_pp2",
-       "pv0",
-       "pv1",
-       "sdmmc1_clk_pz0",
-
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "hdmi_int_pn7",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-
-       "pu0",
-       "pu1",
-       "pu2",
-
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-
-       "dap4_fs_pp4",
-       "dap4_dout_pp6",
-       "dap4_din_pp5",
-       "dap4_sclk_pp7",
-
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-
-       "pi5",
-       "pk1",
-       "pk2",
-       "pg0",
-       "pg1",
-       "pg2",
-       "pg3",
-       "ph4",
-       "ph5",
-       "pb0",
-       "pb1",
-       "pk7",
-       "pi0",
-       "pi1",
-       "pi2",
-
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-
-       "jtag_rtck",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-       "kb_row15_ps7",
-
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col5_pq5",
-
-       "clk_32k_out_pa0",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "pwr_int_n",
-       "clk_32k_in",
-       "owr",
-
-       "dap1_fs_pn0",
-       "dap1_din_pn1",
-       "dap1_sclk_pn3",
-       "dap_mclk1_req_pee2",
-       "dap_mclk1_pw5",
-
-       "dap2_fs_pa2",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dap2_sclk_pa3",
-
-       "dvfs_pwm_px0",
-       "dvfs_clk_px2",
-       "gpio_x1_aud_px1",
-       "gpio_x3_aud_px3",
-
-       "gpio_x5_aud_px5",
-       "gpio_x7_aud_px7",
-
-       "pex_l0_rst_n_pdd1",
-       "pex_l0_clkreq_n_pdd2",
-       "pex_wake_n_pdd3",
-       "pex_l1_rst_n_pdd5",
-       "pex_l1_clkreq_n_pdd6",
-       "hdmi_cec_pee3",
-
-       "sdmmc3_cd_n_pv2",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-       "sdmmc3_clk_lb_out_pee4",
-       "sdmmc3_clk_lb_in_pee5",
-       "gmi_clk_lb",
-
-       "dp_hpd_pff0",
-       "usb_vbus_en2_pff1",
-       "pff2",
-};
-
-static const char * const sdmmc1_groups[] = {
-       "sdmmc1_clk_pz0",
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat0_py7",
-       "clk2_out_pw5",
-       "clk2_req_pcc",
-       "uart3_cts_n_pa1",
-       "sdmmc1_wp_n_pv3",
-};
-
-static const char * const sdmmc2_groups[] = {
-       "pi5",
-       "pk1",
-       "pk3",
-       "pk4",
-       "pi6",
-       "ph4",
-       "ph5",
-       "ph6",
-       "ph7",
-       "pi2",
-       "cam_mclk_pcc0",
-       "pcc1",
-       "pbb0",
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "pbb6",
-       "pbb7",
-       "pcc2",
-       "gmi_clk_lb",
-};
-
-static const char * const sdmmc3_groups[] = {
-       "pk0",
-       "pcc2",
-
-       "kb_col4_pq4",
-       "kb_col5_pq5",
-
-       "sdmmc3_clk_pa6",
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat3_pb4",
-
-       "sdmmc3_cd_n_pv2",
-       "sdmmc3_clk_lb_in_pee5",
-       "sdmmc3_clk_lb_out_pee4",
-};
-
-static const char * const sdmmc4_groups[] = {
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-};
-
-static const char * const soc_groups[] = {
-       "pk0",
-       "pj2",
-       "kb_row15_ps7",
-       "clk_32k_out_pa0",
-};
-
-static const char * const spdif_groups[] = {
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-};
-
-static const char * const spi1_groups[] = {
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-       "gpio_x3_aud_px3",
-       "gpio_x4_aud_px4",
-       "gpio_x5_aud_px5",
-       "gpio_x6_aud_px6",
-       "gpio_x7_aud_px7",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const spi2_groups[] = {
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-       "kb_row15_ps7",
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "gpio_x4_aud_px4",
-       "gpio_x5_aud_px5",
-       "gpio_x6_aud_px6",
-       "gpio_x7_aud_px7",
-       "gpio_w2_aud_pw2",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const spi3_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc3_clk_pa6",
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat3_pb4",
-};
-
-static const char * const spi4_groups[] = {
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat0_py7",
-
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-
-       "pi3",
-       "pg4",
-       "pg5",
-       "pg6",
-       "pg7",
-       "ph3",
-       "pi4",
-       "sdmmc1_wp_n_pv3",
-};
-
-static const char * const spi5_groups[] = {
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-       "dap3_fs_pp0",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_sclk_pp3",
-};
-
-static const char * const spi6_groups[] = {
-       "dvfs_pwm_px0",
-       "gpio_x1_aud_px1",
-       "gpio_x3_aud_px3",
-       "dvfs_clk_px2",
-       "gpio_x6_aud_px6",
-       "gpio_w2_aud_pw2",
-       "gpio_w3_aud_pw3",
-};
-
-static const char * const trace_groups[] = {
-       "pi2",
-       "pi4",
-       "pi7",
-       "ph0",
-       "ph6",
-       "ph7",
-       "pg2",
-       "pg3",
-       "pk1",
-       "pk3",
-};
-
-static const char * const uarta_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat3_py4",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat0_py7",
-
-
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-
-       "pu0",
-       "pu1",
-       "pu2",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pu6",
-
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "kb_row10_ps2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc1_wp_n_pv3",
-
-};
-
-static const char * const uartb_groups[] = {
-       "uart2_rts_n_pj6",
-       "uart2_cts_n_pj5",
-};
-
-static const char * const uartc_groups[] = {
-       "uart3_txd_pw6",
-       "uart3_rxd_pw7",
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-       "kb_row16_pt0",
-       "kn_row17_pt1",
-};
-
-static const char * const uartd_groups[] = {
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-       "pj7",
-       "pb0",
-       "pb1",
-       "pk7",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-};
-
-static const char * const ulpi_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-};
-
-static const char * const usb_groups[] = {
-       "pj0",
-       "usb_vbus_en0_pn4",
-       "usb_vbus_en1_pn5",
-       "usb_vbus_en2_pff1",
-};
-
-static const char * const vgp1_groups[] = {
-       "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
-       "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
-       "pbb3",
-};
-
-static const char * const vgp4_groups[] = {
-       "pbb4",
-};
-
-static const char * const vgp5_groups[] = {
-       "pbb5",
-};
-
-static const char * const vgp6_groups[] = {
-       "pbb0",
-};
-
-static const char * const vi_groups[] = {
-       "cam_mclk_pcc0",
-};
-
-static const char * const vi_alt1_groups[] = {
-       "cam_mclk_pcc0",
-};
-
-static const char * const vi_alt3_groups[] = {
-       "cam_mclk_pcc0",
-};
-
-static const char * const vimclk2_groups[] = {
-       "pbb0",
-};
-
-static const char * const vimclk2_alt_groups[] = {
-       "pbb0",
-};
-
-static const char * const sata_groups[] = {
-       "dap_mclk1_req_pee2",
-       "dap1_dout_pn2",
-       "pff2",
-};
-
-static const char * const ccla_groups[] = {
-       "pk3",
-};
-
-static const char * const rtck_groups[] = {
-       "jtag_rtck",
-};
-
-static const char * const sys_groups[] = {
-       "kb_row3_pr3",
-};
-
-static const char * const pe0_groups[] = {
-       "pex_l0_rst_n_pdd1",
-       "pex_l0_clkreq_n_pdd2",
-};
-
-static const char * const pe_groups[] = {
-       "pex_wake_n_pdd3",
-};
-
-static const char * const pe1_groups[] = {
-       "pex_l1_rst_n_pdd5",
-       "pex_l1_clkreq_n_pdd6",
-};
-
-static const char * const dp_groups[] = {
-       "dp_hpd_pff0",
-};
-
-static const char * const clk_groups[] = {
-       "clk_32k_in",
-};
-
-static const char * const tmds_groups[] = {
-       "pg4",
-       "ph1",
-       "ph2",
 };
 
 #define FUNCTION(fname)                                        \
        {                                               \
                .name = #fname,                         \
-               .groups = fname##_groups,               \
-               .ngroups = ARRAY_SIZE(fname##_groups),  \
        }
 
-static const struct tegra_function tegra124_functions[] = {
+static struct tegra_function tegra124_functions[] = {
        FUNCTION(blink),
+       FUNCTION(ccla),
        FUNCTION(cec),
        FUNCTION(cldvfs),
+       FUNCTION(clk),
        FUNCTION(clk12),
        FUNCTION(cpu),
        FUNCTION(dap),
@@ -2706,6 +1602,7 @@ static const struct tegra_function tegra124_functions[] = {
        FUNCTION(displaya),
        FUNCTION(displaya_alt),
        FUNCTION(displayb),
+       FUNCTION(dp),
        FUNCTION(dtv),
        FUNCTION(extperiph1),
        FUNCTION(extperiph2),
@@ -2727,6 +1624,9 @@ static const struct tegra_function tegra124_functions[] = {
        FUNCTION(irda),
        FUNCTION(kbc),
        FUNCTION(owr),
+       FUNCTION(pe),
+       FUNCTION(pe0),
+       FUNCTION(pe1),
        FUNCTION(pmi),
        FUNCTION(pwm0),
        FUNCTION(pwm1),
@@ -2738,6 +1638,8 @@ static const struct tegra_function tegra124_functions[] = {
        FUNCTION(rsvd2),
        FUNCTION(rsvd3),
        FUNCTION(rsvd4),
+       FUNCTION(rtck),
+       FUNCTION(sata),
        FUNCTION(sdmmc1),
        FUNCTION(sdmmc2),
        FUNCTION(sdmmc3),
@@ -2750,6 +1652,8 @@ static const struct tegra_function tegra124_functions[] = {
        FUNCTION(spi4),
        FUNCTION(spi5),
        FUNCTION(spi6),
+       FUNCTION(sys),
+       FUNCTION(tmds),
        FUNCTION(trace),
        FUNCTION(uarta),
        FUNCTION(uartb),
@@ -2768,23 +1672,13 @@ static const struct tegra_function tegra124_functions[] = {
        FUNCTION(vi_alt3),
        FUNCTION(vimclk2),
        FUNCTION(vimclk2_alt),
-       FUNCTION(sata),
-       FUNCTION(ccla),
-       FUNCTION(pe0),
-       FUNCTION(pe),
-       FUNCTION(pe1),
-       FUNCTION(dp),
-       FUNCTION(rtck),
-       FUNCTION(sys),
-       FUNCTION(clk),
-       FUNCTION(tmds),
 };
 
-#define DRV_PINGROUP_REG_A     0x868   /* bank 0 */
-#define PINGROUP_REG_A         0x3000  /* bank 1 */
+#define DRV_PINGROUP_REG_A             0x868   /* bank 0 */
+#define PINGROUP_REG_A                 0x3000  /* bank 1 */
 
-#define PINGROUP_REG_Y(r)      ((r) - PINGROUP_REG_A)
-#define PINGROUP_REG_N(r)      -1
+#define PINGROUP_REG_Y(r)              ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r)              -1
 
 #define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior, rcv_sel) \
        {                                                               \
@@ -2792,12 +1686,12 @@ static const struct tegra_function tegra124_functions[] = {
                .pins = pg_name##_pins,                                 \
                .npins = ARRAY_SIZE(pg_name##_pins),                    \
                .funcs = {                                              \
-                       TEGRA_MUX_ ## f0,                               \
-                       TEGRA_MUX_ ## f1,                               \
-                       TEGRA_MUX_ ## f2,                               \
-                       TEGRA_MUX_ ## f3,                               \
+                       TEGRA_MUX_##f0,                                 \
+                       TEGRA_MUX_##f1,                                 \
+                       TEGRA_MUX_##f2,                                 \
+                       TEGRA_MUX_##f3,                                 \
                },                                                      \
-               .func_safe = TEGRA_MUX_ ## f_safe,                      \
+               .func_safe = TEGRA_MUX_##f_safe,                        \
                .mux_reg = PINGROUP_REG_Y(r),                           \
                .mux_bank = 1,                                          \
                .mux_bit = 0,                                           \
@@ -2826,8 +1720,9 @@ static const struct tegra_function tegra124_functions[] = {
                .drvtype_reg = -1,                                      \
        }
 
-#define DRV_PINGROUP_DVRTYPE_Y(r) ((r) - DRV_PINGROUP_REG_A)
-#define DRV_PINGROUP_DVRTYPE_N(r) -1
+#define DRV_PINGROUP_REG_Y(r)          ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r)          -1
+
 
 #define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b,             \
                     drvdn_b, drvdn_w, drvup_b, drvup_w,                \
@@ -2845,7 +1740,7 @@ static const struct tegra_function tegra124_functions[] = {
                .lock_reg = -1,                                         \
                .ioreset_reg = -1,                                      \
                .rcv_sel_reg = -1,                                      \
-               .drv_reg = DRV_PINGROUP_DVRTYPE_Y(r),                   \
+               .drv_reg = DRV_PINGROUP_REG_Y(r),                       \
                .drv_bank = 0,                                          \
                .hsm_bit = hsm_b,                                       \
                .schmitt_bit = schmitt_b,                               \
@@ -2858,7 +1753,7 @@ static const struct tegra_function tegra124_functions[] = {
                .slwr_width = slwr_w,                                   \
                .slwf_bit = slwf_b,                                     \
                .slwf_width = slwf_w,                                   \
-               .drvtype_reg = DRV_PINGROUP_DVRTYPE_##drvtype(r),       \
+               .drvtype_reg = DRV_PINGROUP_REG_##drvtype(r),           \
                .drvtype_bank = 0,                                      \
                .drvtype_bit = 6,                                       \
        }
@@ -2909,8 +1804,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
        PINGROUP(pu4,                    PWM1,       UARTA,      GMI,          DISPLAYB,    PWM1,       0x3194,  N,  N,  N),
        PINGROUP(pu5,                    PWM2,       UARTA,      GMI,          DISPLAYB,    PWM2,       0x3198,  N,  N,  N),
        PINGROUP(pu6,                    PWM3,       UARTA,      RSVD3,        GMI,         RSVD3,      0x319c,  N,  N,  N),
-       PINGROUP(gen1_i2c_scl_pc4,       I2C1,       RSVD2,      RSVD3,        RSVD4,       I2C1,       0x31a0,  Y,  N,  N),
-       PINGROUP(gen1_i2c_sda_pc5,       I2C1,       RSVD2,      RSVD3,        RSVD4,       I2C1,       0x31a4,  Y,  N,  N),
+       PINGROUP(gen1_i2c_sda_pc5,       I2C1,       RSVD2,      RSVD3,        RSVD4,       I2C1,       0x31a0,  Y,  N,  N),
+       PINGROUP(gen1_i2c_scl_pc4,       I2C1,       RSVD2,      RSVD3,        RSVD4,       I2C1,       0x31a4,  Y,  N,  N),
        PINGROUP(dap4_fs_pp4,            I2S3,       GMI,        DTV,          RSVD4,       I2S3,       0x31a8,  N,  N,  N),
        PINGROUP(dap4_din_pp5,           I2S3,       GMI,        RSVD3,        RSVD4,       I2S3,       0x31ac,  N,  N,  N),
        PINGROUP(dap4_dout_pp6,          I2S3,       GMI,        DTV,          RSVD4,       I2S3,       0x31b0,  N,  N,  N),
@@ -2964,9 +1859,9 @@ static const struct tegra_pingroup tegra124_groups[] = {
        PINGROUP(sdmmc4_dat4_paa4,       SDMMC4,     SPI3,       GMI,          RSVD4,       SDMMC4,     0x3270,  N,  Y,  N),
        PINGROUP(sdmmc4_dat5_paa5,       SDMMC4,     SPI3,       RSVD3,        RSVD4,       SDMMC4,     0x3274,  N,  Y,  N),
        PINGROUP(sdmmc4_dat6_paa6,       SDMMC4,     SPI3,       GMI,          RSVD4,       SDMMC4,     0x3278,  N,  Y,  N),
-       PINGROUP(sdmmc4_dat7_paa7,       SDMMC4,     RSVD1,      GMI,          RSVD4,       SDMMC4,     0x327c,  N,  Y,  N),
+       PINGROUP(sdmmc4_dat7_paa7,       SDMMC4,     RSVD2,      GMI,          RSVD4,       SDMMC4,     0x327c,  N,  Y,  N),
        PINGROUP(cam_mclk_pcc0,          VI,         VI_ALT1,    VI_ALT3,      SDMMC2,      VI,         0x3284,  N,  N,  N),
-       PINGROUP(pcc1,                   I2S4,       RSVD1,      RSVD3,        SDMMC2,      I2S4,       0x3288,  N,  N,  N),
+       PINGROUP(pcc1,                   I2S4,       RSVD2,      RSVD3,        SDMMC2,      I2S4,       0x3288,  N,  N,  N),
        PINGROUP(pbb0,                   VGP6,       VIMCLK2,    SDMMC2,       VIMCLK2_ALT, VGP6,       0x328c,  N,  N,  N),
        PINGROUP(cam_i2c_scl_pbb1,       VGP1,       I2C3,       RSVD3,        SDMMC2,      VGP1,       0x3290,  Y,  N,  N),
        PINGROUP(cam_i2c_sda_pbb2,       VGP2,       I2C3,       RSVD3,        SDMMC2,      VGP2,       0x3294,  Y,  N,  N),
@@ -3047,8 +1942,8 @@ static const struct tegra_pingroup tegra124_groups[] = {
        PINGROUP(gpio_w3_aud_pw3,        SPI6,       SPI1,       SPI2,         I2C1,        SPI1,       0x33f0,  N,  N,  N),
        PINGROUP(usb_vbus_en0_pn4,       USB,        RSVD2,      RSVD3,        RSVD4,       USB,        0x33f4,  Y,  N,  N),
        PINGROUP(usb_vbus_en1_pn5,       USB,        RSVD2,      RSVD3,        RSVD4,       USB,        0x33f8,  Y,  N,  N),
-       PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3,     RSVD2,      RSVD3,        RSVD4,       SDMMC3,     0x33fc,  N,  N,  N),
-       PINGROUP(sdmmc3_clk_lb_in_pee5,  SDMMC3,     RSVD2,      RSVD3,        RSVD4,       SDMMC3,     0x3400,  N,  N,  N),
+       PINGROUP(sdmmc3_clk_lb_in_pee5,  SDMMC3,     RSVD2,      RSVD3,        RSVD4,       SDMMC3,     0x33fc,  N,  N,  N),
+       PINGROUP(sdmmc3_clk_lb_out_pee4, SDMMC3,     RSVD2,      RSVD3,        RSVD4,       SDMMC3,     0x3400,  N,  N,  N),
        PINGROUP(gmi_clk_lb,             SDMMC2,     RSVD2,      GMI,          RSVD4,       SDMMC2,     0x3404,  N,  N,  N),
        PINGROUP(reset_out_n,            RSVD1,      RSVD2,      RSVD3,        RESET_OUT_N, RSVD1,      0x3408,  N,  N,  N),
        PINGROUP(kb_row16_pt0,           KBC,        RSVD2,      RSVD3,        UARTC,       KBC,        0x340c,  N,  N,  N),
index fcfb7d012c5b68bda551917f89c657956f2af6dd..e0b5040883873ed06738f5d21858a3cc22a72f77 100644 (file)
@@ -1894,637 +1894,12 @@ enum tegra_mux {
        TEGRA_MUX_XIO,
 };
 
-static const char * const ahb_clk_groups[] = {
-       "cdev2",
-};
-
-static const char * const apb_clk_groups[] = {
-       "cdev2",
-};
-
-static const char * const audio_sync_groups[] = {
-       "cdev1",
-};
-
-static const char * const crt_groups[] = {
-       "crtp",
-       "lm1",
-};
-
-static const char * const dap1_groups[] = {
-       "dap1",
-};
-
-static const char * const dap2_groups[] = {
-       "dap2",
-};
-
-static const char * const dap3_groups[] = {
-       "dap3",
-};
-
-static const char * const dap4_groups[] = {
-       "dap4",
-};
-
-static const char * const dap5_groups[] = {
-       "gme",
-};
-
-static const char * const displaya_groups[] = {
-       "lcsn",
-       "ld0",
-       "ld1",
-       "ld10",
-       "ld11",
-       "ld12",
-       "ld13",
-       "ld14",
-       "ld15",
-       "ld16",
-       "ld17",
-       "ld2",
-       "ld3",
-       "ld4",
-       "ld5",
-       "ld6",
-       "ld7",
-       "ld8",
-       "ld9",
-       "ldc",
-       "ldi",
-       "lhp0",
-       "lhp1",
-       "lhp2",
-       "lhs",
-       "lm0",
-       "lm1",
-       "lpp",
-       "lpw0",
-       "lpw1",
-       "lpw2",
-       "lsc0",
-       "lsc1",
-       "lsck",
-       "lsda",
-       "lsdi",
-       "lspi",
-       "lvp0",
-       "lvp1",
-       "lvs",
-};
-
-static const char * const displayb_groups[] = {
-       "lcsn",
-       "ld0",
-       "ld1",
-       "ld10",
-       "ld11",
-       "ld12",
-       "ld13",
-       "ld14",
-       "ld15",
-       "ld16",
-       "ld17",
-       "ld2",
-       "ld3",
-       "ld4",
-       "ld5",
-       "ld6",
-       "ld7",
-       "ld8",
-       "ld9",
-       "ldc",
-       "ldi",
-       "lhp0",
-       "lhp1",
-       "lhp2",
-       "lhs",
-       "lm0",
-       "lm1",
-       "lpp",
-       "lpw0",
-       "lpw1",
-       "lpw2",
-       "lsc0",
-       "lsc1",
-       "lsck",
-       "lsda",
-       "lsdi",
-       "lspi",
-       "lvp0",
-       "lvp1",
-       "lvs",
-};
-
-static const char * const emc_test0_dll_groups[] = {
-       "kbca",
-};
-
-static const char * const emc_test1_dll_groups[] = {
-       "kbcc",
-};
-
-static const char * const gmi_groups[] = {
-       "ata",
-       "atb",
-       "atc",
-       "atd",
-       "ate",
-       "dap1",
-       "dap2",
-       "dap4",
-       "gma",
-       "gmb",
-       "gmc",
-       "gmd",
-       "gme",
-       "gpu",
-       "irrx",
-       "irtx",
-       "pta",
-       "spia",
-       "spib",
-       "spic",
-       "spid",
-       "spie",
-       "uca",
-       "ucb",
-};
-
-static const char * const gmi_int_groups[] = {
-       "gmb",
-};
-
-static const char * const hdmi_groups[] = {
-       "hdint",
-       "lpw0",
-       "lpw2",
-       "lsc1",
-       "lsck",
-       "lsda",
-       "lspi",
-       "pta",
-};
-
-static const char * const i2cp_groups[] = {
-       "i2cp",
-};
-
-static const char * const i2c1_groups[] = {
-       "rm",
-       "spdi",
-       "spdo",
-       "spig",
-       "spih",
-};
-
-static const char * const i2c2_groups[] = {
-       "ddc",
-       "pta",
-};
-
-static const char * const i2c3_groups[] = {
-       "dtf",
-};
-
-static const char * const ide_groups[] = {
-       "ata",
-       "atb",
-       "atc",
-       "atd",
-       "ate",
-       "gmb",
-};
-
-static const char * const irda_groups[] = {
-       "uad",
-};
-
-static const char * const kbc_groups[] = {
-       "kbca",
-       "kbcb",
-       "kbcc",
-       "kbcd",
-       "kbce",
-       "kbcf",
-};
-
-static const char * const mio_groups[] = {
-       "kbcb",
-       "kbcd",
-       "kbcf",
-};
-
-static const char * const mipi_hs_groups[] = {
-       "uaa",
-       "uab",
-};
-
-static const char * const nand_groups[] = {
-       "ata",
-       "atb",
-       "atc",
-       "atd",
-       "ate",
-       "gmb",
-       "gmd",
-       "kbca",
-       "kbcb",
-       "kbcc",
-       "kbcd",
-       "kbce",
-       "kbcf",
-};
-
-static const char * const osc_groups[] = {
-       "cdev1",
-       "cdev2",
-};
-
-static const char * const owr_groups[] = {
-       "kbce",
-       "owc",
-       "uac",
-};
-
-static const char * const pcie_groups[] = {
-       "gpv",
-       "slxa",
-       "slxk",
-};
-
-static const char * const plla_out_groups[] = {
-       "cdev1",
-};
-
-static const char * const pllc_out1_groups[] = {
-       "csus",
-};
-
-static const char * const pllm_out1_groups[] = {
-       "cdev1",
-};
-
-static const char * const pllp_out2_groups[] = {
-       "csus",
-};
-
-static const char * const pllp_out3_groups[] = {
-       "csus",
-};
-
-static const char * const pllp_out4_groups[] = {
-       "cdev2",
-};
-
-static const char * const pwm_groups[] = {
-       "gpu",
-       "sdb",
-       "sdc",
-       "sdd",
-       "ucb",
-};
-
-static const char * const pwr_intr_groups[] = {
-       "pmc",
-};
-
-static const char * const pwr_on_groups[] = {
-       "pmc",
-};
-
-static const char * const rsvd1_groups[] = {
-       "dta",
-       "dtb",
-       "dtc",
-       "dtd",
-       "dte",
-       "gmd",
-       "gme",
-};
-
-static const char * const rsvd2_groups[] = {
-       "crtp",
-       "dap1",
-       "dap3",
-       "dap4",
-       "ddc",
-       "dtb",
-       "dtc",
-       "dte",
-       "dtf",
-       "gpu7",
-       "gpv",
-       "hdint",
-       "i2cp",
-       "owc",
-       "rm",
-       "sdio1",
-       "spdi",
-       "spdo",
-       "uac",
-       "uca",
-       "uda",
-};
-
-static const char * const rsvd3_groups[] = {
-       "crtp",
-       "dap2",
-       "dap3",
-       "ddc",
-       "gpu7",
-       "gpv",
-       "hdint",
-       "i2cp",
-       "ld17",
-       "ldc",
-       "ldi",
-       "lhp0",
-       "lhp1",
-       "lhp2",
-       "lm1",
-       "lpp",
-       "lpw1",
-       "lvp0",
-       "lvp1",
-       "owc",
-       "pmc",
-       "rm",
-       "uac",
-};
-
-static const char * const rsvd4_groups[] = {
-       "ata",
-       "ate",
-       "crtp",
-       "dap3",
-       "dap4",
-       "ddc",
-       "dta",
-       "dtc",
-       "dtd",
-       "dtf",
-       "gpu",
-       "gpu7",
-       "gpv",
-       "hdint",
-       "i2cp",
-       "kbce",
-       "lcsn",
-       "ld0",
-       "ld1",
-       "ld2",
-       "ld3",
-       "ld4",
-       "ld5",
-       "ld6",
-       "ld7",
-       "ld8",
-       "ld9",
-       "ld10",
-       "ld11",
-       "ld12",
-       "ld13",
-       "ld14",
-       "ld15",
-       "ld16",
-       "ld17",
-       "ldc",
-       "ldi",
-       "lhp0",
-       "lhp1",
-       "lhp2",
-       "lhs",
-       "lm0",
-       "lpp",
-       "lpw1",
-       "lsc0",
-       "lsdi",
-       "lvp0",
-       "lvp1",
-       "lvs",
-       "owc",
-       "pmc",
-       "pta",
-       "rm",
-       "spif",
-       "uac",
-       "uca",
-       "ucb",
-};
-
-static const char * const rtck_groups[] = {
-       "gpu7",
-};
-
-static const char * const sdio1_groups[] = {
-       "sdio1",
-};
-
-static const char * const sdio2_groups[] = {
-       "dap1",
-       "dta",
-       "dtd",
-       "kbca",
-       "kbcb",
-       "kbcd",
-       "spdi",
-       "spdo",
-};
-
-static const char * const sdio3_groups[] = {
-       "sdb",
-       "sdc",
-       "sdd",
-       "slxa",
-       "slxc",
-       "slxd",
-       "slxk",
-};
-
-static const char * const sdio4_groups[] = {
-       "atb",
-       "atc",
-       "atd",
-       "gma",
-       "gme",
-};
-
-static const char * const sflash_groups[] = {
-       "gmc",
-       "gmd",
-};
-
-static const char * const spdif_groups[] = {
-       "slxc",
-       "slxd",
-       "spdi",
-       "spdo",
-       "uad",
-};
-
-static const char * const spi1_groups[] = {
-       "dtb",
-       "dte",
-       "spia",
-       "spib",
-       "spic",
-       "spid",
-       "spie",
-       "spif",
-       "uda",
-};
-
-static const char * const spi2_groups[] = {
-       "sdb",
-       "slxa",
-       "slxc",
-       "slxd",
-       "slxk",
-       "spia",
-       "spib",
-       "spic",
-       "spid",
-       "spie",
-       "spif",
-       "spig",
-       "spih",
-       "uab",
-};
-
-static const char * const spi2_alt_groups[] = {
-       "spid",
-       "spie",
-       "spig",
-       "spih",
-};
-
-static const char * const spi3_groups[] = {
-       "gma",
-       "lcsn",
-       "lm0",
-       "lpw0",
-       "lpw2",
-       "lsc1",
-       "lsck",
-       "lsda",
-       "lsdi",
-       "sdc",
-       "sdd",
-       "spia",
-       "spib",
-       "spic",
-       "spif",
-       "spig",
-       "spih",
-       "uaa",
-};
-
-static const char * const spi4_groups[] = {
-       "gmc",
-       "irrx",
-       "irtx",
-       "slxa",
-       "slxc",
-       "slxd",
-       "slxk",
-       "uad",
-};
-
-static const char * const trace_groups[] = {
-       "kbcc",
-       "kbcf",
-};
-
-static const char * const twc_groups[] = {
-       "dap2",
-       "sdc",
-};
-
-static const char * const uarta_groups[] = {
-       "gpu",
-       "irrx",
-       "irtx",
-       "sdb",
-       "sdd",
-       "sdio1",
-       "uaa",
-       "uab",
-       "uad",
-};
-
-static const char * const uartb_groups[] = {
-       "irrx",
-       "irtx",
-};
-
-static const char * const uartc_groups[] = {
-       "uca",
-       "ucb",
-};
-
-static const char * const uartd_groups[] = {
-       "gmc",
-       "uda",
-};
-
-static const char * const uarte_groups[] = {
-       "gma",
-       "sdio1",
-};
-
-static const char * const ulpi_groups[] = {
-       "uaa",
-       "uab",
-       "uda",
-};
-
-static const char * const vi_groups[] = {
-       "dta",
-       "dtb",
-       "dtc",
-       "dtd",
-       "dte",
-       "dtf",
-};
-
-static const char * const vi_sensor_clk_groups[] = {
-       "csus",
-};
-
-static const char * const xio_groups[] = {
-       "ld0",
-       "ld1",
-       "ld10",
-       "ld11",
-       "ld12",
-       "ld13",
-       "ld14",
-       "ld15",
-       "ld16",
-       "ld2",
-       "ld3",
-       "ld4",
-       "ld5",
-       "ld6",
-       "ld7",
-       "ld8",
-       "ld9",
-       "lhs",
-       "lsc0",
-       "lspi",
-       "lvs",
-};
-
 #define FUNCTION(fname)                                        \
        {                                               \
                .name = #fname,                         \
-               .groups = fname##_groups,               \
-               .ngroups = ARRAY_SIZE(fname##_groups),  \
        }
 
-static const struct tegra_function tegra20_functions[] = {
+static struct tegra_function tegra20_functions[] = {
        FUNCTION(ahb_clk),
        FUNCTION(apb_clk),
        FUNCTION(audio_sync),
@@ -2881,18 +2256,7 @@ static struct platform_driver tegra20_pinctrl_driver = {
        .probe = tegra20_pinctrl_probe,
        .remove = tegra_pinctrl_remove,
 };
-
-static int __init tegra20_pinctrl_init(void)
-{
-       return platform_driver_register(&tegra20_pinctrl_driver);
-}
-arch_initcall(tegra20_pinctrl_init);
-
-static void __exit tegra20_pinctrl_exit(void)
-{
-       platform_driver_unregister(&tegra20_pinctrl_driver);
-}
-module_exit(tegra20_pinctrl_exit);
+module_platform_driver(tegra20_pinctrl_driver);
 
 MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
 MODULE_DESCRIPTION("NVIDIA Tegra20 pinctrl driver");
index 2300deba25bd4b39157a5a8f1e8524f965488bd3..41d24f5c28540953bb18bbb47f44fdf50e910a08 100644
@@ -25,7 +25,7 @@
  * Most pins affected by the pinmux can also be GPIOs. Define these first.
  * These must match how the GPIO driver names/numbers its pins.
  */
-#define _GPIO(offset)                          (offset)
+#define _GPIO(offset)                  (offset)
 
 #define TEGRA_PIN_CLK_32K_OUT_PA0      _GPIO(0)
 #define TEGRA_PIN_UART3_CTS_N_PA1      _GPIO(1)
 #define TEGRA_PIN_PEE7                 _GPIO(247)
 
 /* All non-GPIO pins follow */
-#define NUM_GPIOS                              (TEGRA_PIN_PEE7 + 1)
-#define _PIN(offset)                           (NUM_GPIOS + (offset))
+#define NUM_GPIOS                      (TEGRA_PIN_PEE7 + 1)
+#define _PIN(offset)                   (NUM_GPIOS + (offset))
 
 /* Non-GPIO pins */
 #define TEGRA_PIN_CLK_32K_IN           _PIN(0)
@@ -2015,1253 +2015,13 @@ enum tegra_mux {
        TEGRA_MUX_VI_ALT2,
        TEGRA_MUX_VI_ALT3,
 };
-static const char * const blink_groups[] = {
-       "clk_32k_out_pa0",
-};
-
-static const char * const cec_groups[] = {
-       "hdmi_cec_pee3",
-       "owr",
-};
-
-static const char * const clk_12m_out_groups[] = {
-       "pv3",
-};
-
-static const char * const clk_32k_in_groups[] = {
-       "clk_32k_in",
-};
-
-static const char * const core_pwr_req_groups[] = {
-       "core_pwr_req",
-};
-
-static const char * const cpu_pwr_req_groups[] = {
-       "cpu_pwr_req",
-};
-
-static const char * const crt_groups[] = {
-       "crt_hsync_pv6",
-       "crt_vsync_pv7",
-};
-
-static const char * const dap_groups[] = {
-       "clk1_req_pee2",
-       "clk2_req_pcc5",
-};
-
-static const char * const ddr_groups[] = {
-       "vi_d0_pt4",
-       "vi_d1_pd5",
-       "vi_d10_pt2",
-       "vi_d11_pt3",
-       "vi_d2_pl0",
-       "vi_d3_pl1",
-       "vi_d4_pl2",
-       "vi_d5_pl3",
-       "vi_d6_pl4",
-       "vi_d7_pl5",
-       "vi_d8_pl6",
-       "vi_d9_pl7",
-       "vi_hsync_pd7",
-       "vi_vsync_pd6",
-};
-
-static const char * const dev3_groups[] = {
-       "clk3_req_pee1",
-};
-
-static const char * const displaya_groups[] = {
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_fs_pp0",
-       "dap3_sclk_pp3",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "pbb6",
-       "lcd_cs0_n_pn4",
-       "lcd_cs1_n_pw0",
-       "lcd_d0_pe0",
-       "lcd_d1_pe1",
-       "lcd_d10_pf2",
-       "lcd_d11_pf3",
-       "lcd_d12_pf4",
-       "lcd_d13_pf5",
-       "lcd_d14_pf6",
-       "lcd_d15_pf7",
-       "lcd_d16_pm0",
-       "lcd_d17_pm1",
-       "lcd_d18_pm2",
-       "lcd_d19_pm3",
-       "lcd_d2_pe2",
-       "lcd_d20_pm4",
-       "lcd_d21_pm5",
-       "lcd_d22_pm6",
-       "lcd_d23_pm7",
-       "lcd_d3_pe3",
-       "lcd_d4_pe4",
-       "lcd_d5_pe5",
-       "lcd_d6_pe6",
-       "lcd_d7_pe7",
-       "lcd_d8_pf0",
-       "lcd_d9_pf1",
-       "lcd_dc0_pn6",
-       "lcd_dc1_pd2",
-       "lcd_de_pj1",
-       "lcd_hsync_pj3",
-       "lcd_m1_pw1",
-       "lcd_pclk_pb3",
-       "lcd_pwr0_pb2",
-       "lcd_pwr1_pc1",
-       "lcd_pwr2_pc6",
-       "lcd_sck_pz4",
-       "lcd_sdin_pz2",
-       "lcd_sdout_pn5",
-       "lcd_vsync_pj4",
-       "lcd_wr_n_pz3",
-};
-
-static const char * const displayb_groups[] = {
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_fs_pp0",
-       "dap3_sclk_pp3",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "pbb6",
-       "lcd_cs0_n_pn4",
-       "lcd_cs1_n_pw0",
-       "lcd_d0_pe0",
-       "lcd_d1_pe1",
-       "lcd_d10_pf2",
-       "lcd_d11_pf3",
-       "lcd_d12_pf4",
-       "lcd_d13_pf5",
-       "lcd_d14_pf6",
-       "lcd_d15_pf7",
-       "lcd_d16_pm0",
-       "lcd_d17_pm1",
-       "lcd_d18_pm2",
-       "lcd_d19_pm3",
-       "lcd_d2_pe2",
-       "lcd_d20_pm4",
-       "lcd_d21_pm5",
-       "lcd_d22_pm6",
-       "lcd_d23_pm7",
-       "lcd_d3_pe3",
-       "lcd_d4_pe4",
-       "lcd_d5_pe5",
-       "lcd_d6_pe6",
-       "lcd_d7_pe7",
-       "lcd_d8_pf0",
-       "lcd_d9_pf1",
-       "lcd_dc0_pn6",
-       "lcd_dc1_pd2",
-       "lcd_de_pj1",
-       "lcd_hsync_pj3",
-       "lcd_m1_pw1",
-       "lcd_pclk_pb3",
-       "lcd_pwr0_pb2",
-       "lcd_pwr1_pc1",
-       "lcd_pwr2_pc6",
-       "lcd_sck_pz4",
-       "lcd_sdin_pz2",
-       "lcd_sdout_pn5",
-       "lcd_vsync_pj4",
-       "lcd_wr_n_pz3",
-};
-
-static const char * const dtv_groups[] = {
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gmi_cs0_n_pj0",
-       "gmi_cs1_n_pj2",
-};
-
-static const char * const extperiph1_groups[] = {
-       "clk1_out_pw4",
-};
-
-static const char * const extperiph2_groups[] = {
-       "clk2_out_pw5",
-};
-
-static const char * const extperiph3_groups[] = {
-       "clk3_out_pee0",
-};
-
-static const char * const gmi_groups[] = {
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_fs_pn0",
-       "dap1_sclk_pn3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_fs_pp4",
-       "dap4_sclk_pp7",
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-       "gmi_a16_pj7",
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gmi_a19_pk7",
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad10_ph2",
-       "gmi_ad11_ph3",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_ad8_ph0",
-       "gmi_ad9_ph1",
-       "gmi_adv_n_pk0",
-       "gmi_clk_pk1",
-       "gmi_cs0_n_pj0",
-       "gmi_cs1_n_pj2",
-       "gmi_cs2_n_pk3",
-       "gmi_cs3_n_pk4",
-       "gmi_cs4_n_pk2",
-       "gmi_cs6_n_pi3",
-       "gmi_cs7_n_pi6",
-       "gmi_dqs_pi2",
-       "gmi_iordy_pi5",
-       "gmi_oe_n_pi1",
-       "gmi_rst_n_pi4",
-       "gmi_wait_pi7",
-       "gmi_wp_n_pc7",
-       "gmi_wr_n_pi0",
-       "pu0",
-       "pu1",
-       "pu2",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pu6",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-       "spi1_cs0_n_px6",
-       "spi1_mosi_px4",
-       "spi1_sck_px5",
-       "spi2_cs0_n_px3",
-       "spi2_miso_px1",
-       "spi2_mosi_px0",
-       "spi2_sck_px2",
-       "uart2_cts_n_pj5",
-       "uart2_rts_n_pj6",
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-       "uart3_rxd_pw7",
-       "uart3_txd_pw6",
-};
-
-static const char * const gmi_alt_groups[] = {
-       "gmi_a16_pj7",
-       "gmi_cs3_n_pk4",
-       "gmi_cs7_n_pi6",
-       "gmi_wp_n_pc7",
-};
-
-static const char * const hda_groups[] = {
-       "clk1_req_pee2",
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_fs_pn0",
-       "dap1_sclk_pn3",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "pex_l0_clkreq_n_pdd2",
-       "pex_l0_prsnt_n_pdd0",
-       "pex_l0_rst_n_pdd1",
-       "pex_l1_clkreq_n_pdd6",
-       "pex_l1_prsnt_n_pdd4",
-       "pex_l1_rst_n_pdd5",
-       "pex_l2_clkreq_n_pcc7",
-       "pex_l2_prsnt_n_pdd7",
-       "pex_l2_rst_n_pcc6",
-       "pex_wake_n_pdd3",
-       "spdif_in_pk6",
-};
-
-static const char * const hdcp_groups[] = {
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-       "lcd_pwr0_pb2",
-       "lcd_pwr2_pc6",
-       "lcd_sck_pz4",
-       "lcd_sdout_pn5",
-       "lcd_wr_n_pz3",
-};
-
-static const char * const hdmi_groups[] = {
-       "hdmi_int_pn7",
-};
-
-static const char * const hsi_groups[] = {
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-};
-
-static const char * const i2c1_groups[] = {
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-       "spi2_cs1_n_pw2",
-       "spi2_cs2_n_pw3",
-};
-
-static const char * const i2c2_groups[] = {
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-};
-
-static const char * const i2c3_groups[] = {
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat4_paa4",
-};
-
-static const char * const i2c4_groups[] = {
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-};
-
-static const char * const i2cpwr_groups[] = {
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-};
-
-static const char * const i2s0_groups[] = {
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_fs_pn0",
-       "dap1_sclk_pn3",
-};
-
-static const char * const i2s1_groups[] = {
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-};
-
-static const char * const i2s2_groups[] = {
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_fs_pp0",
-       "dap3_sclk_pp3",
-};
-
-static const char * const i2s3_groups[] = {
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_fs_pp4",
-       "dap4_sclk_pp7",
-};
-
-static const char * const i2s4_groups[] = {
-       "pbb0",
-       "pbb7",
-       "pcc1",
-       "pcc2",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-};
-
-static const char * const invalid_groups[] = {
-       "kb_row3_pr3",
-       "sdmmc4_clk_pcc4",
-};
-
-static const char * const kbc_groups[] = {
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row10_ps2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-       "kb_row15_ps7",
-       "kb_row2_pr2",
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-};
-
-static const char * const mio_groups[] = {
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "kb_row10_ps2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-       "kb_row15_ps7",
-       "kb_row6_pr6",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-};
-
-static const char * const nand_groups[] = {
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad10_ph2",
-       "gmi_ad11_ph3",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_ad8_ph0",
-       "gmi_ad9_ph1",
-       "gmi_adv_n_pk0",
-       "gmi_clk_pk1",
-       "gmi_cs0_n_pj0",
-       "gmi_cs1_n_pj2",
-       "gmi_cs2_n_pk3",
-       "gmi_cs3_n_pk4",
-       "gmi_cs4_n_pk2",
-       "gmi_cs6_n_pi3",
-       "gmi_cs7_n_pi6",
-       "gmi_dqs_pi2",
-       "gmi_iordy_pi5",
-       "gmi_oe_n_pi1",
-       "gmi_rst_n_pi4",
-       "gmi_wait_pi7",
-       "gmi_wp_n_pc7",
-       "gmi_wr_n_pi0",
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row10_ps2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-       "kb_row15_ps7",
-       "kb_row2_pr2",
-       "kb_row3_pr3",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-       "kb_row6_pr6",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-};
-
-static const char * const nand_alt_groups[] = {
-       "gmi_cs6_n_pi3",
-       "gmi_cs7_n_pi6",
-       "gmi_rst_n_pi4",
-};
-
-static const char * const owr_groups[] = {
-       "pu0",
-       "pv2",
-       "kb_row5_pr5",
-       "owr",
-};
-
-static const char * const pcie_groups[] = {
-       "pex_l0_clkreq_n_pdd2",
-       "pex_l0_prsnt_n_pdd0",
-       "pex_l0_rst_n_pdd1",
-       "pex_l1_clkreq_n_pdd6",
-       "pex_l1_prsnt_n_pdd4",
-       "pex_l1_rst_n_pdd5",
-       "pex_l2_clkreq_n_pcc7",
-       "pex_l2_prsnt_n_pdd7",
-       "pex_l2_rst_n_pcc6",
-       "pex_wake_n_pdd3",
-};
-
-static const char * const pwm0_groups[] = {
-       "gmi_ad8_ph0",
-       "pu3",
-       "sdmmc3_dat3_pb4",
-       "sdmmc3_dat5_pd0",
-       "uart3_rts_n_pc0",
-};
-
-static const char * const pwm1_groups[] = {
-       "gmi_ad9_ph1",
-       "pu4",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat4_pd1",
-};
-
-static const char * const pwm2_groups[] = {
-       "gmi_ad10_ph2",
-       "pu5",
-       "sdmmc3_clk_pa6",
-};
-
-static const char * const pwm3_groups[] = {
-       "gmi_ad11_ph3",
-       "pu6",
-       "sdmmc3_cmd_pa7",
-};
-
-static const char * const pwr_int_n_groups[] = {
-       "pwr_int_n",
-};
-
-static const char * const rsvd1_groups[] = {
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_adv_n_pk0",
-       "gmi_clk_pk1",
-       "gmi_cs0_n_pj0",
-       "gmi_cs1_n_pj2",
-       "gmi_cs2_n_pk3",
-       "gmi_cs3_n_pk4",
-       "gmi_cs4_n_pk2",
-       "gmi_dqs_pi2",
-       "gmi_iordy_pi5",
-       "gmi_oe_n_pi1",
-       "gmi_wait_pi7",
-       "gmi_wp_n_pc7",
-       "gmi_wr_n_pi0",
-       "pu1",
-       "pu2",
-       "pv0",
-       "pv1",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat3_pb4",
-       "vi_pclk_pt0",
-};
-
-static const char * const rsvd2_groups[] = {
-       "clk1_out_pw4",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-       "clk_32k_in",
-       "clk_32k_out_pa0",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "crt_hsync_pv6",
-       "crt_vsync_pv7",
-       "dap3_din_pp1",
-       "dap3_dout_pp2",
-       "dap3_fs_pp0",
-       "dap3_sclk_pp3",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_fs_pp4",
-       "dap4_sclk_pp7",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "pbb0",
-       "pbb7",
-       "pcc1",
-       "pcc2",
-       "pv0",
-       "pv1",
-       "pv2",
-       "pv3",
-       "hdmi_cec_pee3",
-       "hdmi_int_pn7",
-       "jtag_rtck_pu7",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-       "pwr_int_n",
-       "sdmmc1_clk_pz0",
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat0_py7",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat3_py4",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc4_rst_n_pcc3",
-       "spdif_out_pk5",
-       "sys_clk_req_pz5",
-       "uart3_cts_n_pa1",
-       "uart3_rxd_pw7",
-       "uart3_txd_pw6",
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-       "vi_d0_pt4",
-       "vi_d10_pt2",
-       "vi_d11_pt3",
-       "vi_hsync_pd7",
-       "vi_vsync_pd6",
-};
-
-static const char * const rsvd3_groups[] = {
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-       "clk1_out_pw4",
-       "clk1_req_pee2",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-       "clk_32k_in",
-       "clk_32k_out_pa0",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "crt_hsync_pv6",
-       "crt_vsync_pv7",
-       "dap2_din_pa4",
-       "dap2_dout_pa5",
-       "dap2_fs_pa2",
-       "dap2_sclk_pa3",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "pbb0",
-       "pbb7",
-       "pcc1",
-       "pcc2",
-       "pv0",
-       "pv1",
-       "pv2",
-       "pv3",
-       "hdmi_cec_pee3",
-       "hdmi_int_pn7",
-       "jtag_rtck_pu7",
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row3_pr3",
-       "lcd_d0_pe0",
-       "lcd_d1_pe1",
-       "lcd_d10_pf2",
-       "lcd_d11_pf3",
-       "lcd_d12_pf4",
-       "lcd_d13_pf5",
-       "lcd_d14_pf6",
-       "lcd_d15_pf7",
-       "lcd_d16_pm0",
-       "lcd_d17_pm1",
-       "lcd_d18_pm2",
-       "lcd_d19_pm3",
-       "lcd_d2_pe2",
-       "lcd_d20_pm4",
-       "lcd_d21_pm5",
-       "lcd_d22_pm6",
-       "lcd_d23_pm7",
-       "lcd_d3_pe3",
-       "lcd_d4_pe4",
-       "lcd_d5_pe5",
-       "lcd_d6_pe6",
-       "lcd_d7_pe7",
-       "lcd_d8_pf0",
-       "lcd_d9_pf1",
-       "lcd_dc0_pn6",
-       "lcd_dc1_pd2",
-       "lcd_de_pj1",
-       "lcd_hsync_pj3",
-       "lcd_m1_pw1",
-       "lcd_pclk_pb3",
-       "lcd_pwr1_pc1",
-       "lcd_vsync_pj4",
-       "owr",
-       "pex_l0_clkreq_n_pdd2",
-       "pex_l0_prsnt_n_pdd0",
-       "pex_l0_rst_n_pdd1",
-       "pex_l1_clkreq_n_pdd6",
-       "pex_l1_prsnt_n_pdd4",
-       "pex_l1_rst_n_pdd5",
-       "pex_l2_clkreq_n_pcc7",
-       "pex_l2_prsnt_n_pdd7",
-       "pex_l2_rst_n_pcc6",
-       "pex_wake_n_pdd3",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-       "pwr_int_n",
-       "sdmmc1_clk_pz0",
-       "sdmmc1_cmd_pz1",
-       "sdmmc4_rst_n_pcc3",
-       "sys_clk_req_pz5",
-};
-
-static const char * const rsvd4_groups[] = {
-       "clk1_out_pw4",
-       "clk1_req_pee2",
-       "clk2_out_pw5",
-       "clk2_req_pcc5",
-       "clk3_out_pee0",
-       "clk3_req_pee1",
-       "clk_32k_in",
-       "clk_32k_out_pa0",
-       "core_pwr_req",
-       "cpu_pwr_req",
-       "crt_hsync_pv6",
-       "crt_vsync_pv7",
-       "dap4_din_pp5",
-       "dap4_dout_pp6",
-       "dap4_fs_pp4",
-       "dap4_sclk_pp7",
-       "ddc_scl_pv4",
-       "ddc_sda_pv5",
-       "gen1_i2c_scl_pc4",
-       "gen1_i2c_sda_pc5",
-       "gen2_i2c_scl_pt5",
-       "gen2_i2c_sda_pt6",
-       "gmi_a19_pk7",
-       "gmi_ad0_pg0",
-       "gmi_ad1_pg1",
-       "gmi_ad10_ph2",
-       "gmi_ad11_ph3",
-       "gmi_ad12_ph4",
-       "gmi_ad13_ph5",
-       "gmi_ad14_ph6",
-       "gmi_ad15_ph7",
-       "gmi_ad2_pg2",
-       "gmi_ad3_pg3",
-       "gmi_ad4_pg4",
-       "gmi_ad5_pg5",
-       "gmi_ad6_pg6",
-       "gmi_ad7_pg7",
-       "gmi_ad8_ph0",
-       "gmi_ad9_ph1",
-       "gmi_adv_n_pk0",
-       "gmi_clk_pk1",
-       "gmi_cs2_n_pk3",
-       "gmi_cs4_n_pk2",
-       "gmi_dqs_pi2",
-       "gmi_iordy_pi5",
-       "gmi_oe_n_pi1",
-       "gmi_rst_n_pi4",
-       "gmi_wait_pi7",
-       "gmi_wr_n_pi0",
-       "pcc2",
-       "pu0",
-       "pu1",
-       "pu2",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pu6",
-       "pv0",
-       "pv1",
-       "pv2",
-       "pv3",
-       "hdmi_cec_pee3",
-       "hdmi_int_pn7",
-       "jtag_rtck_pu7",
-       "kb_col2_pq2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-       "kb_col5_pq5",
-       "kb_row0_pr0",
-       "kb_row1_pr1",
-       "kb_row2_pr2",
-       "kb_row4_pr4",
-       "lcd_cs0_n_pn4",
-       "lcd_cs1_n_pw0",
-       "lcd_d0_pe0",
-       "lcd_d1_pe1",
-       "lcd_d10_pf2",
-       "lcd_d11_pf3",
-       "lcd_d12_pf4",
-       "lcd_d13_pf5",
-       "lcd_d14_pf6",
-       "lcd_d15_pf7",
-       "lcd_d16_pm0",
-       "lcd_d17_pm1",
-       "lcd_d18_pm2",
-       "lcd_d19_pm3",
-       "lcd_d2_pe2",
-       "lcd_d20_pm4",
-       "lcd_d21_pm5",
-       "lcd_d22_pm6",
-       "lcd_d23_pm7",
-       "lcd_d3_pe3",
-       "lcd_d4_pe4",
-       "lcd_d5_pe5",
-       "lcd_d6_pe6",
-       "lcd_d7_pe7",
-       "lcd_d8_pf0",
-       "lcd_d9_pf1",
-       "lcd_dc0_pn6",
-       "lcd_dc1_pd2",
-       "lcd_de_pj1",
-       "lcd_hsync_pj3",
-       "lcd_m1_pw1",
-       "lcd_pclk_pb3",
-       "lcd_pwr1_pc1",
-       "lcd_sdin_pz2",
-       "lcd_vsync_pj4",
-       "owr",
-       "pex_l0_clkreq_n_pdd2",
-       "pex_l0_prsnt_n_pdd0",
-       "pex_l0_rst_n_pdd1",
-       "pex_l1_clkreq_n_pdd6",
-       "pex_l1_prsnt_n_pdd4",
-       "pex_l1_rst_n_pdd5",
-       "pex_l2_clkreq_n_pcc7",
-       "pex_l2_prsnt_n_pdd7",
-       "pex_l2_rst_n_pcc6",
-       "pex_wake_n_pdd3",
-       "pwr_i2c_scl_pz6",
-       "pwr_i2c_sda_pz7",
-       "pwr_int_n",
-       "spi1_miso_px7",
-       "sys_clk_req_pz5",
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-       "uart3_rxd_pw7",
-       "uart3_txd_pw6",
-       "vi_d0_pt4",
-       "vi_d1_pd5",
-       "vi_d10_pt2",
-       "vi_d11_pt3",
-       "vi_d2_pl0",
-       "vi_d3_pl1",
-       "vi_d4_pl2",
-       "vi_d5_pl3",
-       "vi_d6_pl4",
-       "vi_d7_pl5",
-       "vi_d8_pl6",
-       "vi_d9_pl7",
-       "vi_hsync_pd7",
-       "vi_pclk_pt0",
-       "vi_vsync_pd6",
-};
-
-static const char * const rtck_groups[] = {
-       "jtag_rtck_pu7",
-};
-
-static const char * const sata_groups[] = {
-       "gmi_cs6_n_pi3",
-};
-
-static const char * const sdmmc1_groups[] = {
-       "sdmmc1_clk_pz0",
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat0_py7",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat3_py4",
-};
-
-static const char * const sdmmc2_groups[] = {
-       "dap1_din_pn1",
-       "dap1_dout_pn2",
-       "dap1_fs_pn0",
-       "dap1_sclk_pn3",
-       "kb_row10_ps2",
-       "kb_row11_ps3",
-       "kb_row12_ps4",
-       "kb_row13_ps5",
-       "kb_row14_ps6",
-       "kb_row15_ps7",
-       "kb_row6_pr6",
-       "kb_row7_pr7",
-       "kb_row8_ps0",
-       "kb_row9_ps1",
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-       "vi_d1_pd5",
-       "vi_d2_pl0",
-       "vi_d3_pl1",
-       "vi_d4_pl2",
-       "vi_d5_pl3",
-       "vi_d6_pl4",
-       "vi_d7_pl5",
-       "vi_d8_pl6",
-       "vi_d9_pl7",
-       "vi_pclk_pt0",
-};
-
-static const char * const sdmmc3_groups[] = {
-       "sdmmc3_clk_pa6",
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat3_pb4",
-       "sdmmc3_dat4_pd1",
-       "sdmmc3_dat5_pd0",
-       "sdmmc3_dat6_pd3",
-       "sdmmc3_dat7_pd4",
-};
-
-static const char * const sdmmc4_groups[] = {
-       "cam_i2c_scl_pbb1",
-       "cam_i2c_sda_pbb2",
-       "cam_mclk_pcc0",
-       "pbb0",
-       "pbb3",
-       "pbb4",
-       "pbb5",
-       "pbb6",
-       "pbb7",
-       "pcc1",
-       "sdmmc4_clk_pcc4",
-       "sdmmc4_cmd_pt7",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "sdmmc4_dat4_paa4",
-       "sdmmc4_dat5_paa5",
-       "sdmmc4_dat6_paa6",
-       "sdmmc4_dat7_paa7",
-       "sdmmc4_rst_n_pcc3",
-};
-
-static const char * const spdif_groups[] = {
-       "sdmmc3_dat6_pd3",
-       "sdmmc3_dat7_pd4",
-       "spdif_in_pk6",
-       "spdif_out_pk5",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-};
-
-static const char * const spi1_groups[] = {
-       "spi1_cs0_n_px6",
-       "spi1_miso_px7",
-       "spi1_mosi_px4",
-       "spi1_sck_px5",
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-};
-
-static const char * const spi2_groups[] = {
-       "sdmmc3_cmd_pa7",
-       "sdmmc3_dat4_pd1",
-       "sdmmc3_dat5_pd0",
-       "sdmmc3_dat6_pd3",
-       "sdmmc3_dat7_pd4",
-       "spi1_cs0_n_px6",
-       "spi1_mosi_px4",
-       "spi1_sck_px5",
-       "spi2_cs0_n_px3",
-       "spi2_cs1_n_pw2",
-       "spi2_cs2_n_pw3",
-       "spi2_miso_px1",
-       "spi2_mosi_px0",
-       "spi2_sck_px2",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-};
-
-static const char * const spi2_alt_groups[] = {
-       "spi1_cs0_n_px6",
-       "spi1_miso_px7",
-       "spi1_mosi_px4",
-       "spi1_sck_px5",
-       "spi2_cs1_n_pw2",
-       "spi2_cs2_n_pw3",
-};
-
-static const char * const spi3_groups[] = {
-       "sdmmc3_clk_pa6",
-       "sdmmc3_dat0_pb7",
-       "sdmmc3_dat1_pb6",
-       "sdmmc3_dat2_pb5",
-       "sdmmc3_dat3_pb4",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-       "spi1_miso_px7",
-       "spi2_cs0_n_px3",
-       "spi2_cs1_n_pw2",
-       "spi2_cs2_n_pw3",
-       "spi2_miso_px1",
-       "spi2_mosi_px0",
-       "spi2_sck_px2",
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-};
-
-static const char * const spi4_groups[] = {
-       "gmi_a16_pj7",
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gmi_a19_pk7",
-       "sdmmc3_dat4_pd1",
-       "sdmmc3_dat5_pd0",
-       "sdmmc3_dat6_pd3",
-       "sdmmc3_dat7_pd4",
-       "uart2_cts_n_pj5",
-       "uart2_rts_n_pj6",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-};
-
-static const char * const spi5_groups[] = {
-       "lcd_cs0_n_pn4",
-       "lcd_cs1_n_pw0",
-       "lcd_pwr0_pb2",
-       "lcd_pwr2_pc6",
-       "lcd_sck_pz4",
-       "lcd_sdin_pz2",
-       "lcd_sdout_pn5",
-       "lcd_wr_n_pz3",
-};
-
-static const char * const spi6_groups[] = {
-       "spi2_cs0_n_px3",
-       "spi2_miso_px1",
-       "spi2_mosi_px0",
-       "spi2_sck_px2",
-};
-
-static const char * const sysclk_groups[] = {
-       "sys_clk_req_pz5",
-};
-
-static const char * const test_groups[] = {
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-};
-
-static const char * const trace_groups[] = {
-       "kb_col0_pq0",
-       "kb_col1_pq1",
-       "kb_col2_pq2",
-       "kb_col3_pq3",
-       "kb_col4_pq4",
-       "kb_col5_pq5",
-       "kb_col6_pq6",
-       "kb_col7_pq7",
-       "kb_row4_pr4",
-       "kb_row5_pr5",
-};
-
-static const char * const uarta_groups[] = {
-       "pu0",
-       "pu1",
-       "pu2",
-       "pu3",
-       "pu4",
-       "pu5",
-       "pu6",
-       "sdmmc1_clk_pz0",
-       "sdmmc1_cmd_pz1",
-       "sdmmc1_dat0_py7",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat3_py4",
-       "sdmmc3_clk_pa6",
-       "sdmmc3_cmd_pa7",
-       "uart2_cts_n_pj5",
-       "uart2_rts_n_pj6",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-};
-
-static const char * const uartb_groups[] = {
-       "uart2_cts_n_pj5",
-       "uart2_rts_n_pj6",
-       "uart2_rxd_pc3",
-       "uart2_txd_pc2",
-};
-
-static const char * const uartc_groups[] = {
-       "uart3_cts_n_pa1",
-       "uart3_rts_n_pc0",
-       "uart3_rxd_pw7",
-       "uart3_txd_pw6",
-};
-
-static const char * const uartd_groups[] = {
-       "gmi_a16_pj7",
-       "gmi_a17_pb0",
-       "gmi_a18_pb1",
-       "gmi_a19_pk7",
-       "ulpi_clk_py0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-};
-
-static const char * const uarte_groups[] = {
-       "sdmmc1_dat0_py7",
-       "sdmmc1_dat1_py6",
-       "sdmmc1_dat2_py5",
-       "sdmmc1_dat3_py4",
-       "sdmmc4_dat0_paa0",
-       "sdmmc4_dat1_paa1",
-       "sdmmc4_dat2_paa2",
-       "sdmmc4_dat3_paa3",
-};
-
-static const char * const ulpi_groups[] = {
-       "ulpi_clk_py0",
-       "ulpi_data0_po1",
-       "ulpi_data1_po2",
-       "ulpi_data2_po3",
-       "ulpi_data3_po4",
-       "ulpi_data4_po5",
-       "ulpi_data5_po6",
-       "ulpi_data6_po7",
-       "ulpi_data7_po0",
-       "ulpi_dir_py1",
-       "ulpi_nxt_py2",
-       "ulpi_stp_py3",
-};
-
-static const char * const vgp1_groups[] = {
-       "cam_i2c_scl_pbb1",
-};
-
-static const char * const vgp2_groups[] = {
-       "cam_i2c_sda_pbb2",
-};
-
-static const char * const vgp3_groups[] = {
-       "pbb3",
-       "sdmmc4_dat5_paa5",
-};
-
-static const char * const vgp4_groups[] = {
-       "pbb4",
-       "sdmmc4_dat6_paa6",
-};
-
-static const char * const vgp5_groups[] = {
-       "pbb5",
-       "sdmmc4_dat7_paa7",
-};
-
-static const char * const vgp6_groups[] = {
-       "pbb6",
-       "sdmmc4_rst_n_pcc3",
-};
-
-static const char * const vi_groups[] = {
-       "cam_mclk_pcc0",
-       "vi_d0_pt4",
-       "vi_d1_pd5",
-       "vi_d10_pt2",
-       "vi_d11_pt3",
-       "vi_d2_pl0",
-       "vi_d3_pl1",
-       "vi_d4_pl2",
-       "vi_d5_pl3",
-       "vi_d6_pl4",
-       "vi_d7_pl5",
-       "vi_d8_pl6",
-       "vi_d9_pl7",
-       "vi_hsync_pd7",
-       "vi_mclk_pt1",
-       "vi_pclk_pt0",
-       "vi_vsync_pd6",
-};
-
-static const char * const vi_alt1_groups[] = {
-       "cam_mclk_pcc0",
-       "vi_mclk_pt1",
-};
-
-static const char * const vi_alt2_groups[] = {
-       "vi_mclk_pt1",
-};
-
-static const char * const vi_alt3_groups[] = {
-       "cam_mclk_pcc0",
-       "vi_mclk_pt1",
-};
 
 #define FUNCTION(fname)                                        \
        {                                               \
                .name = #fname,                         \
-               .groups = fname##_groups,               \
-               .ngroups = ARRAY_SIZE(fname##_groups),  \
        }
 
-static const struct tegra_function tegra30_functions[] = {
+static struct tegra_function tegra30_functions[] = {
        FUNCTION(blink),
        FUNCTION(cec),
        FUNCTION(clk_12m_out),
@@ -3345,11 +2105,11 @@ static const struct tegra_function tegra30_functions[] = {
        FUNCTION(vi_alt3),
 };
 
-#define DRV_PINGROUP_REG_A     0x868   /* bank 0 */
-#define PINGROUP_REG_A         0x3000  /* bank 1 */
+#define DRV_PINGROUP_REG_A             0x868   /* bank 0 */
+#define PINGROUP_REG_A                 0x3000  /* bank 1 */
 
-#define PINGROUP_REG_Y(r) ((r) - PINGROUP_REG_A)
-#define PINGROUP_REG_N(r) -1
+#define PINGROUP_REG_Y(r)              ((r) - PINGROUP_REG_A)
+#define PINGROUP_REG_N(r)              -1
 
 #define PINGROUP(pg_name, f0, f1, f2, f3, f_safe, r, od, ior)  \
        {                                                       \
@@ -3357,12 +2117,12 @@ static const struct tegra_function tegra30_functions[] = {
                .pins = pg_name##_pins,                         \
                .npins = ARRAY_SIZE(pg_name##_pins),            \
                .funcs = {                                      \
-                       TEGRA_MUX_ ## f0,                       \
-                       TEGRA_MUX_ ## f1,                       \
-                       TEGRA_MUX_ ## f2,                       \
-                       TEGRA_MUX_ ## f3,                       \
+                       TEGRA_MUX_##f0,                         \
+                       TEGRA_MUX_##f1,                         \
+                       TEGRA_MUX_##f2,                         \
+                       TEGRA_MUX_##f3,                         \
                },                                              \
-               .func_safe = TEGRA_MUX_ ## f_safe,              \
+               .func_safe = TEGRA_MUX_##f_safe,                \
                .mux_reg = PINGROUP_REG_Y(r),                   \
                .mux_bank = 1,                                  \
                .mux_bit = 0,                                   \
@@ -3389,6 +2149,9 @@ static const struct tegra_function tegra30_functions[] = {
                .drvtype_reg = -1,                              \
        }
 
+#define DRV_PINGROUP_REG_Y(r)          ((r) - DRV_PINGROUP_REG_A)
+#define DRV_PINGROUP_REG_N(r)          -1
+
 #define DRV_PINGROUP(pg_name, r, hsm_b, schmitt_b, lpmd_b,     \
                     drvdn_b, drvdn_w, drvup_b, drvup_w,        \
                     slwr_b, slwr_w, slwf_b, slwf_w)            \
@@ -3404,7 +2167,7 @@ static const struct tegra_function tegra30_functions[] = {
                .lock_reg = -1,                                 \
                .ioreset_reg = -1,                              \
                .rcv_sel_reg = -1,                              \
-               .drv_reg = ((r) - DRV_PINGROUP_REG_A),          \
+               .drv_reg = DRV_PINGROUP_REG_Y(r),               \
                .drv_bank = 0,                                  \
                .hsm_bit = hsm_b,                               \
                .schmitt_bit = schmitt_b,                       \
@@ -3422,7 +2185,6 @@ static const struct tegra_function tegra30_functions[] = {
 
 static const struct tegra_pingroup tegra30_groups[] = {
        /*       pg_name,              f0,           f1,           f2,           f3,           safe,         r,      od, ior */
-       /* FIXME: Fill in correct data in safe column */
        PINGROUP(clk_32k_out_pa0,      BLINK,        RSVD2,        RSVD3,        RSVD4,        RSVD4,        0x331c, N, N),
        PINGROUP(uart3_cts_n_pa1,      UARTC,        RSVD2,        GMI,          RSVD4,        RSVD4,        0x317c, N, N),
        PINGROUP(dap2_fs_pa2,          I2S1,         HDA,          RSVD3,        GMI,          RSVD3,        0x3358, N, N),
@@ -3735,6 +2497,7 @@ static struct of_device_id tegra30_pinctrl_of_match[] = {
        { .compatible = "nvidia,tegra30-pinmux", },
        { },
 };
+MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
 
 static struct platform_driver tegra30_pinctrl_driver = {
        .driver = {
@@ -3745,20 +2508,8 @@ static struct platform_driver tegra30_pinctrl_driver = {
        .probe = tegra30_pinctrl_probe,
        .remove = tegra_pinctrl_remove,
 };
-
-static int __init tegra30_pinctrl_init(void)
-{
-       return platform_driver_register(&tegra30_pinctrl_driver);
-}
-arch_initcall(tegra30_pinctrl_init);
-
-static void __exit tegra30_pinctrl_exit(void)
-{
-       platform_driver_unregister(&tegra30_pinctrl_driver);
-}
-module_exit(tegra30_pinctrl_exit);
+module_platform_driver(tegra30_pinctrl_driver);
 
 MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
 MODULE_DESCRIPTION("NVIDIA Tegra30 pinctrl driver");
 MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, tegra30_pinctrl_of_match);
index c381ae63c5083a51b190d5a9161ffbe3099a9173..48093719167abd91e27f93eb869eab3f1edd5c51 100644
@@ -2260,6 +2260,42 @@ static const unsigned int msiof0_tx_pins[] = {
 static const unsigned int msiof0_tx_mux[] = {
        MSIOF0_TXD_MARK,
 };
+
+static const unsigned int msiof0_clk_b_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(1, 23),
+};
+static const unsigned int msiof0_clk_b_mux[] = {
+       MSIOF0_SCK_B_MARK,
+};
+static const unsigned int msiof0_ss1_b_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(1, 12),
+};
+static const unsigned int msiof0_ss1_b_mux[] = {
+       MSIOF0_SS1_B_MARK,
+};
+static const unsigned int msiof0_ss2_b_pins[] = {
+       /* SS2 */
+       RCAR_GP_PIN(1, 10),
+};
+static const unsigned int msiof0_ss2_b_mux[] = {
+       MSIOF0_SS2_B_MARK,
+};
+static const unsigned int msiof0_rx_b_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(1, 29),
+};
+static const unsigned int msiof0_rx_b_mux[] = {
+       MSIOF0_RXD_B_MARK,
+};
+static const unsigned int msiof0_tx_b_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(1, 28),
+};
+static const unsigned int msiof0_tx_b_mux[] = {
+       MSIOF0_TXD_B_MARK,
+};
 /* - MSIOF1 ----------------------------------------------------------------- */
 static const unsigned int msiof1_clk_pins[] = {
        /* SCK */
@@ -2303,6 +2339,42 @@ static const unsigned int msiof1_tx_pins[] = {
 static const unsigned int msiof1_tx_mux[] = {
        MSIOF1_TXD_MARK,
 };
+
+static const unsigned int msiof1_clk_b_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(1, 16),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+       MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+       MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+       /* SS2 */
+       RCAR_GP_PIN(0, 19),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+       MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_rx_b_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(1, 17),
+};
+static const unsigned int msiof1_rx_b_mux[] = {
+       MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_tx_b_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(0, 20),
+};
+static const unsigned int msiof1_tx_b_mux[] = {
+       MSIOF1_TXD_B_MARK,
+};
 /* - MSIOF2 ----------------------------------------------------------------- */
 static const unsigned int msiof2_clk_pins[] = {
        /* SCK */
@@ -2389,6 +2461,58 @@ static const unsigned int msiof3_tx_pins[] = {
 static const unsigned int msiof3_tx_mux[] = {
        MSIOF3_TXD_MARK,
 };
+
+static const unsigned int msiof3_clk_b_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(0, 0),
+};
+static const unsigned int msiof3_clk_b_mux[] = {
+       MSIOF3_SCK_B_MARK,
+};
+static const unsigned int msiof3_sync_b_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(0, 1),
+};
+static const unsigned int msiof3_sync_b_mux[] = {
+       MSIOF3_SYNC_B_MARK,
+};
+static const unsigned int msiof3_rx_b_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(0, 2),
+};
+static const unsigned int msiof3_rx_b_mux[] = {
+       MSIOF3_RXD_B_MARK,
+};
+static const unsigned int msiof3_tx_b_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(0, 3),
+};
+static const unsigned int msiof3_tx_b_mux[] = {
+       MSIOF3_TXD_B_MARK,
+};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi_ctrl_pins[] = {
+       /* SPCLK, SSL */
+       RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int qspi_ctrl_mux[] = {
+       SPCLK_MARK, SSL_MARK,
+};
+static const unsigned int qspi_data2_pins[] = {
+       /* MOSI_IO0, MISO_IO1 */
+       RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int qspi_data2_mux[] = {
+       MOSI_IO0_MARK, MISO_IO1_MARK,
+};
+static const unsigned int qspi_data4_pins[] = {
+       /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+       RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+       RCAR_GP_PIN(1, 8),
+};
+static const unsigned int qspi_data4_mux[] = {
+       MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK,
+};
 /* - SCIF0 ------------------------------------------------------------------ */
 static const unsigned int scif0_data_pins[] = {
        /* RX, TX */
@@ -3231,6 +3355,13 @@ static const unsigned int usb0_pins[] = {
 static const unsigned int usb0_mux[] = {
        USB0_PWEN_MARK, USB0_OVC_VBUS_MARK,
 };
+static const unsigned int usb0_ovc_vbus_pins[] = {
+       /* OVC/VBUS */
+       RCAR_GP_PIN(5, 19),
+};
+static const unsigned int usb0_ovc_vbus_mux[] = {
+       USB0_OVC_VBUS_MARK,
+};
 /* - USB1 ------------------------------------------------------------------- */
 static const unsigned int usb1_pins[] = {
        /* PWEN, OVC */
@@ -3653,12 +3784,22 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(msiof0_ss2),
        SH_PFC_PIN_GROUP(msiof0_rx),
        SH_PFC_PIN_GROUP(msiof0_tx),
+       SH_PFC_PIN_GROUP(msiof0_clk_b),
+       SH_PFC_PIN_GROUP(msiof0_ss1_b),
+       SH_PFC_PIN_GROUP(msiof0_ss2_b),
+       SH_PFC_PIN_GROUP(msiof0_rx_b),
+       SH_PFC_PIN_GROUP(msiof0_tx_b),
        SH_PFC_PIN_GROUP(msiof1_clk),
        SH_PFC_PIN_GROUP(msiof1_sync),
        SH_PFC_PIN_GROUP(msiof1_ss1),
        SH_PFC_PIN_GROUP(msiof1_ss2),
        SH_PFC_PIN_GROUP(msiof1_rx),
        SH_PFC_PIN_GROUP(msiof1_tx),
+       SH_PFC_PIN_GROUP(msiof1_clk_b),
+       SH_PFC_PIN_GROUP(msiof1_ss1_b),
+       SH_PFC_PIN_GROUP(msiof1_ss2_b),
+       SH_PFC_PIN_GROUP(msiof1_rx_b),
+       SH_PFC_PIN_GROUP(msiof1_tx_b),
        SH_PFC_PIN_GROUP(msiof2_clk),
        SH_PFC_PIN_GROUP(msiof2_sync),
        SH_PFC_PIN_GROUP(msiof2_ss1),
@@ -3671,6 +3812,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(msiof3_ss2),
        SH_PFC_PIN_GROUP(msiof3_rx),
        SH_PFC_PIN_GROUP(msiof3_tx),
+       SH_PFC_PIN_GROUP(msiof3_clk_b),
+       SH_PFC_PIN_GROUP(msiof3_sync_b),
+       SH_PFC_PIN_GROUP(msiof3_rx_b),
+       SH_PFC_PIN_GROUP(msiof3_tx_b),
+       SH_PFC_PIN_GROUP(qspi_ctrl),
+       SH_PFC_PIN_GROUP(qspi_data2),
+       SH_PFC_PIN_GROUP(qspi_data4),
        SH_PFC_PIN_GROUP(scif0_data),
        SH_PFC_PIN_GROUP(scif0_clk),
        SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -3789,6 +3937,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(tpu0_to2),
        SH_PFC_PIN_GROUP(tpu0_to3),
        SH_PFC_PIN_GROUP(usb0),
+       SH_PFC_PIN_GROUP(usb0_ovc_vbus),
        SH_PFC_PIN_GROUP(usb1),
        SH_PFC_PIN_GROUP(usb2),
        VIN_DATA_PIN_GROUP(vin0_data, 24),
@@ -3941,6 +4090,11 @@ static const char * const msiof0_groups[] = {
        "msiof0_ss2",
        "msiof0_rx",
        "msiof0_tx",
+       "msiof0_clk_b",
+       "msiof0_ss1_b",
+       "msiof0_ss2_b",
+       "msiof0_rx_b",
+       "msiof0_tx_b",
 };
 
 static const char * const msiof1_groups[] = {
@@ -3950,6 +4104,11 @@ static const char * const msiof1_groups[] = {
        "msiof1_ss2",
        "msiof1_rx",
        "msiof1_tx",
+       "msiof1_clk_b",
+       "msiof1_ss1_b",
+       "msiof1_ss2_b",
+       "msiof1_rx_b",
+       "msiof1_tx_b",
 };
 
 static const char * const msiof2_groups[] = {
@@ -3968,6 +4127,16 @@ static const char * const msiof3_groups[] = {
        "msiof3_ss2",
        "msiof3_rx",
        "msiof3_tx",
+       "msiof3_clk_b",
+       "msiof3_sync_b",
+       "msiof3_rx_b",
+       "msiof3_tx_b",
+};
+
+static const char * const qspi_groups[] = {
+       "qspi_ctrl",
+       "qspi_data2",
+       "qspi_data4",
 };
 
 static const char * const scif0_groups[] = {
@@ -4134,6 +4303,7 @@ static const char * const tpu0_groups[] = {
 
 static const char * const usb0_groups[] = {
        "usb0",
+       "usb0_ovc_vbus",
 };
 
 static const char * const usb1_groups[] = {
@@ -4213,6 +4383,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
        SH_PFC_FUNCTION(msiof1),
        SH_PFC_FUNCTION(msiof2),
        SH_PFC_FUNCTION(msiof3),
+       SH_PFC_FUNCTION(qspi),
        SH_PFC_FUNCTION(scif0),
        SH_PFC_FUNCTION(scif1),
        SH_PFC_FUNCTION(scif2),
index 567d6918d50b226b7841c84a98b2343ad552a03e..5186d70c49d43326bc0a3e1f0405332d512cb989 100644
@@ -1945,6 +1945,50 @@ static const unsigned int i2c4_c_pins[] = {
 static const unsigned int i2c4_c_mux[] = {
        SCL4_C_MARK, SDA4_C_MARK,
 };
+/* - I2C7 ------------------------------------------------------------------- */
+static const unsigned int i2c7_pins[] = {
+       /* SCL, SDA */
+       RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int i2c7_mux[] = {
+       SCL7_MARK, SDA7_MARK,
+};
+static const unsigned int i2c7_b_pins[] = {
+       /* SCL, SDA */
+       RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+};
+static const unsigned int i2c7_b_mux[] = {
+       SCL7_B_MARK, SDA7_B_MARK,
+};
+static const unsigned int i2c7_c_pins[] = {
+       /* SCL, SDA */
+       RCAR_GP_PIN(6, 28), RCAR_GP_PIN(6, 29),
+};
+static const unsigned int i2c7_c_mux[] = {
+       SCL7_C_MARK, SDA7_C_MARK,
+};
+/* - I2C8 ------------------------------------------------------------------- */
+static const unsigned int i2c8_pins[] = {
+       /* SCL, SDA */
+       RCAR_GP_PIN(4, 13), RCAR_GP_PIN(4, 14),
+};
+static const unsigned int i2c8_mux[] = {
+       SCL8_MARK, SDA8_MARK,
+};
+static const unsigned int i2c8_b_pins[] = {
+       /* SCL, SDA */
+       RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+};
+static const unsigned int i2c8_b_mux[] = {
+       SCL8_B_MARK, SDA8_B_MARK,
+};
+static const unsigned int i2c8_c_pins[] = {
+       /* SCL, SDA */
+       RCAR_GP_PIN(6, 22), RCAR_GP_PIN(6, 23),
+};
+static const unsigned int i2c8_c_mux[] = {
+       SCL8_C_MARK, SDA8_C_MARK,
+};
 /* - INTC ------------------------------------------------------------------- */
 static const unsigned int intc_irq0_pins[] = {
        /* IRQ */
@@ -2051,6 +2095,92 @@ static const unsigned int msiof0_tx_pins[] = {
 static const unsigned int msiof0_tx_mux[] = {
        MSIOF0_TXD_MARK,
 };
+
+static const unsigned int msiof0_clk_b_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(0, 16),
+};
+static const unsigned int msiof0_clk_b_mux[] = {
+       MSIOF0_SCK_B_MARK,
+};
+static const unsigned int msiof0_sync_b_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(0, 17),
+};
+static const unsigned int msiof0_sync_b_mux[] = {
+       MSIOF0_SYNC_B_MARK,
+};
+static const unsigned int msiof0_ss1_b_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(0, 18),
+};
+static const unsigned int msiof0_ss1_b_mux[] = {
+       MSIOF0_SS1_B_MARK,
+};
+static const unsigned int msiof0_ss2_b_pins[] = {
+       /* SS2 */
+       RCAR_GP_PIN(0, 19),
+};
+static const unsigned int msiof0_ss2_b_mux[] = {
+       MSIOF0_SS2_B_MARK,
+};
+static const unsigned int msiof0_rx_b_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(0, 21),
+};
+static const unsigned int msiof0_rx_b_mux[] = {
+       MSIOF0_RXD_B_MARK,
+};
+static const unsigned int msiof0_tx_b_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(0, 20),
+};
+static const unsigned int msiof0_tx_b_mux[] = {
+       MSIOF0_TXD_B_MARK,
+};
+
+static const unsigned int msiof0_clk_c_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(5, 26),
+};
+static const unsigned int msiof0_clk_c_mux[] = {
+       MSIOF0_SCK_C_MARK,
+};
+static const unsigned int msiof0_sync_c_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(5, 25),
+};
+static const unsigned int msiof0_sync_c_mux[] = {
+       MSIOF0_SYNC_C_MARK,
+};
+static const unsigned int msiof0_ss1_c_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(5, 27),
+};
+static const unsigned int msiof0_ss1_c_mux[] = {
+       MSIOF0_SS1_C_MARK,
+};
+static const unsigned int msiof0_ss2_c_pins[] = {
+       /* SS2 */
+       RCAR_GP_PIN(5, 28),
+};
+static const unsigned int msiof0_ss2_c_mux[] = {
+       MSIOF0_SS2_C_MARK,
+};
+static const unsigned int msiof0_rx_c_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(5, 29),
+};
+static const unsigned int msiof0_rx_c_mux[] = {
+       MSIOF0_RXD_C_MARK,
+};
+static const unsigned int msiof0_tx_c_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(5, 30),
+};
+static const unsigned int msiof0_tx_c_mux[] = {
+       MSIOF0_TXD_C_MARK,
+};
 /* - MSIOF1 ----------------------------------------------------------------- */
 static const unsigned int msiof1_clk_pins[] = {
        /* SCK */
@@ -2094,6 +2224,143 @@ static const unsigned int msiof1_tx_pins[] = {
 static const unsigned int msiof1_tx_mux[] = {
        MSIOF1_TXD_MARK,
 };
+
+static const unsigned int msiof1_clk_b_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(2, 29),
+};
+static const unsigned int msiof1_clk_b_mux[] = {
+       MSIOF1_SCK_B_MARK,
+};
+static const unsigned int msiof1_sync_b_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(2, 30),
+};
+static const unsigned int msiof1_sync_b_mux[] = {
+       MSIOF1_SYNC_B_MARK,
+};
+static const unsigned int msiof1_ss1_b_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(2, 31),
+};
+static const unsigned int msiof1_ss1_b_mux[] = {
+       MSIOF1_SS1_B_MARK,
+};
+static const unsigned int msiof1_ss2_b_pins[] = {
+       /* SS2 */
+       RCAR_GP_PIN(7, 16),
+};
+static const unsigned int msiof1_ss2_b_mux[] = {
+       MSIOF1_SS2_B_MARK,
+};
+static const unsigned int msiof1_rx_b_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(7, 18),
+};
+static const unsigned int msiof1_rx_b_mux[] = {
+       MSIOF1_RXD_B_MARK,
+};
+static const unsigned int msiof1_tx_b_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(7, 17),
+};
+static const unsigned int msiof1_tx_b_mux[] = {
+       MSIOF1_TXD_B_MARK,
+};
+
+static const unsigned int msiof1_clk_c_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(2, 15),
+};
+static const unsigned int msiof1_clk_c_mux[] = {
+       MSIOF1_SCK_C_MARK,
+};
+static const unsigned int msiof1_sync_c_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(2, 16),
+};
+static const unsigned int msiof1_sync_c_mux[] = {
+       MSIOF1_SYNC_C_MARK,
+};
+static const unsigned int msiof1_rx_c_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(2, 18),
+};
+static const unsigned int msiof1_rx_c_mux[] = {
+       MSIOF1_RXD_C_MARK,
+};
+static const unsigned int msiof1_tx_c_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(2, 17),
+};
+static const unsigned int msiof1_tx_c_mux[] = {
+       MSIOF1_TXD_C_MARK,
+};
+
+static const unsigned int msiof1_clk_d_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(0, 28),
+};
+static const unsigned int msiof1_clk_d_mux[] = {
+       MSIOF1_SCK_D_MARK,
+};
+static const unsigned int msiof1_sync_d_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(0, 30),
+};
+static const unsigned int msiof1_sync_d_mux[] = {
+       MSIOF1_SYNC_D_MARK,
+};
+static const unsigned int msiof1_ss1_d_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(0, 29),
+};
+static const unsigned int msiof1_ss1_d_mux[] = {
+       MSIOF1_SS1_D_MARK,
+};
+static const unsigned int msiof1_rx_d_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(0, 27),
+};
+static const unsigned int msiof1_rx_d_mux[] = {
+       MSIOF1_RXD_D_MARK,
+};
+static const unsigned int msiof1_tx_d_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(0, 26),
+};
+static const unsigned int msiof1_tx_d_mux[] = {
+       MSIOF1_TXD_D_MARK,
+};
+
+static const unsigned int msiof1_clk_e_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(5, 18),
+};
+static const unsigned int msiof1_clk_e_mux[] = {
+       MSIOF1_SCK_E_MARK,
+};
+static const unsigned int msiof1_sync_e_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(5, 19),
+};
+static const unsigned int msiof1_sync_e_mux[] = {
+       MSIOF1_SYNC_E_MARK,
+};
+static const unsigned int msiof1_rx_e_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof1_rx_e_mux[] = {
+       MSIOF1_RXD_E_MARK,
+};
+static const unsigned int msiof1_tx_e_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(5, 20),
+};
+static const unsigned int msiof1_tx_e_mux[] = {
+       MSIOF1_TXD_E_MARK,
+};
 /* - MSIOF2 ----------------------------------------------------------------- */
 static const unsigned int msiof2_clk_pins[] = {
        /* SCK */
@@ -2137,6 +2404,197 @@ static const unsigned int msiof2_tx_pins[] = {
 static const unsigned int msiof2_tx_mux[] = {
        MSIOF2_TXD_MARK,
 };
+
+static const unsigned int msiof2_clk_b_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(3, 0),
+};
+static const unsigned int msiof2_clk_b_mux[] = {
+       MSIOF2_SCK_B_MARK,
+};
+static const unsigned int msiof2_sync_b_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(3, 1),
+};
+static const unsigned int msiof2_sync_b_mux[] = {
+       MSIOF2_SYNC_B_MARK,
+};
+static const unsigned int msiof2_ss1_b_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(3, 8),
+};
+static const unsigned int msiof2_ss1_b_mux[] = {
+       MSIOF2_SS1_B_MARK,
+};
+static const unsigned int msiof2_ss2_b_pins[] = {
+       /* SS2 */
+       RCAR_GP_PIN(3, 9),
+};
+static const unsigned int msiof2_ss2_b_mux[] = {
+       MSIOF2_SS2_B_MARK,
+};
+static const unsigned int msiof2_rx_b_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(3, 17),
+};
+static const unsigned int msiof2_rx_b_mux[] = {
+       MSIOF2_RXD_B_MARK,
+};
+static const unsigned int msiof2_tx_b_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(3, 16),
+};
+static const unsigned int msiof2_tx_b_mux[] = {
+       MSIOF2_TXD_B_MARK,
+};
+
+static const unsigned int msiof2_clk_c_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(2, 2),
+};
+static const unsigned int msiof2_clk_c_mux[] = {
+       MSIOF2_SCK_C_MARK,
+};
+static const unsigned int msiof2_sync_c_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(2, 3),
+};
+static const unsigned int msiof2_sync_c_mux[] = {
+       MSIOF2_SYNC_C_MARK,
+};
+static const unsigned int msiof2_rx_c_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(2, 5),
+};
+static const unsigned int msiof2_rx_c_mux[] = {
+       MSIOF2_RXD_C_MARK,
+};
+static const unsigned int msiof2_tx_c_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(2, 4),
+};
+static const unsigned int msiof2_tx_c_mux[] = {
+       MSIOF2_TXD_C_MARK,
+};
+
+static const unsigned int msiof2_clk_d_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(2, 14),
+};
+static const unsigned int msiof2_clk_d_mux[] = {
+       MSIOF2_SCK_D_MARK,
+};
+static const unsigned int msiof2_sync_d_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(2, 15),
+};
+static const unsigned int msiof2_sync_d_mux[] = {
+       MSIOF2_SYNC_D_MARK,
+};
+static const unsigned int msiof2_ss1_d_pins[] = {
+       /* SS1 */
+       RCAR_GP_PIN(2, 17),
+};
+static const unsigned int msiof2_ss1_d_mux[] = {
+       MSIOF2_SS1_D_MARK,
+};
+static const unsigned int msiof2_ss2_d_pins[] = {
+       /* SS2 */
+       RCAR_GP_PIN(2, 19),
+};
+static const unsigned int msiof2_ss2_d_mux[] = {
+       MSIOF2_SS2_D_MARK,
+};
+static const unsigned int msiof2_rx_d_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(2, 18),
+};
+static const unsigned int msiof2_rx_d_mux[] = {
+       MSIOF2_RXD_D_MARK,
+};
+static const unsigned int msiof2_tx_d_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(2, 16),
+};
+static const unsigned int msiof2_tx_d_mux[] = {
+       MSIOF2_TXD_D_MARK,
+};
+
+static const unsigned int msiof2_clk_e_pins[] = {
+       /* SCK */
+       RCAR_GP_PIN(7, 15),
+};
+static const unsigned int msiof2_clk_e_mux[] = {
+       MSIOF2_SCK_E_MARK,
+};
+static const unsigned int msiof2_sync_e_pins[] = {
+       /* SYNC */
+       RCAR_GP_PIN(7, 16),
+};
+static const unsigned int msiof2_sync_e_mux[] = {
+       MSIOF2_SYNC_E_MARK,
+};
+static const unsigned int msiof2_rx_e_pins[] = {
+       /* RXD */
+       RCAR_GP_PIN(7, 14),
+};
+static const unsigned int msiof2_rx_e_mux[] = {
+       MSIOF2_RXD_E_MARK,
+};
+static const unsigned int msiof2_tx_e_pins[] = {
+       /* TXD */
+       RCAR_GP_PIN(7, 13),
+};
+static const unsigned int msiof2_tx_e_mux[] = {
+       MSIOF2_TXD_E_MARK,
+};
+/* - QSPI ------------------------------------------------------------------- */
+static const unsigned int qspi_ctrl_pins[] = {
+       /* SPCLK, SSL */
+       RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int qspi_ctrl_mux[] = {
+       SPCLK_MARK, SSL_MARK,
+};
+static const unsigned int qspi_data2_pins[] = {
+       /* MOSI_IO0, MISO_IO1 */
+       RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6),
+};
+static const unsigned int qspi_data2_mux[] = {
+       MOSI_IO0_MARK, MISO_IO1_MARK,
+};
+static const unsigned int qspi_data4_pins[] = {
+       /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+       RCAR_GP_PIN(1, 5), RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+       RCAR_GP_PIN(1, 8),
+};
+static const unsigned int qspi_data4_mux[] = {
+       MOSI_IO0_MARK, MISO_IO1_MARK, IO2_MARK, IO3_MARK,
+};
+
+static const unsigned int qspi_ctrl_b_pins[] = {
+       /* SPCLK, SSL */
+       RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 5),
+};
+static const unsigned int qspi_ctrl_b_mux[] = {
+       SPCLK_B_MARK, SSL_B_MARK,
+};
+static const unsigned int qspi_data2_b_pins[] = {
+       /* MOSI_IO0, MISO_IO1 */
+       RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2),
+};
+static const unsigned int qspi_data2_b_mux[] = {
+       MOSI_IO0_B_MARK, MISO_IO1_B_MARK,
+};
+static const unsigned int qspi_data4_b_pins[] = {
+       /* MOSI_IO0, MISO_IO1, IO2, IO3 */
+       RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2), RCAR_GP_PIN(6, 3),
+       RCAR_GP_PIN(6, 4),
+};
+static const unsigned int qspi_data4_b_mux[] = {
+       MOSI_IO0_B_MARK, MISO_IO1_B_MARK, IO2_B_MARK, IO3_B_MARK,
+};
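+/*
+ * Illustrative sketch only: a board device tree would typically select the
+ * QSPI groups above through the generic sh-pfc bindings, e.g. (assuming the
+ * usual renesas,groups/renesas,function properties):
+ *
+ *     qspi_pins: qspi {
+ *             renesas,groups = "qspi_ctrl", "qspi_data4";
+ *             renesas,function = "qspi";
+ *     };
+ */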
 /* - SCIF0 ------------------------------------------------------------------ */
 static const unsigned int scif0_data_pins[] = {
        /* RX, TX */
@@ -3125,6 +3583,12 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(i2c4),
        SH_PFC_PIN_GROUP(i2c4_b),
        SH_PFC_PIN_GROUP(i2c4_c),
+       SH_PFC_PIN_GROUP(i2c7),
+       SH_PFC_PIN_GROUP(i2c7_b),
+       SH_PFC_PIN_GROUP(i2c7_c),
+       SH_PFC_PIN_GROUP(i2c8),
+       SH_PFC_PIN_GROUP(i2c8_b),
+       SH_PFC_PIN_GROUP(i2c8_c),
        SH_PFC_PIN_GROUP(intc_irq0),
        SH_PFC_PIN_GROUP(intc_irq1),
        SH_PFC_PIN_GROUP(intc_irq2),
@@ -3139,18 +3603,75 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
        SH_PFC_PIN_GROUP(msiof0_ss2),
        SH_PFC_PIN_GROUP(msiof0_rx),
        SH_PFC_PIN_GROUP(msiof0_tx),
+       SH_PFC_PIN_GROUP(msiof0_clk_b),
+       SH_PFC_PIN_GROUP(msiof0_sync_b),
+       SH_PFC_PIN_GROUP(msiof0_ss1_b),
+       SH_PFC_PIN_GROUP(msiof0_ss2_b),
+       SH_PFC_PIN_GROUP(msiof0_rx_b),
+       SH_PFC_PIN_GROUP(msiof0_tx_b),
+       SH_PFC_PIN_GROUP(msiof0_clk_c),
+       SH_PFC_PIN_GROUP(msiof0_sync_c),
+       SH_PFC_PIN_GROUP(msiof0_ss1_c),
+       SH_PFC_PIN_GROUP(msiof0_ss2_c),
+       SH_PFC_PIN_GROUP(msiof0_rx_c),
+       SH_PFC_PIN_GROUP(msiof0_tx_c),
        SH_PFC_PIN_GROUP(msiof1_clk),
        SH_PFC_PIN_GROUP(msiof1_sync),
        SH_PFC_PIN_GROUP(msiof1_ss1),
        SH_PFC_PIN_GROUP(msiof1_ss2),
        SH_PFC_PIN_GROUP(msiof1_rx),
        SH_PFC_PIN_GROUP(msiof1_tx),
+       SH_PFC_PIN_GROUP(msiof1_clk_b),
+       SH_PFC_PIN_GROUP(msiof1_sync_b),
+       SH_PFC_PIN_GROUP(msiof1_ss1_b),
+       SH_PFC_PIN_GROUP(msiof1_ss2_b),
+       SH_PFC_PIN_GROUP(msiof1_rx_b),
+       SH_PFC_PIN_GROUP(msiof1_tx_b),
+       SH_PFC_PIN_GROUP(msiof1_clk_c),
+       SH_PFC_PIN_GROUP(msiof1_sync_c),
+       SH_PFC_PIN_GROUP(msiof1_rx_c),
+       SH_PFC_PIN_GROUP(msiof1_tx_c),
+       SH_PFC_PIN_GROUP(msiof1_clk_d),
+       SH_PFC_PIN_GROUP(msiof1_sync_d),
+       SH_PFC_PIN_GROUP(msiof1_ss1_d),
+       SH_PFC_PIN_GROUP(msiof1_rx_d),
+       SH_PFC_PIN_GROUP(msiof1_tx_d),
+       SH_PFC_PIN_GROUP(msiof1_clk_e),
+       SH_PFC_PIN_GROUP(msiof1_sync_e),
+       SH_PFC_PIN_GROUP(msiof1_rx_e),
+       SH_PFC_PIN_GROUP(msiof1_tx_e),
        SH_PFC_PIN_GROUP(msiof2_clk),
        SH_PFC_PIN_GROUP(msiof2_sync),
        SH_PFC_PIN_GROUP(msiof2_ss1),
        SH_PFC_PIN_GROUP(msiof2_ss2),
        SH_PFC_PIN_GROUP(msiof2_rx),
        SH_PFC_PIN_GROUP(msiof2_tx),
+       SH_PFC_PIN_GROUP(msiof2_clk_b),
+       SH_PFC_PIN_GROUP(msiof2_sync_b),
+       SH_PFC_PIN_GROUP(msiof2_ss1_b),
+       SH_PFC_PIN_GROUP(msiof2_ss2_b),
+       SH_PFC_PIN_GROUP(msiof2_rx_b),
+       SH_PFC_PIN_GROUP(msiof2_tx_b),
+       SH_PFC_PIN_GROUP(msiof2_clk_c),
+       SH_PFC_PIN_GROUP(msiof2_sync_c),
+       SH_PFC_PIN_GROUP(msiof2_rx_c),
+       SH_PFC_PIN_GROUP(msiof2_tx_c),
+       SH_PFC_PIN_GROUP(msiof2_clk_d),
+       SH_PFC_PIN_GROUP(msiof2_sync_d),
+       SH_PFC_PIN_GROUP(msiof2_ss1_d),
+       SH_PFC_PIN_GROUP(msiof2_ss2_d),
+       SH_PFC_PIN_GROUP(msiof2_rx_d),
+       SH_PFC_PIN_GROUP(msiof2_tx_d),
+       SH_PFC_PIN_GROUP(msiof2_clk_e),
+       SH_PFC_PIN_GROUP(msiof2_sync_e),
+       SH_PFC_PIN_GROUP(msiof2_rx_e),
+       SH_PFC_PIN_GROUP(msiof2_tx_e),
+       SH_PFC_PIN_GROUP(qspi_ctrl),
+       SH_PFC_PIN_GROUP(qspi_data2),
+       SH_PFC_PIN_GROUP(qspi_data4),
+       SH_PFC_PIN_GROUP(qspi_ctrl_b),
+       SH_PFC_PIN_GROUP(qspi_data2_b),
+       SH_PFC_PIN_GROUP(qspi_data4_b),
        SH_PFC_PIN_GROUP(scif0_data),
        SH_PFC_PIN_GROUP(scif0_data_b),
        SH_PFC_PIN_GROUP(scif0_data_c),
@@ -3337,6 +3858,18 @@ static const char * const i2c4_groups[] = {
        "i2c4_c",
 };
 
+static const char * const i2c7_groups[] = {
+       "i2c7",
+       "i2c7_b",
+       "i2c7_c",
+};
+
+static const char * const i2c8_groups[] = {
+       "i2c8",
+       "i2c8_b",
+       "i2c8_c",
+};
+
 static const char * const intc_groups[] = {
        "intc_irq0",
        "intc_irq1",
@@ -3358,6 +3891,18 @@ static const char * const msiof0_groups[] = {
        "msiof0_ss2",
        "msiof0_rx",
        "msiof0_tx",
+       "msiof0_clk_b",
+       "msiof0_sync_b",
+       "msiof0_ss1_b",
+       "msiof0_ss2_b",
+       "msiof0_rx_b",
+       "msiof0_tx_b",
+       "msiof0_clk_c",
+       "msiof0_sync_c",
+       "msiof0_ss1_c",
+       "msiof0_ss2_c",
+       "msiof0_rx_c",
+       "msiof0_tx_c",
 };
 
 static const char * const msiof1_groups[] = {
@@ -3367,6 +3912,25 @@ static const char * const msiof1_groups[] = {
        "msiof1_ss2",
        "msiof1_rx",
        "msiof1_tx",
+       "msiof1_clk_b",
+       "msiof1_sync_b",
+       "msiof1_ss1_b",
+       "msiof1_ss2_b",
+       "msiof1_rx_b",
+       "msiof1_tx_b",
+       "msiof1_clk_c",
+       "msiof1_sync_c",
+       "msiof1_rx_c",
+       "msiof1_tx_c",
+       "msiof1_clk_d",
+       "msiof1_sync_d",
+       "msiof1_ss1_d",
+       "msiof1_rx_d",
+       "msiof1_tx_d",
+       "msiof1_clk_e",
+       "msiof1_sync_e",
+       "msiof1_rx_e",
+       "msiof1_tx_e",
 };
 
 static const char * const msiof2_groups[] = {
@@ -3376,6 +3940,35 @@ static const char * const msiof2_groups[] = {
        "msiof2_ss2",
        "msiof2_rx",
        "msiof2_tx",
+       "msiof2_clk_b",
+       "msiof2_sync_b",
+       "msiof2_ss1_b",
+       "msiof2_ss2_b",
+       "msiof2_rx_b",
+       "msiof2_tx_b",
+       "msiof2_clk_c",
+       "msiof2_sync_c",
+       "msiof2_rx_c",
+       "msiof2_tx_c",
+       "msiof2_clk_d",
+       "msiof2_sync_d",
+       "msiof2_ss1_d",
+       "msiof2_ss2_d",
+       "msiof2_rx_d",
+       "msiof2_tx_d",
+       "msiof2_clk_e",
+       "msiof2_sync_e",
+       "msiof2_rx_e",
+       "msiof2_tx_e",
+};
+
+static const char * const qspi_groups[] = {
+       "qspi_ctrl",
+       "qspi_data2",
+       "qspi_data4",
+       "qspi_ctrl_b",
+       "qspi_data2_b",
+       "qspi_data4_b",
 };
 
 static const char * const scif0_groups[] = {
@@ -3568,11 +4161,14 @@ static const struct sh_pfc_function pinmux_functions[] = {
        SH_PFC_FUNCTION(i2c2),
        SH_PFC_FUNCTION(i2c3),
        SH_PFC_FUNCTION(i2c4),
+       SH_PFC_FUNCTION(i2c7),
+       SH_PFC_FUNCTION(i2c8),
        SH_PFC_FUNCTION(intc),
        SH_PFC_FUNCTION(mmc),
        SH_PFC_FUNCTION(msiof0),
        SH_PFC_FUNCTION(msiof1),
        SH_PFC_FUNCTION(msiof2),
+       SH_PFC_FUNCTION(qspi),
        SH_PFC_FUNCTION(scif0),
        SH_PFC_FUNCTION(scif1),
        SH_PFC_FUNCTION(scif2),
index 2b9f32065920a44f68cb2f7e7e958c7598bd4ced..c4dd3d5cf9c35875322ea943b7f7ad9665ab637a 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * pinctrl pads, groups, functions for CSR SiRFatlasVI
  *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
  *
  * Licensed under GPLv2 or later.
  */
@@ -529,6 +530,40 @@ static const struct sirfsoc_padmux usp0_padmux = {
 
 static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
 
+static const struct sirfsoc_muxmask usp0_only_utfs_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
+       },
+};
+
+static const struct sirfsoc_padmux usp0_only_utfs_padmux = {
+       .muxmask_counts = ARRAY_SIZE(usp0_only_utfs_muxmask),
+       .muxmask = usp0_only_utfs_muxmask,
+       .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+       .funcmask = BIT(1) | BIT(2) | BIT(6),
+       .funcval = 0,
+};
+
+static const unsigned usp0_only_utfs_pins[] = { 51, 52, 53, 54 };
+
+static const struct sirfsoc_muxmask usp0_only_urfs_muxmask[] = {
+       {
+               .group = 1,
+               .mask = BIT(19) | BIT(20) | BIT(21) | BIT(23),
+       },
+};
+
+static const struct sirfsoc_padmux usp0_only_urfs_padmux = {
+       .muxmask_counts = ARRAY_SIZE(usp0_only_urfs_muxmask),
+       .muxmask = usp0_only_urfs_muxmask,
+       .ctrlreg = SIRFSOC_RSC_PIN_MUX,
+       .funcmask = BIT(1) | BIT(2) | BIT(9),
+       .funcval = 0,
+};
+
+static const unsigned usp0_only_urfs_pins[] = { 51, 52, 53, 55 };
+
 static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
        {
                .group = 1,
@@ -905,6 +940,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
        SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
        SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
                                        usp0_uart_nostreamctrl_pins),
+       SIRFSOC_PIN_GROUP("usp0_only_utfs_grp", usp0_only_utfs_pins),
+       SIRFSOC_PIN_GROUP("usp0_only_urfs_grp", usp0_only_urfs_pins),
        SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
        SIRFSOC_PIN_GROUP("usp1_uart_nostreamctrl_grp",
                                        usp1_uart_nostreamctrl_pins),
@@ -953,6 +990,9 @@ static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
 static const char * const usp0_uart_nostreamctrl_grp[] = {
                                        "usp0_uart_nostreamctrl_grp" };
 static const char * const usp0grp[] = { "usp0grp" };
+static const char * const usp0_only_utfs_grp[] = { "usp0_only_utfs_grp" };
+static const char * const usp0_only_urfs_grp[] = { "usp0_only_urfs_grp" };
+
 static const char * const usp1grp[] = { "usp1grp" };
 static const char * const usp1_uart_nostreamctrl_grp[] = {
                                        "usp1_uart_nostreamctrl_grp" };
@@ -1003,6 +1043,10 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
        SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
                                                usp0_uart_nostreamctrl_grp,
                                                usp0_uart_nostreamctrl_padmux),
+       SIRFSOC_PMX_FUNCTION("usp0_only_utfs", usp0_only_utfs_grp,
+                                               usp0_only_utfs_padmux),
+       SIRFSOC_PMX_FUNCTION("usp0_only_urfs", usp0_only_urfs_grp,
+                                               usp0_only_urfs_padmux),
        SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
        SIRFSOC_PMX_FUNCTION("usp1_uart_nostreamctrl",
                                                usp1_uart_nostreamctrl_grp,
index dde0285544d6ad95a6c392955df3ce976c3e11a4..8aa76f0776d777ee5beb5598d7f80a354b23caed 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * pinctrl pads, groups, functions for CSR SiRFprimaII
  *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
  *
  * Licensed under GPLv2 or later.
  */
index 617a4916b50fc1d8fec129e001f717c00de13880..5f3adb87c1efe8f3670b9c79d2f5db78671260d8 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * pinmux driver for CSR SiRFprimaII
  *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ * Copyright (c) 2011 - 2014 Cambridge Silicon Radio Limited, a CSR plc group
+ * company.
  *
  * Licensed under GPLv2 or later.
  */
index 5ae65c11d544d4feb9affb727b6e00bec4b13894..5f67843c7fb7388c158ade858a90c18c61f8f2c7 100644 (file)
@@ -27,8 +27,6 @@ config ACER_WMI
        depends on ACPI_WMI
        select INPUT_SPARSEKMAP
        # Acer WMI depends on ACPI_VIDEO when ACPI is enabled
-       # but for select to work, need to select ACPI_VIDEO's dependencies, ick
-        select VIDEO_OUTPUT_CONTROL if ACPI
         select ACPI_VIDEO if ACPI
        ---help---
          This is a driver for newer Acer (and Wistron) laptops. It adds
index be02bcc346d30cb9dc17fff807aa414d0147216a..e6f336270c2191fd9cd1418f869b5aa4dcc499de 100644 (file)
@@ -66,7 +66,6 @@
 #include <linux/backlight.h>
 #include <linux/input.h>
 #include <linux/kfifo.h>
-#include <linux/video_output.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
index 769d265b221b9c3abee2b8ea86a1d84ef00ac795..deb7f4bcdb7b6b6a770ce08f1d2535dccfa82100 100644 (file)
@@ -21,7 +21,7 @@
 
 #include "pnpbios.h"
 
-static struct {
+__visible struct {
        u16 offset;
        u16 segment;
 } pnp_bios_callpoint;
@@ -41,6 +41,7 @@ asmlinkage void pnp_bios_callfunc(void);
 
 __asm__(".text                 \n"
        __ALIGN_STR "\n"
+       ".globl pnp_bios_callfunc\n"
        "pnp_bios_callfunc:\n"
        "       pushl %edx      \n"
        "       pushl %ecx      \n"
@@ -66,9 +67,9 @@ static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
  * after PnP BIOS oopses.
  */
 
-u32 pnp_bios_fault_esp;
-u32 pnp_bios_fault_eip;
-u32 pnp_bios_is_utter_crap = 0;
+__visible u32 pnp_bios_fault_esp;
+__visible u32 pnp_bios_fault_eip;
+__visible u32 pnp_bios_is_utter_crap = 0;
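+/*
+ * Editorial note: the __visible annotations above are presumably needed
+ * because these symbols are referenced from the assembly trampoline and
+ * fault fixup code, so they must not be optimized away or localized (e.g.
+ * under link-time optimization).
+ */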
 
 static spinlock_t pnp_bios_lock;
 
index 3c6768378a94600bc487c61bc9d3fffcbf581750..61b51e17d932a5c81db81fd99f46c7411dcf79c6 100644 (file)
@@ -834,7 +834,7 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
 }
 
 static const struct x86_cpu_id energy_unit_quirk_ids[] = {
-       { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+       { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
        {}
 };
 
@@ -947,11 +947,11 @@ static void package_power_limit_irq_restore(int package_id)
 }
 
 static const struct x86_cpu_id rapl_ids[] = {
-       { X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
-       { X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
-       { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
-       { X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
-       { X86_VENDOR_INTEL, 6, 0x45},/* HSW */
+       { X86_VENDOR_INTEL, 6, 0x2a},/* Sandy Bridge */
+       { X86_VENDOR_INTEL, 6, 0x2d},/* Sandy Bridge EP */
+       { X86_VENDOR_INTEL, 6, 0x37},/* Valleyview */
+       { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */
+       { X86_VENDOR_INTEL, 6, 0x45},/* Haswell */
        /* TODO: Add more CPU IDs after testing */
        {}
 };
@@ -1147,6 +1147,11 @@ static int rapl_check_domain(int cpu, int domain)
        if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
                return -ENODEV;
 
+       /* PP1/uncore/graphics domain may not be active at the time of
+        * driver loading. So skip further checks.
+        */
+       if (domain == RAPL_DOMAIN_PP1)
+               return 0;
        /* energy counters roll slowly on some domains */
        while (++retry < 10) {
                usleep_range(10000, 15000);
index fb7300837feef25a6b2a80de582baf01031bf22a..bc1e5139ba2957712e9800182fe6d7a24c568468 100644 (file)
@@ -699,8 +699,6 @@ int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
 
        BUG_ON(!bytes);
 
-       PREPARE_WORK(&priv->rx_list.work.work, ps3_vuart_work);
-
        spin_lock_irqsave(&priv->rx_list.lock, flags);
        if (priv->rx_list.bytes_held >= bytes) {
                dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
@@ -1052,7 +1050,7 @@ static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
        INIT_LIST_HEAD(&priv->rx_list.head);
        spin_lock_init(&priv->rx_list.lock);
 
-       INIT_WORK(&priv->rx_list.work.work, NULL);
+       INIT_WORK(&priv->rx_list.work.work, ps3_vuart_work);
        priv->rx_list.work.trigger = 0;
        priv->rx_list.work.dev = dev;
 
index d333f7eac106f1ae9cab88f0850f2fc4ca03c19f..7a721d67e6aca8ce8425ebc0e29477009fbe9c6c 100644 (file)
@@ -310,10 +310,8 @@ static int pm800_regulator_probe(struct platform_device *pdev)
 
        pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
                                        GFP_KERNEL);
-       if (!pm800_data) {
-               dev_err(&pdev->dev, "Failed to allocate pm800_regualtors");
+       if (!pm800_data)
                return -ENOMEM;
-       }
 
        pm800_data->map = chip->subchip->regmap_power;
        pm800_data->chip = chip;
index f704d83c93c4a95fc729cc48c510611d4b5cd33f..337634ad0562449495c12fb4e39efd989f85edef 100644 (file)
@@ -2,7 +2,7 @@
  * Regulators driver for Marvell 88PM8607
  *
  * Copyright (C) 2009 Marvell International Ltd.
- *     Haojian Zhuang <haojian.zhuang@marvell.com>
+ *     Haojian Zhuang <haojian.zhuang@marvell.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -78,7 +78,7 @@ static const unsigned int BUCK2_suspend_table[] = {
 };
 
 static const unsigned int BUCK3_table[] = {
-              0,   25000,   50000,   75000,  100000,  125000,  150000,  175000,
+             0,   25000,   50000,   75000,  100000,  125000,  150000,  175000,
         200000,  225000,  250000,  275000,  300000,  325000,  350000,  375000,
         400000,  425000,  450000,  475000,  500000,  525000,  550000,  575000,
         600000,  625000,  650000,  675000,  700000,  725000,  750000,  775000,
@@ -89,7 +89,7 @@ static const unsigned int BUCK3_table[] = {
 };
 
 static const unsigned int BUCK3_suspend_table[] = {
-              0,   25000,   50000,   75000,  100000,  125000,  150000,  175000,
+             0,   25000,   50000,   75000,  100000,  125000,  150000,  175000,
         200000,  225000,  250000,  275000,  300000,  325000,  350000,  375000,
         400000,  425000,  450000,  475000,  500000,  525000,  550000,  575000,
         600000,  625000,  650000,  675000,  700000,  725000,  750000,  775000,
@@ -322,7 +322,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
        nproot = of_node_get(pdev->dev.parent->of_node);
        if (!nproot)
                return -ENODEV;
-       nproot = of_find_node_by_name(nproot, "regulators");
+       nproot = of_get_child_by_name(nproot, "regulators");
        if (!nproot) {
                dev_err(&pdev->dev, "failed to find regulators node\n");
                return -ENODEV;
index 6a7932822e373317caba9c5c799c9439f5444927..1cd8584a7b887ffd9fd8fd0089c8148dba595064 100644 (file)
@@ -139,6 +139,14 @@ config REGULATOR_AS3722
          AS3722 PMIC. This will enable support for all the software
          controllable DCDC/LDO regulators.
 
+config REGULATOR_BCM590XX
+       tristate "Broadcom BCM590xx PMU Regulators"
+       depends on MFD_BCM590XX
+       help
+         This driver provides support for the voltage regulators on the
+         BCM590xx PMUs. This will enable support for the software
+         controllable LDO/Switching regulators.
+
 config REGULATOR_DA903X
        tristate "Dialog Semiconductor DA9030/DA9034 regulators"
        depends on PMIC_DA903X
@@ -399,12 +407,12 @@ config REGULATOR_PCF50633
         on PCF50633
 
 config REGULATOR_PFUZE100
-       tristate "Freescale PFUZE100 regulator driver"
+       tristate "Freescale PFUZE100/PFUZE200 regulator driver"
        depends on I2C
        select REGMAP_I2C
        help
-         Say y here to support the regulators found on the Freescale PFUZE100
-         PMIC.
+         Say y here to support the regulators found on the Freescale
+         PFUZE100/PFUZE200 PMIC.
 
 config REGULATOR_RC5T583
        tristate "RICOH RC5T583 Power regulators"
@@ -416,13 +424,21 @@ config REGULATOR_RC5T583
          through regulator interface. The device supports multiple DCDC/LDO
          outputs which can be controlled by i2c communication.
 
+config REGULATOR_S2MPA01
+       tristate "Samsung S2MPA01 voltage regulator"
+       depends on MFD_SEC_CORE
+       help
+        This driver controls Samsung S2MPA01 voltage output regulator
+        via I2C bus. S2MPA01 has 10 Bucks and 26 LDO outputs.
+
 config REGULATOR_S2MPS11
-       tristate "Samsung S2MPS11 voltage regulator"
+       tristate "Samsung S2MPS11/S2MPS14 voltage regulator"
        depends on MFD_SEC_CORE
        help
-        This driver supports a Samsung S2MPS11 voltage output regulator
-        via I2C bus. S2MPS11 is comprised of high efficient Buck converters
-        including Dual-Phase Buck converter, Buck-Boost converter, various LDOs.
+        This driver supports a Samsung S2MPS11/S2MPS14 voltage output
+        regulator via I2C bus. The chip comprises highly efficient Buck
+        converters, including a Dual-Phase Buck converter, a Buck-Boost
+        converter and various LDOs.
 
 config REGULATOR_S5M8767
        tristate "Samsung S5M8767A voltage regulator"
@@ -432,6 +448,12 @@ config REGULATOR_S5M8767
         via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
         supports DVS mode with 8bits of output voltage control.
 
+config REGULATOR_ST_PWM
+       tristate "STMicroelectronics PWM voltage regulator"
+       depends on ARCH_STI
+       help
+        This driver supports ST's PWM controlled voltage regulators.
+
 config REGULATOR_TI_ABB
        tristate "TI Adaptive Body Bias on-chip LDO"
        depends on ARCH_OMAP
@@ -513,6 +535,15 @@ config REGULATOR_TPS65217
          voltage regulators. It supports software based voltage control
          for different voltage domains
 
+config REGULATOR_TPS65218
+       tristate "TI TPS65218 Power regulators"
+       depends on MFD_TPS65218 && OF
+       help
+         This driver supports TPS65218 voltage regulator chips. The TPS65218
+         provides six step-down converters and one general-purpose LDO. It
+         supports software based voltage control for different voltage
+         domains.
+
 config REGULATOR_TPS6524X
        tristate "TI TPS6524X Power regulators"
        depends on SPI
index 979f9ddcf259bd5a82b6b5d91f81981902e6185c..f0fe0c50b59c23ffc36680536f17bf0486e4226d 100644 (file)
@@ -20,6 +20,7 @@ obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
 obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
 obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
 obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o
+obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o
 obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
 obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
 obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
@@ -57,8 +58,10 @@ obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
 obj-$(CONFIG_REGULATOR_RC5T583)  += rc5t583-regulator.o
+obj-$(CONFIG_REGULATOR_S2MPA01) += s2mpa01.o
 obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
 obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_ST_PWM) += st-pwm.o
 obj-$(CONFIG_REGULATOR_STW481X_VMMC) += stw481x-vmmc.o
 obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
@@ -67,6 +70,7 @@ obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65090) += tps65090-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65217) += tps65217-regulator.o
+obj-$(CONFIG_REGULATOR_TPS65218) += tps65218-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
 obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
index f70a9bfa5ff2e9b4845d9493e3cbda9cb012e042..c873ee0082cf2d83e79d678efe2c662561c53e81 100644 (file)
@@ -99,6 +99,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
 
 static struct regulator_ops aat2870_ldo_ops = {
        .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_ascend,
        .set_voltage_sel = aat2870_ldo_set_voltage_sel,
        .get_voltage_sel = aat2870_ldo_get_voltage_sel,
        .enable = aat2870_ldo_enable,
index 084cc0819a52f95a24c1f6c2bed63c1f416d5532..b92d7dd01a1899356dff99641164380c693a3911 100644 (file)
@@ -62,7 +62,6 @@
 #define        ACT8865_VOLTAGE_NUM     64
 
 struct act8865 {
-       struct regulator_dev *rdev[ACT8865_REG_NUM];
        struct regmap *regmap;
 };
 
@@ -213,7 +212,7 @@ static int act8865_pdata_from_dt(struct device *dev,
        struct device_node *np;
        struct act8865_regulator_data *regulator;
 
-       np = of_find_node_by_name(dev->of_node, "regulators");
+       np = of_get_child_by_name(dev->of_node, "regulators");
        if (!np) {
                dev_err(dev, "missing 'regulators' subnode in DT\n");
                return -EINVAL;
@@ -221,17 +220,15 @@ static int act8865_pdata_from_dt(struct device *dev,
 
        matched = of_regulator_match(dev, np,
                                act8865_matches, ARRAY_SIZE(act8865_matches));
+       of_node_put(np);
        if (matched <= 0)
                return matched;
 
        pdata->regulators = devm_kzalloc(dev,
                                sizeof(struct act8865_regulator_data) *
                                ARRAY_SIZE(act8865_matches), GFP_KERNEL);
-       if (!pdata->regulators) {
-               dev_err(dev, "%s: failed to allocate act8865 registor\n",
-                                               __func__);
+       if (!pdata->regulators)
                return -ENOMEM;
-       }
 
        pdata->num_regulators = matched;
        regulator = pdata->regulators;
@@ -258,7 +255,7 @@ static inline int act8865_pdata_from_dt(struct device *dev,
 static int act8865_pmic_probe(struct i2c_client *client,
                           const struct i2c_device_id *i2c_id)
 {
-       struct regulator_dev **rdev;
+       struct regulator_dev *rdev;
        struct device *dev = &client->dev;
        struct act8865_platform_data *pdata = dev_get_platdata(dev);
        struct regulator_config config = { };
@@ -292,8 +289,6 @@ static int act8865_pmic_probe(struct i2c_client *client,
        if (!act8865)
                return -ENOMEM;
 
-       rdev = act8865->rdev;
-
        act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
        if (IS_ERR(act8865->regmap)) {
                error = PTR_ERR(act8865->regmap);
@@ -313,12 +308,12 @@ static int act8865_pmic_probe(struct i2c_client *client,
                config.driver_data = act8865;
                config.regmap = act8865->regmap;
 
-               rdev[i] = devm_regulator_register(&client->dev,
-                                               &act8865_reg[i], &config);
-               if (IS_ERR(rdev[i])) {
+               rdev = devm_regulator_register(&client->dev, &act8865_reg[i],
+                                              &config);
+               if (IS_ERR(rdev)) {
                        dev_err(dev, "failed to register %s\n",
                                act8865_reg[id].name);
-                       return PTR_ERR(rdev[i]);
+                       return PTR_ERR(rdev);
                }
        }
 
index 862e63e451d02235a8549bd474c49dbe1829dc4e..7c397bb81e01eb3f8cba61472e4d15b47e16ddc8 100644 (file)
@@ -34,6 +34,9 @@
 #define LDO_RAMP_UP_UNIT_IN_CYCLES      64 /* 64 cycles per step */
 #define LDO_RAMP_UP_FREQ_IN_MHZ         24 /* cycle based on 24M OSC */
 
+#define LDO_POWER_GATE                 0x00
+#define LDO_FET_FULL_ON                        0x1f
+
 struct anatop_regulator {
        const char *name;
        u32 control_reg;
@@ -48,19 +51,10 @@ struct anatop_regulator {
        int max_voltage;
        struct regulator_desc rdesc;
        struct regulator_init_data *initdata;
+       bool bypass;
+       int sel;
 };
 
-static int anatop_regmap_set_voltage_sel(struct regulator_dev *reg,
-                                       unsigned selector)
-{
-       struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
-
-       if (!anatop_reg->control_reg)
-               return -ENOTSUPP;
-
-       return regulator_set_voltage_sel_regmap(reg, selector);
-}
-
 static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
        unsigned int old_sel,
        unsigned int new_sel)
@@ -87,22 +81,99 @@ static int anatop_regmap_set_voltage_time_sel(struct regulator_dev *reg,
        return ret;
 }
 
-static int anatop_regmap_get_voltage_sel(struct regulator_dev *reg)
+static int anatop_regmap_enable(struct regulator_dev *reg)
 {
        struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+       int sel;
 
-       if (!anatop_reg->control_reg)
-               return -ENOTSUPP;
+       sel = anatop_reg->bypass ? LDO_FET_FULL_ON : anatop_reg->sel;
+       return regulator_set_voltage_sel_regmap(reg, sel);
+}
+
+static int anatop_regmap_disable(struct regulator_dev *reg)
+{
+       return regulator_set_voltage_sel_regmap(reg, LDO_POWER_GATE);
+}
+
+static int anatop_regmap_is_enabled(struct regulator_dev *reg)
+{
+       return regulator_get_voltage_sel_regmap(reg) != LDO_POWER_GATE;
+}
+
+static int anatop_regmap_core_set_voltage_sel(struct regulator_dev *reg,
+                                             unsigned selector)
+{
+       struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+       int ret;
+
+       if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg)) {
+               anatop_reg->sel = selector;
+               return 0;
+       }
+
+       ret = regulator_set_voltage_sel_regmap(reg, selector);
+       if (!ret)
+               anatop_reg->sel = selector;
+       return ret;
+}
+
+static int anatop_regmap_core_get_voltage_sel(struct regulator_dev *reg)
+{
+       struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+
+       if (anatop_reg->bypass || !anatop_regmap_is_enabled(reg))
+               return anatop_reg->sel;
 
        return regulator_get_voltage_sel_regmap(reg);
 }
 
+static int anatop_regmap_get_bypass(struct regulator_dev *reg, bool *enable)
+{
+       struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+       int sel;
+
+       sel = regulator_get_voltage_sel_regmap(reg);
+       if (sel == LDO_FET_FULL_ON)
+               WARN_ON(!anatop_reg->bypass);
+       else if (sel != LDO_POWER_GATE)
+               WARN_ON(anatop_reg->bypass);
+
+       *enable = anatop_reg->bypass;
+       return 0;
+}
+
+static int anatop_regmap_set_bypass(struct regulator_dev *reg, bool enable)
+{
+       struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
+       int sel;
+
+       if (enable == anatop_reg->bypass)
+               return 0;
+
+       sel = enable ? LDO_FET_FULL_ON : anatop_reg->sel;
+       anatop_reg->bypass = enable;
+
+       return regulator_set_voltage_sel_regmap(reg, sel);
+}
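+
+/*
+ * Summary (illustrative): for the core regulators the selector field doubles
+ * as the on/off and bypass control.  Writing LDO_POWER_GATE (0x00) gates the
+ * output, LDO_FET_FULL_ON (0x1f) turns the pass FET fully on (bypass), and
+ * any other selector regulates at that voltage; the cached ->sel is written
+ * back when bypass or power gating ends.
+ */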
+
 static struct regulator_ops anatop_rops = {
-       .set_voltage_sel = anatop_regmap_set_voltage_sel,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .list_voltage = regulator_list_voltage_linear,
+       .map_voltage = regulator_map_voltage_linear,
+};
+
+static struct regulator_ops anatop_core_rops = {
+       .enable = anatop_regmap_enable,
+       .disable = anatop_regmap_disable,
+       .is_enabled = anatop_regmap_is_enabled,
+       .set_voltage_sel = anatop_regmap_core_set_voltage_sel,
        .set_voltage_time_sel = anatop_regmap_set_voltage_time_sel,
-       .get_voltage_sel = anatop_regmap_get_voltage_sel,
+       .get_voltage_sel = anatop_regmap_core_get_voltage_sel,
        .list_voltage = regulator_list_voltage_linear,
        .map_voltage = regulator_map_voltage_linear,
+       .get_bypass = anatop_regmap_get_bypass,
+       .set_bypass = anatop_regmap_set_bypass,
 };
 
 static int anatop_regulator_probe(struct platform_device *pdev)
@@ -116,6 +187,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
        struct regulator_init_data *initdata;
        struct regulator_config config = { };
        int ret = 0;
+       u32 val;
 
        initdata = of_get_regulator_init_data(dev, np);
        sreg = devm_kzalloc(dev, sizeof(*sreg), GFP_KERNEL);
@@ -125,7 +197,6 @@ static int anatop_regulator_probe(struct platform_device *pdev)
        sreg->name = of_get_property(np, "regulator-name", NULL);
        rdesc = &sreg->rdesc;
        rdesc->name = sreg->name;
-       rdesc->ops = &anatop_rops;
        rdesc->type = REGULATOR_VOLTAGE;
        rdesc->owner = THIS_MODULE;
 
@@ -197,6 +268,25 @@ static int anatop_regulator_probe(struct platform_device *pdev)
        config.of_node = pdev->dev.of_node;
        config.regmap = sreg->anatop;
 
+       /* Only core regulators have the ramp up delay configuration. */
+       if (sreg->control_reg && sreg->delay_bit_width) {
+               rdesc->ops = &anatop_core_rops;
+
+               ret = regmap_read(config.regmap, rdesc->vsel_reg, &val);
+               if (ret) {
+                       dev_err(dev, "failed to read initial state\n");
+                       return ret;
+               }
+
+               sreg->sel = (val & rdesc->vsel_mask) >> sreg->vol_bit_shift;
+               if (sreg->sel == LDO_FET_FULL_ON) {
+                       sreg->sel = 0;
+                       sreg->bypass = true;
+               }
+       } else {
+               rdesc->ops = &anatop_rops;
+       }
+
        /* register regulator */
        rdev = devm_regulator_register(dev, rdesc, &config);
        if (IS_ERR(rdev)) {
index 4f6c2055f6b210436a5c1a9565b9f302ecdd21da..b1033d30b504c70cd50364bd5dcbdbca198474f6 100644 (file)
@@ -153,11 +153,9 @@ static const struct regulator_desc arizona_ldo1 = {
 
        .vsel_reg = ARIZONA_LDO1_CONTROL_1,
        .vsel_mask = ARIZONA_LDO1_VSEL_MASK,
-       .bypass_reg = ARIZONA_LDO1_CONTROL_1,
-       .bypass_mask = ARIZONA_LDO1_BYPASS,
        .min_uV = 900000,
-       .uV_step = 50000,
-       .n_voltages = 7,
+       .uV_step = 25000,
+       .n_voltages = 13,
        .enable_time = 500,
 
        .owner = THIS_MODULE,
@@ -189,10 +187,8 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
        int ret;
 
        ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
-       if (ldo1 == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!ldo1)
                return -ENOMEM;
-       }
 
        ldo1->arizona = arizona;
 
@@ -203,6 +199,7 @@ static int arizona_ldo1_probe(struct platform_device *pdev)
         */
        switch (arizona->type) {
        case WM5102:
+       case WM8997:
                desc = &arizona_ldo1_hc;
                ldo1->init_data = arizona_ldo1_dvfs;
                break;
index 034ece7070838991a59d45f88f0e0ace95c6abba..6fdd9bf6927fcb39763156d34d2747c34ff2ebee 100644 (file)
@@ -204,10 +204,8 @@ static int arizona_micsupp_probe(struct platform_device *pdev)
        int ret;
 
        micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL);
-       if (micsupp == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!micsupp)
                return -ENOMEM;
-       }
 
        micsupp->arizona = arizona;
        INIT_WORK(&micsupp->check_cp_work, arizona_micsupp_check_cp);
index c77a58478cca6b11ffd2c5a69990eddda1900230..b47283f91e2db940974b48c0103c9a5207b462af 100644 (file)
@@ -191,7 +191,7 @@ static int as3711_regulator_parse_dt(struct device *dev,
 {
        struct as3711_regulator_pdata *pdata = dev_get_platdata(dev);
        struct device_node *regulators =
-               of_find_node_by_name(dev->parent->of_node, "regulators");
+               of_get_child_by_name(dev->parent->of_node, "regulators");
        struct of_regulator_match *match;
        int ret, i;
 
@@ -221,7 +221,6 @@ static int as3711_regulator_probe(struct platform_device *pdev)
 {
        struct as3711_regulator_pdata *pdata = dev_get_platdata(&pdev->dev);
        struct as3711 *as3711 = dev_get_drvdata(pdev->dev.parent);
-       struct regulator_init_data *reg_data;
        struct regulator_config config = {.dev = &pdev->dev,};
        struct as3711_regulator *reg = NULL;
        struct as3711_regulator *regs;
@@ -246,22 +245,14 @@ static int as3711_regulator_probe(struct platform_device *pdev)
 
        regs = devm_kzalloc(&pdev->dev, AS3711_REGULATOR_NUM *
                        sizeof(struct as3711_regulator), GFP_KERNEL);
-       if (!regs) {
-               dev_err(&pdev->dev, "Memory allocation failed exiting..\n");
+       if (!regs)
                return -ENOMEM;
-       }
 
        for (id = 0, ri = as3711_reg_info; id < AS3711_REGULATOR_NUM; ++id, ri++) {
-               reg_data = pdata->init_data[id];
-
-               /* No need to register if there is no regulator data */
-               if (!reg_data)
-                       continue;
-
                reg = &regs[id];
                reg->reg_info = ri;
 
-               config.init_data = reg_data;
+               config.init_data = pdata->init_data[id];
                config.driver_data = reg;
                config.regmap = as3711->regmap;
                config.of_node = of_node[id];
index 8b17d786cb713d5176996a36fa66bd2dae96f0b8..85585219ce824140ef3a8f48fed2539ff5b11f07 100644 (file)
@@ -719,6 +719,7 @@ static int as3722_get_regulator_dt_data(struct platform_device *pdev,
 
        ret = of_regulator_match(&pdev->dev, np, as3722_regulator_matches,
                        ARRAY_SIZE(as3722_regulator_matches));
+       of_node_put(np);
        if (ret < 0) {
                dev_err(&pdev->dev, "Parsing of regulator node failed: %d\n",
                        ret);
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
new file mode 100644 (file)
index 0000000..ab08ca7
--- /dev/null
@@ -0,0 +1,403 @@
+/*
+ * Broadcom BCM590xx regulator driver
+ *
+ * Copyright 2014 Linaro Limited
+ * Author: Matt Porter <mporter@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/bcm590xx.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+
+/* Register defs */
+#define BCM590XX_RFLDOPMCTRL1  0x60
+#define BCM590XX_IOSR1PMCTRL1  0x7a
+#define BCM590XX_IOSR2PMCTRL1  0x7c
+#define BCM590XX_CSRPMCTRL1    0x7e
+#define BCM590XX_SDSR1PMCTRL1  0x82
+#define BCM590XX_SDSR2PMCTRL1  0x86
+#define BCM590XX_MSRPMCTRL1    0x8a
+#define BCM590XX_VSRPMCTRL1    0x8e
+#define BCM590XX_REG_ENABLE    BIT(7)
+
+#define BCM590XX_RFLDOCTRL     0x96
+#define BCM590XX_CSRVOUT1      0xc0
+#define BCM590XX_LDO_VSEL_MASK GENMASK(5, 3)
+#define BCM590XX_SR_VSEL_MASK  GENMASK(5, 0)
+
+/* LDO regulator IDs */
+#define BCM590XX_REG_RFLDO     0
+#define BCM590XX_REG_CAMLDO1   1
+#define BCM590XX_REG_CAMLDO2   2
+#define BCM590XX_REG_SIMLDO1   3
+#define BCM590XX_REG_SIMLDO2   4
+#define BCM590XX_REG_SDLDO     5
+#define BCM590XX_REG_SDXLDO    6
+#define BCM590XX_REG_MMCLDO1   7
+#define BCM590XX_REG_MMCLDO2   8
+#define BCM590XX_REG_AUDLDO    9
+#define BCM590XX_REG_MICLDO    10
+#define BCM590XX_REG_USBLDO    11
+#define BCM590XX_REG_VIBLDO    12
+
+/* DCDC regulator IDs */
+#define BCM590XX_REG_CSR       13
+#define BCM590XX_REG_IOSR1     14
+#define BCM590XX_REG_IOSR2     15
+#define BCM590XX_REG_MSR       16
+#define BCM590XX_REG_SDSR1     17
+#define BCM590XX_REG_SDSR2     18
+#define BCM590XX_REG_VSR       19
+
+#define BCM590XX_NUM_REGS      20
+
+#define BCM590XX_REG_IS_LDO(n) ((n) < BCM590XX_REG_CSR)
+
+struct bcm590xx_board {
+       struct regulator_init_data *bcm590xx_pmu_init_data[BCM590XX_NUM_REGS];
+};
+
+/* LDO group A: supported voltages in microvolts */
+static const unsigned int ldo_a_table[] = {
+       1200000, 1800000, 2500000, 2700000, 2800000,
+       2900000, 3000000, 3300000,
+};
+
+/* LDO group C: supported voltages in microvolts */
+static const unsigned int ldo_c_table[] = {
+       3100000, 1800000, 2500000, 2700000, 2800000,
+       2900000, 3000000, 3300000,
+};
+
+/* DCDC group CSR: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_csr_ranges[] = {
+       REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
+       REGULATOR_LINEAR_RANGE(1360000, 51, 55, 20000),
+       REGULATOR_LINEAR_RANGE(900000, 56, 63, 0),
+};
+
+/* DCDC group IOSR1: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_iosr1_ranges[] = {
+       REGULATOR_LINEAR_RANGE(860000, 2, 51, 10000),
+       REGULATOR_LINEAR_RANGE(1500000, 52, 52, 0),
+       REGULATOR_LINEAR_RANGE(1800000, 53, 53, 0),
+       REGULATOR_LINEAR_RANGE(900000, 54, 63, 0),
+};
+
+/* DCDC group SDSR1: supported voltages in microvolts */
+static const struct regulator_linear_range dcdc_sdsr1_ranges[] = {
+       REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
+       REGULATOR_LINEAR_RANGE(1340000, 51, 51, 0),
+       REGULATOR_LINEAR_RANGE(900000, 52, 63, 0),
+};
+
+struct bcm590xx_info {
+       const char *name;
+       const char *vin_name;
+       u8 n_voltages;
+       const unsigned int *volt_table;
+       u8 n_linear_ranges;
+       const struct regulator_linear_range *linear_ranges;
+};
+
+#define BCM590XX_REG_TABLE(_name, _table) \
+       { \
+               .name = #_name, \
+               .n_voltages = ARRAY_SIZE(_table), \
+               .volt_table = _table, \
+       }
+
+#define BCM590XX_REG_RANGES(_name, _ranges) \
+       { \
+               .name = #_name, \
+               .n_linear_ranges = ARRAY_SIZE(_ranges), \
+               .linear_ranges = _ranges, \
+       }
+
+static struct bcm590xx_info bcm590xx_regs[] = {
+       BCM590XX_REG_TABLE(rfldo, ldo_a_table),
+       BCM590XX_REG_TABLE(camldo1, ldo_c_table),
+       BCM590XX_REG_TABLE(camldo2, ldo_c_table),
+       BCM590XX_REG_TABLE(simldo1, ldo_a_table),
+       BCM590XX_REG_TABLE(simldo2, ldo_a_table),
+       BCM590XX_REG_TABLE(sdldo, ldo_c_table),
+       BCM590XX_REG_TABLE(sdxldo, ldo_a_table),
+       BCM590XX_REG_TABLE(mmcldo1, ldo_a_table),
+       BCM590XX_REG_TABLE(mmcldo2, ldo_a_table),
+       BCM590XX_REG_TABLE(audldo, ldo_a_table),
+       BCM590XX_REG_TABLE(micldo, ldo_a_table),
+       BCM590XX_REG_TABLE(usbldo, ldo_a_table),
+       BCM590XX_REG_TABLE(vibldo, ldo_c_table),
+       BCM590XX_REG_RANGES(csr, dcdc_csr_ranges),
+       BCM590XX_REG_RANGES(iosr1, dcdc_iosr1_ranges),
+       BCM590XX_REG_RANGES(iosr2, dcdc_iosr1_ranges),
+       BCM590XX_REG_RANGES(msr, dcdc_iosr1_ranges),
+       BCM590XX_REG_RANGES(sdsr1, dcdc_sdsr1_ranges),
+       BCM590XX_REG_RANGES(sdsr2, dcdc_iosr1_ranges),
+       BCM590XX_REG_RANGES(vsr, dcdc_iosr1_ranges),
+};
+
+struct bcm590xx_reg {
+       struct regulator_desc *desc;
+       struct bcm590xx *mfd;
+       struct bcm590xx_info **info;
+};
+
+static int bcm590xx_get_vsel_register(int id)
+{
+       if (BCM590XX_REG_IS_LDO(id))
+               return BCM590XX_RFLDOCTRL + id;
+       else
+               return BCM590XX_CSRVOUT1 + (id - BCM590XX_REG_CSR) * 3;
+}
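+
+/*
+ * For example (derived from the defines above): RFLDO (id 0) has its
+ * selector at BCM590XX_RFLDOCTRL (0x96), while IOSR1 (id 14) uses
+ * BCM590XX_CSRVOUT1 + (14 - 13) * 3 = 0xc3.
+ */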
+
+static int bcm590xx_get_enable_register(int id)
+{
+       int reg = 0;
+
+       if (BCM590XX_REG_IS_LDO(id))
+               reg = BCM590XX_RFLDOPMCTRL1 + id * 2;
+       else
+               switch (id) {
+               case BCM590XX_REG_CSR:
+                       reg = BCM590XX_CSRPMCTRL1;
+                       break;
+               case BCM590XX_REG_IOSR1:
+                       reg = BCM590XX_IOSR1PMCTRL1;
+                       break;
+               case BCM590XX_REG_IOSR2:
+                       reg = BCM590XX_IOSR2PMCTRL1;
+                       break;
+               case BCM590XX_REG_MSR:
+                       reg = BCM590XX_MSRPMCTRL1;
+                       break;
+               case BCM590XX_REG_SDSR1:
+                       reg = BCM590XX_SDSR1PMCTRL1;
+                       break;
+               case BCM590XX_REG_SDSR2:
+                       reg = BCM590XX_SDSR2PMCTRL1;
+                       break;
+               }
+
+       return reg;
+}
+
+static struct regulator_ops bcm590xx_ops_ldo = {
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .list_voltage           = regulator_list_voltage_table,
+       .map_voltage            = regulator_map_voltage_iterate,
+};
+
+static struct regulator_ops bcm590xx_ops_dcdc = {
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
+};
+
+#define BCM590XX_MATCH(_name, _id) \
+       { \
+               .name = #_name, \
+               .driver_data = (void *)&bcm590xx_regs[BCM590XX_REG_##_id], \
+       }
+
+static struct of_regulator_match bcm590xx_matches[] = {
+       BCM590XX_MATCH(rfldo, RFLDO),
+       BCM590XX_MATCH(camldo1, CAMLDO1),
+       BCM590XX_MATCH(camldo2, CAMLDO2),
+       BCM590XX_MATCH(simldo1, SIMLDO1),
+       BCM590XX_MATCH(simldo2, SIMLDO2),
+       BCM590XX_MATCH(sdldo, SDLDO),
+       BCM590XX_MATCH(sdxldo, SDXLDO),
+       BCM590XX_MATCH(mmcldo1, MMCLDO1),
+       BCM590XX_MATCH(mmcldo2, MMCLDO2),
+       BCM590XX_MATCH(audldo, AUDLDO),
+       BCM590XX_MATCH(micldo, MICLDO),
+       BCM590XX_MATCH(usbldo, USBLDO),
+       BCM590XX_MATCH(vibldo, VIBLDO),
+       BCM590XX_MATCH(csr, CSR),
+       BCM590XX_MATCH(iosr1, IOSR1),
+       BCM590XX_MATCH(iosr2, IOSR2),
+       BCM590XX_MATCH(msr, MSR),
+       BCM590XX_MATCH(sdsr1, SDSR1),
+       BCM590XX_MATCH(sdsr2, SDSR2),
+       BCM590XX_MATCH(vsr, VSR),
+};
+
+static struct bcm590xx_board *bcm590xx_parse_dt_reg_data(
+               struct platform_device *pdev,
+               struct of_regulator_match **bcm590xx_reg_matches)
+{
+       struct bcm590xx_board *data;
+       struct device_node *np = pdev->dev.parent->of_node;
+       struct device_node *regulators;
+       struct of_regulator_match *matches = bcm590xx_matches;
+       int count = ARRAY_SIZE(bcm590xx_matches);
+       int idx = 0;
+       int ret;
+
+       if (!np) {
+               dev_err(&pdev->dev, "of node not found\n");
+               return NULL;
+       }
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data) {
+               dev_err(&pdev->dev, "failed to allocate regulator board data\n");
+               return NULL;
+       }
+
+       np = of_node_get(np);
+       regulators = of_get_child_by_name(np, "regulators");
+       if (!regulators) {
+               dev_warn(&pdev->dev, "regulator node not found\n");
+               return NULL;
+       }
+
+       ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+       of_node_put(regulators);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
+                       ret);
+               return NULL;
+       }
+
+       *bcm590xx_reg_matches = matches;
+
+       for (idx = 0; idx < count; idx++) {
+               if (!matches[idx].init_data || !matches[idx].of_node)
+                       continue;
+
+               data->bcm590xx_pmu_init_data[idx] = matches[idx].init_data;
+       }
+
+       return data;
+}
+
+static int bcm590xx_probe(struct platform_device *pdev)
+{
+       struct bcm590xx *bcm590xx = dev_get_drvdata(pdev->dev.parent);
+       struct bcm590xx_board *pmu_data = NULL;
+       struct bcm590xx_reg *pmu;
+       struct regulator_config config = { };
+       struct bcm590xx_info *info;
+       struct regulator_init_data *reg_data;
+       struct regulator_dev *rdev;
+       struct of_regulator_match *bcm590xx_reg_matches = NULL;
+       int i;
+
+       pmu_data = bcm590xx_parse_dt_reg_data(pdev,
+                                             &bcm590xx_reg_matches);
+
+       pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+       if (!pmu) {
+               dev_err(&pdev->dev, "Memory allocation failed for pmu\n");
+               return -ENOMEM;
+       }
+
+       pmu->mfd = bcm590xx;
+
+       platform_set_drvdata(pdev, pmu);
+
+       pmu->desc = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS *
+                       sizeof(struct regulator_desc), GFP_KERNEL);
+       if (!pmu->desc) {
+               dev_err(&pdev->dev, "Memory alloc fails for desc\n");
+               return -ENOMEM;
+       }
+
+       pmu->info = devm_kzalloc(&pdev->dev, BCM590XX_NUM_REGS *
+                       sizeof(struct bcm590xx_info *), GFP_KERNEL);
+       if (!pmu->info) {
+               dev_err(&pdev->dev, "Memory alloc fails for info\n");
+               return -ENOMEM;
+       }
+
+       info = bcm590xx_regs;
+
+       for (i = 0; i < BCM590XX_NUM_REGS; i++, info++) {
+               if (pmu_data)
+                       reg_data = pmu_data->bcm590xx_pmu_init_data[i];
+               else
+                       reg_data = NULL;
+
+               /* Register the regulators */
+               pmu->info[i] = info;
+
+               pmu->desc[i].name = info->name;
+               pmu->desc[i].supply_name = info->vin_name;
+               pmu->desc[i].id = i;
+               pmu->desc[i].volt_table = info->volt_table;
+               pmu->desc[i].n_voltages = info->n_voltages;
+               pmu->desc[i].linear_ranges = info->linear_ranges;
+               pmu->desc[i].n_linear_ranges = info->n_linear_ranges;
+
+               if (BCM590XX_REG_IS_LDO(i)) {
+                       pmu->desc[i].ops = &bcm590xx_ops_ldo;
+                       pmu->desc[i].vsel_mask = BCM590XX_LDO_VSEL_MASK;
+               } else {
+                       pmu->desc[i].ops = &bcm590xx_ops_dcdc;
+                       pmu->desc[i].vsel_mask = BCM590XX_SR_VSEL_MASK;
+               }
+
+               pmu->desc[i].vsel_reg = bcm590xx_get_vsel_register(i);
+               pmu->desc[i].enable_is_inverted = true;
+               pmu->desc[i].enable_mask = BCM590XX_REG_ENABLE;
+               pmu->desc[i].enable_reg = bcm590xx_get_enable_register(i);
+               pmu->desc[i].type = REGULATOR_VOLTAGE;
+               pmu->desc[i].owner = THIS_MODULE;
+
+               config.dev = bcm590xx->dev;
+               config.init_data = reg_data;
+               config.driver_data = pmu;
+               config.regmap = bcm590xx->regmap;
+
+               if (bcm590xx_reg_matches)
+                       config.of_node = bcm590xx_reg_matches[i].of_node;
+
+               rdev = devm_regulator_register(&pdev->dev, &pmu->desc[i],
+                                              &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(bcm590xx->dev,
+                               "failed to register %s regulator\n",
+                               pdev->name);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static struct platform_driver bcm590xx_regulator_driver = {
+       .driver = {
+               .name = "bcm590xx-vregs",
+               .owner = THIS_MODULE,
+       },
+       .probe = bcm590xx_probe,
+};
+module_platform_driver(bcm590xx_regulator_driver);
+
+MODULE_AUTHOR("Matt Porter <mporter@linaro.org>");
+MODULE_DESCRIPTION("BCM590xx voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bcm590xx-vregs");
index afca1bc24f262251abf3352d2e09d6452959805b..bac485acc7f37dd83d472b4601daa7ef814cb30d 100644 (file)
@@ -2399,6 +2399,7 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
        struct regulator_dev *rdev = regulator->rdev;
        int ret = 0;
        int old_min_uV, old_max_uV;
+       int current_uV;
 
        mutex_lock(&rdev->mutex);
 
@@ -2409,6 +2410,19 @@ int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
        if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
                goto out;
 
+       /* If we're trying to set a range that overlaps the current voltage,
+        * return successfully even though the regulator does not support
+        * changing the voltage.
+        */
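+       /*
+        * Example (hypothetical numbers): a consumer requesting
+        * 1700000-1900000uV from a fixed 1800000uV supply that lacks
+        * REGULATOR_CHANGE_VOLTAGE permission now succeeds, because the
+        * current voltage already satisfies the requested range.
+        */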
+       if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+               current_uV = _regulator_get_voltage(rdev);
+               if (min_uV <= current_uV && current_uV <= max_uV) {
+                       regulator->min_uV = min_uV;
+                       regulator->max_uV = max_uV;
+                       goto out;
+               }
+       }
+
        /* sanity check */
        if (!rdev->desc->ops->set_voltage &&
            !rdev->desc->ops->set_voltage_sel) {
index 3adeaeffc485c0abe791487531a1c8ed87f91400..fdb6ea8ae7e64dc73a7cb2cc4f16a8b43c533b72 100644 (file)
@@ -240,6 +240,31 @@ static int da9052_regulator_set_voltage_sel(struct regulator_dev *rdev,
        return ret;
 }
 
+static int da9052_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+                                                unsigned int old_sel,
+                                                unsigned int new_sel)
+{
+       struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+       struct da9052_regulator_info *info = regulator->info;
+       int id = rdev_get_id(rdev);
+       int ret = 0;
+
+       /* The DVC controlled LDOs and DCDCs ramp with 6.25mV/µs after enabling
+        * the activate bit.
+        */
+       switch (id) {
+       case DA9052_ID_BUCK1:
+       case DA9052_ID_BUCK2:
+       case DA9052_ID_BUCK3:
+       case DA9052_ID_LDO2:
+       case DA9052_ID_LDO3:
+               ret = (new_sel - old_sel) * info->step_uV / 6250;
+               break;
+       }
+
+       return ret;
+}
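+
+/*
+ * Worked example (hypothetical step size): moving a DVC-controlled rail by
+ * four selector steps of 25000 uV each gives 4 * 25000 / 6250 = 16 us of
+ * ramp time at 6.25 mV/us.
+ */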
+
 static struct regulator_ops da9052_dcdc_ops = {
        .get_current_limit = da9052_dcdc_get_current_limit,
        .set_current_limit = da9052_dcdc_set_current_limit,
@@ -248,6 +273,7 @@ static struct regulator_ops da9052_dcdc_ops = {
        .map_voltage = da9052_map_voltage,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = da9052_regulator_set_voltage_sel,
+       .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel,
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -258,6 +284,7 @@ static struct regulator_ops da9052_ldo_ops = {
        .map_voltage = da9052_map_voltage,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .set_voltage_sel = da9052_regulator_set_voltage_sel,
+       .set_voltage_time_sel = da9052_regulator_set_voltage_time_sel,
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -401,7 +428,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
                if (!nproot)
                        return -ENODEV;
 
-               nproot = of_find_node_by_name(nproot, "regulators");
+               nproot = of_get_child_by_name(nproot, "regulators");
                if (!nproot)
                        return -ENODEV;
 
index b14ebdad5dd2508f30854ac97e2b803cb5fa6e03..9516317e1a9fbf45ea4bc3eff65170a24f070d8c 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
 
 #include <linux/mfd/da9055/core.h>
 #include <linux/mfd/da9055/reg.h>
@@ -446,6 +448,9 @@ static int da9055_gpio_init(struct da9055_regulator *regulator,
        struct da9055_regulator_info *info = regulator->info;
        int ret = 0;
 
+       if (!pdata)
+               return 0;
+
        if (pdata->gpio_ren && pdata->gpio_ren[id]) {
                char name[18];
                int gpio_mux = pdata->gpio_ren[id];
@@ -530,6 +535,59 @@ static inline struct da9055_regulator_info *find_regulator_info(int id)
        return NULL;
 }
 
+#ifdef CONFIG_OF
+static struct of_regulator_match da9055_reg_matches[] = {
+       { .name = "BUCK1", },
+       { .name = "BUCK2", },
+       { .name = "LDO1", },
+       { .name = "LDO2", },
+       { .name = "LDO3", },
+       { .name = "LDO4", },
+       { .name = "LDO5", },
+       { .name = "LDO6", },
+};
+
+static int da9055_regulator_dt_init(struct platform_device *pdev,
+                                   struct da9055_regulator *regulator,
+                                   struct regulator_config *config,
+                                   int regid)
+{
+       struct device_node *nproot, *np;
+       int ret;
+
+       nproot = of_node_get(pdev->dev.parent->of_node);
+       if (!nproot)
+               return -ENODEV;
+
+       np = of_get_child_by_name(nproot, "regulators");
+       if (!np)
+               return -ENODEV;
+
+       ret = of_regulator_match(&pdev->dev, np, &da9055_reg_matches[regid], 1);
+       of_node_put(nproot);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Error matching regulator: %d\n", ret);
+               return ret;
+       }
+
+       config->init_data = da9055_reg_matches[regid].init_data;
+       config->of_node = da9055_reg_matches[regid].of_node;
+
+       if (!config->of_node)
+               return -ENODEV;
+
+       return 0;
+}
+#else
+static inline int da9055_regulator_dt_init(struct platform_device *pdev,
+                                      struct da9055_regulator *regulator,
+                                      struct regulator_config *config,
+                                      int regid)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_OF */
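
The CONFIG_OF block added here follows the common of_regulator_match() idiom: find the "regulators" child of the MFD node, match a single regulator by name, and feed its init_data and of_node to the core. A driver-agnostic sketch of that idiom, with hypothetical names:

#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>

static struct of_regulator_match example_matches[] = {
        { .name = "BUCK1" },
        { .name = "LDO1"  },
};

/* Look up the "regulators" child of the MFD node, match one regulator
 * by name and hand its init_data/of_node to the regulator core.
 */
static int example_dt_init(struct device *dev, struct device_node *parent,
                           struct regulator_config *config, int id)
{
        struct device_node *np;
        int ret;

        np = of_get_child_by_name(parent, "regulators");
        if (!np)
                return -ENODEV;

        ret = of_regulator_match(dev, np, &example_matches[id], 1);
        of_node_put(np);
        if (ret < 0)
                return ret;

        config->init_data = example_matches[id].init_data;
        config->of_node = example_matches[id].of_node;

        return config->of_node ? 0 : -ENODEV;
}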
+
 static int da9055_regulator_probe(struct platform_device *pdev)
 {
        struct regulator_config config = { };
@@ -538,9 +596,6 @@ static int da9055_regulator_probe(struct platform_device *pdev)
        struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
        int ret, irq;
 
-       if (pdata == NULL || pdata->regulators[pdev->id] == NULL)
-               return -ENODEV;
-
        regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9055_regulator),
                                 GFP_KERNEL);
        if (!regulator)
@@ -557,8 +612,14 @@ static int da9055_regulator_probe(struct platform_device *pdev)
        config.driver_data = regulator;
        config.regmap = da9055->regmap;
 
-       if (pdata && pdata->regulators)
+       if (pdata && pdata->regulators) {
                config.init_data = pdata->regulators[pdev->id];
+       } else {
+               ret = da9055_regulator_dt_init(pdev, regulator, &config,
+                                              pdev->id);
+               if (ret < 0)
+                       return ret;
+       }
 
        ret = da9055_gpio_init(regulator, &config, pdata, pdev->id);
        if (ret < 0)
index 91e99a2c8dc14354c4f60368f025fef15d78786e..7c9461d13313ea1c4d1ddd86d3cf7e6ac783b7ee 100644 (file)
@@ -365,7 +365,7 @@ static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV)
 
        sel = regulator_map_voltage_linear(rdev, uV, uV);
        if (sel < 0)
-               return -EINVAL;
+               return sel;
 
        sel <<= ffs(rdev->desc->vsel_mask) - 1;
 
@@ -666,7 +666,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
        struct device_node *node;
        int i, n, num;
 
-       node = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+       node = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
        if (!node) {
                dev_err(&pdev->dev, "Regulators device node not found\n");
                return ERR_PTR(-ENODEV);
@@ -674,6 +674,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
 
        num = of_regulator_match(&pdev->dev, node, da9063_matches,
                                 ARRAY_SIZE(da9063_matches));
+       of_node_put(node);
        if (num < 0) {
                dev_err(&pdev->dev, "Failed to match regulators\n");
                return ERR_PTR(-EINVAL);
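
The of_find_node_by_name() to of_get_child_by_name() conversions repeated throughout this pull are not cosmetic: of_find_node_by_name() searches the whole tree starting at the given node and drops the caller's reference to it, so it can match an unrelated "regulators" node and unbalance refcounts, whereas of_get_child_by_name() returns only a direct child and leaves the parent's refcount untouched. The resulting idiom, sketched with hypothetical names:

#include <linux/of.h>

/* Only a direct child named "regulators" is accepted; the reference
 * taken by of_get_child_by_name() is dropped once parsing is done.
 */
static int example_count_regulators(struct device_node *parent)
{
        struct device_node *regulators;
        int num;

        regulators = of_get_child_by_name(parent, "regulators");
        if (!regulators)
                return -ENODEV;

        num = of_get_child_count(regulators);
        of_node_put(regulators);

        return num;
}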
@@ -710,7 +711,7 @@ static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
                struct platform_device *pdev,
                struct of_regulator_match **da9063_reg_matches)
 {
-       da9063_reg_matches = NULL;
+       *da9063_reg_matches = NULL;
        return ERR_PTR(-ENODEV);
 }
 #endif
@@ -756,7 +757,7 @@ static int da9063_regulator_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev,
                        "Error while reading BUCKs configuration\n");
-               return -EIO;
+               return ret;
        }
        bcores_merged = val & DA9063_BCORE_MERGE;
        bmem_bio_merged = val & DA9063_BUCK_MERGE;
@@ -775,10 +776,8 @@ static int da9063_regulator_probe(struct platform_device *pdev)
        size = sizeof(struct da9063_regulators) +
                n_regulators * sizeof(struct da9063_regulator);
        regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-       if (!regulators) {
-               dev_err(&pdev->dev, "No memory for regulators\n");
+       if (!regulators)
                return -ENOMEM;
-       }
 
        regulators->n_regulators = n_regulators;
        platform_set_drvdata(pdev, regulators);
index 6f5ecbe1132e7da4f02496672578ef2301285767..7a320dd11c46220bfff3869510f1abcd7903ae2d 100644 (file)
@@ -134,11 +134,8 @@ static int da9210_i2c_probe(struct i2c_client *i2c,
        int error;
 
        chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL);
-       if (NULL == chip) {
-               dev_err(&i2c->dev,
-                       "Cannot kzalloc memory for regulator structure\n");
+       if (!chip)
                return -ENOMEM;
-       }
 
        chip->regmap = devm_regmap_init_i2c(i2c, &da9210_regmap_config);
        if (IS_ERR(chip->regmap)) {
index 846acf240e486d138319cbf26ce9434b71e6bc6e..617c1adca816160b81edaf3a20b28bc84efca5d1 100644 (file)
@@ -263,6 +263,8 @@ dbx500_regulator_info[DB8500_NUM_REGULATORS] = {
                        .ops    = &db8500_regulator_ops,
                        .type   = REGULATOR_VOLTAGE,
                        .owner  = THIS_MODULE,
+                       .fixed_uV = 1800000,
+                       .n_voltages = 1,
                },
                .exclude_from_power_state = true,
        },
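
Setting .fixed_uV together with .n_voltages = 1 is what allows regulator_get_voltage() to report a value for a supply that has no get_voltage() or get_voltage_sel() callback. A minimal descriptor sketch with hypothetical names and values:

#include <linux/module.h>
#include <linux/regulator/driver.h>

static struct regulator_ops example_fixed_ops;  /* no callbacks needed */

static const struct regulator_desc example_fixed_desc = {
        .name       = "example-1v8",
        .type       = REGULATOR_VOLTAGE,
        .owner      = THIS_MODULE,
        .ops        = &example_fixed_ops,
        .n_voltages = 1,        /* exactly one selectable voltage */
        .fixed_uV   = 1800000,  /* reported by the core as-is */
};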
index ce89f7848a57f00d58c6f879cb2b3157d28fd303..2d16b9f16de73f5ce42ffb2370bba98c9bccf20b 100644 (file)
@@ -78,6 +78,7 @@ static struct ux500_regulator_debug {
 void ux500_regulator_suspend_debug(void)
 {
        int i;
+
        for (i = 0; i < rdebug.num_regulators; i++)
                rdebug.state_before_suspend[i] =
                        rdebug.regulator_array[i].is_enabled;
@@ -86,6 +87,7 @@ void ux500_regulator_suspend_debug(void)
 void ux500_regulator_resume_debug(void)
 {
        int i;
+
        for (i = 0; i < rdebug.num_regulators; i++)
                rdebug.state_after_suspend[i] =
                        rdebug.regulator_array[i].is_enabled;
@@ -127,9 +129,9 @@ static int ux500_regulator_status_print(struct seq_file *s, void *p)
        int i;
 
        /* print dump header */
-       err = seq_printf(s, "ux500-regulator status:\n");
+       err = seq_puts(s, "ux500-regulator status:\n");
        if (err < 0)
-               dev_err(dev, "seq_printf overflow\n");
+               dev_err(dev, "seq_puts overflow\n");
 
        err = seq_printf(s, "%31s : %8s : %8s\n", "current",
                "before", "after");
@@ -202,18 +204,12 @@ ux500_regulator_debug_init(struct platform_device *pdev,
        rdebug.num_regulators = num_regulators;
 
        rdebug.state_before_suspend = kzalloc(num_regulators, GFP_KERNEL);
-       if (!rdebug.state_before_suspend) {
-               dev_err(&pdev->dev,
-                       "could not allocate memory for saving state\n");
+       if (!rdebug.state_before_suspend)
                goto exit_destroy_power_state;
-       }
 
        rdebug.state_after_suspend = kzalloc(num_regulators, GFP_KERNEL);
-       if (!rdebug.state_after_suspend) {
-               dev_err(&pdev->dev,
-                       "could not allocate memory for saving state\n");
+       if (!rdebug.state_after_suspend)
                goto exit_free;
-       }
 
        dbx500_regulator_testcase(regulator_info, num_regulators);
        return 0;
index df9f42524abb3fd1164f61e7bee4065985d59d24..2436db9e2ca35f60875ab56cc825050c0d6a3ff2 100644 (file)
 
 struct regulator_dev *dummy_regulator_rdev;
 
-static struct regulator_init_data dummy_initdata;
+static struct regulator_init_data dummy_initdata = {
+       .constraints = {
+               .always_on = 1,
+       },
+};
 
 static struct regulator_ops dummy_ops;
 
index 7ca3d9e3b0fe22e426fb5419d76b9a4089224c7f..714fd9a89aa137b7484e437ff5017374e834f191 100644 (file)
@@ -90,11 +90,11 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
                return 0;
        ret = regulator_map_voltage_linear(rdev, uV, uV);
        if (ret < 0)
-               return -EINVAL;
+               return ret;
        ret = regmap_update_bits(di->regmap, di->sleep_reg,
                                        VSEL_NSEL_MASK, ret);
        if (ret < 0)
-               return -EINVAL;
+               return ret;
        /* Cache the sleep voltage setting.
         * Might not be the real voltage which is rounded */
        di->sleep_vol_cache = uV;
@@ -244,10 +244,9 @@ static int fan53555_regulator_probe(struct i2c_client *client,
 
        di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info),
                                        GFP_KERNEL);
-       if (!di) {
-               dev_err(&client->dev, "Failed to allocate device info data!\n");
+       if (!di)
                return -ENOMEM;
-       }
+
        di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
        if (IS_ERR(di->regmap)) {
                dev_err(&client->dev, "Failed to allocate regmap!\n");
@@ -260,14 +259,14 @@ static int fan53555_regulator_probe(struct i2c_client *client,
        ret = regmap_read(di->regmap, FAN53555_ID1, &val);
        if (ret < 0) {
                dev_err(&client->dev, "Failed to get chip ID!\n");
-               return -ENODEV;
+               return ret;
        }
        di->chip_id = val & DIE_ID;
        /* Get chip revision */
        ret = regmap_read(di->regmap, FAN53555_ID2, &val);
        if (ret < 0) {
                dev_err(&client->dev, "Failed to get chip Rev!\n");
-               return -ENODEV;
+               return ret;
        }
        di->chip_rev = val & DIE_REV;
        dev_info(&client->dev, "FAN53555 Option[%d] Rev[%d] Detected!\n",
index 5ea64b94341c5f7a926b07d83536c9d4fe0dba32..c61f7e97e4f8c663f06837439eceb55bde63efca 100644 (file)
@@ -130,17 +130,15 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
 
        drvdata = devm_kzalloc(&pdev->dev, sizeof(struct fixed_voltage_data),
                               GFP_KERNEL);
-       if (drvdata == NULL) {
-               dev_err(&pdev->dev, "Failed to allocate device data\n");
-               ret = -ENOMEM;
-               goto err;
-       }
+       if (!drvdata)
+               return -ENOMEM;
 
-       drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
+       drvdata->desc.name = devm_kstrdup(&pdev->dev,
+                                         config->supply_name,
+                                         GFP_KERNEL);
        if (drvdata->desc.name == NULL) {
                dev_err(&pdev->dev, "Failed to allocate supply name\n");
-               ret = -ENOMEM;
-               goto err;
+               return -ENOMEM;
        }
        drvdata->desc.type = REGULATOR_VOLTAGE;
        drvdata->desc.owner = THIS_MODULE;
@@ -149,13 +147,13 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        drvdata->desc.enable_time = config->startup_delay;
 
        if (config->input_supply) {
-               drvdata->desc.supply_name = kstrdup(config->input_supply,
-                                                       GFP_KERNEL);
+               drvdata->desc.supply_name = devm_kstrdup(&pdev->dev,
+                                           config->input_supply,
+                                           GFP_KERNEL);
                if (!drvdata->desc.supply_name) {
                        dev_err(&pdev->dev,
                                "Failed to allocate input supply\n");
-                       ret = -ENOMEM;
-                       goto err_name;
+                       return -ENOMEM;
                }
        }
 
@@ -186,11 +184,12 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        cfg.driver_data = drvdata;
        cfg.of_node = pdev->dev.of_node;
 
-       drvdata->dev = regulator_register(&drvdata->desc, &cfg);
+       drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
+                                              &cfg);
        if (IS_ERR(drvdata->dev)) {
                ret = PTR_ERR(drvdata->dev);
                dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
-               goto err_input;
+               return ret;
        }
 
        platform_set_drvdata(pdev, drvdata);
@@ -199,24 +198,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
                drvdata->desc.fixed_uV);
 
        return 0;
-
-err_input:
-       kfree(drvdata->desc.supply_name);
-err_name:
-       kfree(drvdata->desc.name);
-err:
-       return ret;
-}
-
-static int reg_fixed_voltage_remove(struct platform_device *pdev)
-{
-       struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
-
-       regulator_unregister(drvdata->dev);
-       kfree(drvdata->desc.supply_name);
-       kfree(drvdata->desc.name);
-
-       return 0;
 }
 
 #if defined(CONFIG_OF)
@@ -229,7 +210,6 @@ MODULE_DEVICE_TABLE(of, fixed_of_match);
 
 static struct platform_driver regulator_fixed_voltage_driver = {
        .probe          = reg_fixed_voltage_probe,
-       .remove         = reg_fixed_voltage_remove,
        .driver         = {
                .name           = "reg-fixed-voltage",
                .owner          = THIS_MODULE,
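
With devm_kstrdup() and devm_regulator_register() handling cleanup, the error-unwinding labels and the entire remove() callback go away. A stripped-down sketch of such a probe; driver and supply names are hypothetical:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/slab.h>

static struct regulator_ops example_ops;        /* fixed rail, no callbacks */

struct example_fixed {
        struct regulator_desc desc;
        struct regulator_dev *rdev;
};

static int example_fixed_probe(struct platform_device *pdev)
{
        struct example_fixed *drv;
        struct regulator_config cfg = { };

        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
                return -ENOMEM;

        drv->desc.name = devm_kstrdup(&pdev->dev, "example-supply", GFP_KERNEL);
        if (!drv->desc.name)
                return -ENOMEM;
        drv->desc.type = REGULATOR_VOLTAGE;
        drv->desc.owner = THIS_MODULE;
        drv->desc.ops = &example_ops;

        cfg.dev = &pdev->dev;
        cfg.driver_data = drv;

        /* Unregistered and freed automatically when the device goes away */
        drv->rdev = devm_regulator_register(&pdev->dev, &drv->desc, &cfg);
        if (IS_ERR(drv->rdev))
                return PTR_ERR(drv->rdev);

        platform_set_drvdata(pdev, drv);
        return 0;
}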
index c0a1d00b78c932a6982961334f9e3e9501592c50..989b23b377c0d2bb511227749e871ce72f46398b 100644 (file)
@@ -136,7 +136,6 @@ static struct gpio_regulator_config *
 of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
 {
        struct gpio_regulator_config *config;
-       struct property *prop;
        const char *regtype;
        int proplen, gpio, i;
        int ret;
@@ -172,22 +171,35 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
        if (!config->gpios)
                return ERR_PTR(-ENOMEM);
 
+       proplen = of_property_count_u32_elems(np, "gpios-states");
+       /* optional property */
+       if (proplen < 0)
+               proplen = 0;
+
+       if (proplen > 0 && proplen != config->nr_gpios) {
+               dev_warn(dev, "gpios <-> gpios-states mismatch\n");
+               proplen = 0;
+       }
+
        for (i = 0; i < config->nr_gpios; i++) {
                gpio = of_get_named_gpio(np, "gpios", i);
                if (gpio < 0)
                        break;
                config->gpios[i].gpio = gpio;
+               if (proplen > 0) {
+                       of_property_read_u32_index(np, "gpios-states", i, &ret);
+                       if (ret)
+                               config->gpios[i].flags = GPIOF_OUT_INIT_HIGH;
+               }
        }
 
        /* Fetch states. */
-       prop = of_find_property(np, "states", NULL);
-       if (!prop) {
+       proplen = of_property_count_u32_elems(np, "states");
+       if (proplen < 0) {
                dev_err(dev, "No 'states' property found\n");
                return ERR_PTR(-EINVAL);
        }
 
-       proplen = prop->length / sizeof(int);
-
        config->states = devm_kzalloc(dev,
                                sizeof(struct gpio_regulator_state)
                                * (proplen / 2),
@@ -196,10 +208,10 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
                return ERR_PTR(-ENOMEM);
 
        for (i = 0; i < proplen / 2; i++) {
-               config->states[i].value =
-                       be32_to_cpup((int *)prop->value + (i * 2));
-               config->states[i].gpios =
-                       be32_to_cpup((int *)prop->value + (i * 2 + 1));
+               of_property_read_u32_index(np, "states", i * 2,
+                                          &config->states[i].value);
+               of_property_read_u32_index(np, "states", i * 2 + 1,
+                                          &config->states[i].gpios);
        }
        config->nr_states = i;
 
@@ -239,10 +251,8 @@ static int gpio_regulator_probe(struct platform_device *pdev)
 
        drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
                               GFP_KERNEL);
-       if (drvdata == NULL) {
-               dev_err(&pdev->dev, "Failed to allocate device data\n");
+       if (drvdata == NULL)
                return -ENOMEM;
-       }
 
        drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
        if (drvdata->desc.name == NULL) {
index e221a271ba56601d12f5fc3d14a09db967a7d292..cbc39096c78d436f8206aa7107811bc43da6fd20 100644 (file)
@@ -37,10 +37,17 @@ int regulator_is_enabled_regmap(struct regulator_dev *rdev)
        if (ret != 0)
                return ret;
 
-       if (rdev->desc->enable_is_inverted)
-               return (val & rdev->desc->enable_mask) == 0;
-       else
-               return (val & rdev->desc->enable_mask) != 0;
+       val &= rdev->desc->enable_mask;
+
+       if (rdev->desc->enable_is_inverted) {
+               if (rdev->desc->enable_val)
+                       return val != rdev->desc->enable_val;
+               return val == 0;
+       } else {
+               if (rdev->desc->enable_val)
+                       return val == rdev->desc->enable_val;
+               return val != 0;
+       }
 }
 EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
 
@@ -57,10 +64,13 @@ int regulator_enable_regmap(struct regulator_dev *rdev)
 {
        unsigned int val;
 
-       if (rdev->desc->enable_is_inverted)
-               val = 0;
-       else
-               val = rdev->desc->enable_mask;
+       if (rdev->desc->enable_is_inverted) {
+               val = rdev->desc->disable_val;
+       } else {
+               val = rdev->desc->enable_val;
+               if (!val)
+                       val = rdev->desc->enable_mask;
+       }
 
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask, val);
@@ -80,10 +90,13 @@ int regulator_disable_regmap(struct regulator_dev *rdev)
 {
        unsigned int val;
 
-       if (rdev->desc->enable_is_inverted)
-               val = rdev->desc->enable_mask;
-       else
-               val = 0;
+       if (rdev->desc->enable_is_inverted) {
+               val = rdev->desc->enable_val;
+               if (!val)
+                       val = rdev->desc->enable_mask;
+       } else {
+               val = rdev->desc->disable_val;
+       }
 
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask, val);
@@ -419,10 +432,13 @@ int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
 {
        unsigned int val;
 
-       if (enable)
-               val = rdev->desc->bypass_mask;
-       else
-               val = 0;
+       if (enable) {
+               val = rdev->desc->bypass_val_on;
+               if (!val)
+                       val = rdev->desc->bypass_mask;
+       } else {
+               val = rdev->desc->bypass_val_off;
+       }
 
        return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
                                  rdev->desc->bypass_mask, val);
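
With enable_val and disable_val honoured, the regmap helpers can drive regulators whose on/off control is a multi-bit mode field rather than a single bit. A minimal descriptor sketch with a made-up register layout:

#include <linux/module.h>
#include <linux/regulator/driver.h>

static struct regulator_ops example_ops = {
        .is_enabled = regulator_is_enabled_regmap,
        .enable     = regulator_enable_regmap,
        .disable    = regulator_disable_regmap,
};

/* Made-up register layout: a two-bit mode field where 0x1 means "on"
 * and 0x3 means "off"; the helpers compare and write these values
 * within enable_mask instead of treating the field as one bit.
 */
static const struct regulator_desc example_desc = {
        .name        = "example-ldo",
        .type        = REGULATOR_VOLTAGE,
        .owner       = THIS_MODULE,
        .ops         = &example_ops,
        .enable_reg  = 0x12,
        .enable_mask = 0x03,
        .enable_val  = 0x01,
        .disable_val = 0x03,
};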
index 3b1102b75071f9d9f73fa0b34db63e43f9784614..66fd2330dca043a2627c8443f090ba5822f5de14 100644 (file)
@@ -327,7 +327,7 @@ static int lp3971_i2c_read(struct i2c_client *i2c, char reg, int count,
                return -EIO;
        ret = i2c_smbus_read_byte_data(i2c, reg);
        if (ret < 0)
-               return -EIO;
+               return ret;
 
        *dest = ret;
        return 0;
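
fan53555 above, lp3971 here and lp872x just below all switch from returning a hard-coded -EIO or -EINVAL to propagating the error from the lower layer, so callers see the real cause such as a bus error or timeout. A hypothetical helper showing the shape of the fix:

#include <linux/device.h>
#include <linux/regmap.h>

/* Hypothetical helper: keep the errno from regmap_read() instead of
 * collapsing every failure into -EIO.
 */
static int example_read_id(struct device *dev, struct regmap *map,
                           unsigned int reg, unsigned int *id)
{
        int ret = regmap_read(map, reg, id);

        if (ret < 0)
                dev_err(dev, "failed to read ID register: %d\n", ret);

        return ret;
}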
index 2e4734ff79fce29eaa2cab621b2e13cfa565a0da..2e022aabd951264bdc575d6b32fbdaa148d77298 100644 (file)
@@ -211,7 +211,7 @@ static int lp872x_get_timestep_usec(struct lp872x *lp)
 
        ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
        if (ret)
-               return -EINVAL;
+               return ret;
 
        val = (val & mask) >> shift;
        if (val >= size)
@@ -229,7 +229,7 @@ static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
        u8 addr, val;
 
        if (time_step_us < 0)
-               return -EINVAL;
+               return time_step_us;
 
        switch (rid) {
        case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
index e0619526708c88393069abf4411fa2ff9473e510..ed60baaeceeca79cc9ba2b9c770973d5039da3fa 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * max14577.c - Regulator driver for the Maxim 14577
  *
- * Copyright (C) 2013 Samsung Electronics
+ * Copyright (C) 2013,2014 Samsung Electronics
  * Krzysztof Kozlowski <k.kozlowski@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #include <linux/mfd/max14577-private.h>
 #include <linux/regulator/of_regulator.h>
 
-struct max14577_regulator {
-       struct device *dev;
-       struct max14577 *max14577;
-       struct regulator_dev **regulators;
-};
-
 static int max14577_reg_is_enabled(struct regulator_dev *rdev)
 {
        int rid = rdev_get_id(rdev);
index e242dd316d363bb7f4226f2abfdee051c4d095d3..d23d0577754ba64334659aba14fe6671aba2a38a 100644 (file)
@@ -46,8 +46,6 @@ struct max1586_data {
 
        unsigned int v3_curr_sel;
        unsigned int v6_curr_sel;
-
-       struct regulator_dev *rdev[0];
 };
 
 /*
@@ -162,14 +160,12 @@ static struct regulator_desc max1586_reg[] = {
 static int max1586_pmic_probe(struct i2c_client *client,
                                        const struct i2c_device_id *i2c_id)
 {
-       struct regulator_dev **rdev;
        struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
        struct regulator_config config = { };
        struct max1586_data *max1586;
        int i, id;
 
-       max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data) +
-                       sizeof(struct regulator_dev *) * (MAX1586_V6 + 1),
+       max1586 = devm_kzalloc(&client->dev, sizeof(struct max1586_data),
                        GFP_KERNEL);
        if (!max1586)
                return -ENOMEM;
@@ -186,8 +182,9 @@ static int max1586_pmic_probe(struct i2c_client *client,
        max1586->v3_curr_sel = 24; /* 1.3V */
        max1586->v6_curr_sel = 0;
 
-       rdev = max1586->rdev;
        for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) {
+               struct regulator_dev *rdev;
+
                id = pdata->subdevs[i].id;
                if (!pdata->subdevs[i].platform_data)
                        continue;
@@ -207,12 +204,12 @@ static int max1586_pmic_probe(struct i2c_client *client,
                config.init_data = pdata->subdevs[i].platform_data;
                config.driver_data = max1586;
 
-               rdev[i] = devm_regulator_register(&client->dev,
+               rdev = devm_regulator_register(&client->dev,
                                                  &max1586_reg[id], &config);
-               if (IS_ERR(rdev[i])) {
+               if (IS_ERR(rdev)) {
                        dev_err(&client->dev, "failed to register %s\n",
                                max1586_reg[id].name);
-                       return PTR_ERR(rdev[i]);
+                       return PTR_ERR(rdev);
                }
        }
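
The same simplification recurs below in max77686, max77693, max8660, max8907, max8997 and max8998: once devm_regulator_register() owns the lifetime, no rdev[] array needs to be kept, only a local for the error check. The shared pattern as a hypothetical helper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/driver.h>

/* Hypothetical helper: register an array of descriptors; nothing is
 * stored because devm ties each rdev's lifetime to the device.
 */
static int example_register_all(struct device *dev,
                                const struct regulator_desc *descs, int count,
                                struct regulator_config *config)
{
        int i;

        for (i = 0; i < count; i++) {
                struct regulator_dev *rdev;

                rdev = devm_regulator_register(dev, &descs[i], config);
                if (IS_ERR(rdev)) {
                        dev_err(dev, "failed to register %s\n", descs[i].name);
                        return PTR_ERR(rdev);
                }
        }

        return 0;
}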
 
index ae001ccf26f42d952bec37c2a821c250e2307b3c..ef1af2debbd293af555cbcde4ed5c72d866ad47a 100644 (file)
@@ -65,7 +65,6 @@ enum max77686_ramp_rate {
 };
 
 struct max77686_data {
-       struct regulator_dev *rdev[MAX77686_REGULATORS];
        unsigned int opmode[MAX77686_REGULATORS];
 };
 
@@ -400,7 +399,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
        unsigned int i;
 
        pmic_np = iodev->dev->of_node;
-       regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
+       regulators_np = of_get_child_by_name(pmic_np, "voltage-regulators");
        if (!regulators_np) {
                dev_err(&pdev->dev, "could not find regulators sub-node\n");
                return -EINVAL;
@@ -410,8 +409,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
        rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
                             pdata->num_regulators, GFP_KERNEL);
        if (!rdata) {
-               dev_err(&pdev->dev,
-                       "could not allocate memory for regulator data\n");
+               of_node_put(regulators_np);
                return -ENOMEM;
        }
 
@@ -425,6 +423,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
        }
 
        pdata->regulators = rdata;
+       of_node_put(regulators_np);
 
        return 0;
 }
@@ -474,16 +473,18 @@ static int max77686_pmic_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, max77686);
 
        for (i = 0; i < MAX77686_REGULATORS; i++) {
+               struct regulator_dev *rdev;
+
                config.init_data = pdata->regulators[i].initdata;
                config.of_node = pdata->regulators[i].of_node;
 
                max77686->opmode[i] = regulators[i].enable_mask;
-               max77686->rdev[i] = devm_regulator_register(&pdev->dev,
+               rdev = devm_regulator_register(&pdev->dev,
                                                &regulators[i], &config);
-               if (IS_ERR(max77686->rdev[i])) {
+               if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev,
                                "regulator init failed for %d\n", i);
-                       return PTR_ERR(max77686->rdev[i]);
+                       return PTR_ERR(rdev);
                }
        }
 
index 5fb899f461d0c47a666715fc6bf3813083ee17a3..653a58b49cdff81b3f3ab724bc05e8417d1c913d 100644 (file)
 
 #define CHGIN_ILIM_STEP_20mA                   20000
 
-struct max77693_pmic_dev {
-       struct device *dev;
-       struct max77693_dev *iodev;
-       int num_regulators;
-       struct regulator_dev **rdev;
-};
-
 /* CHARGER regulator ops */
 /* CHARGER regulator uses two bits for enabling */
 static int max77693_chg_is_enabled(struct regulator_dev *rdev)
@@ -170,19 +163,22 @@ static int max77693_pmic_dt_parse_rdata(struct device *dev,
        struct max77693_regulator_data *tmp;
        int i, matched = 0;
 
-       np = of_find_node_by_name(dev->parent->of_node, "regulators");
+       np = of_get_child_by_name(dev->parent->of_node, "regulators");
        if (!np)
                return -EINVAL;
 
        rmatch = devm_kzalloc(dev,
                 sizeof(*rmatch) * ARRAY_SIZE(regulators), GFP_KERNEL);
-       if (!rmatch)
+       if (!rmatch) {
+               of_node_put(np);
                return -ENOMEM;
+       }
 
        for (i = 0; i < ARRAY_SIZE(regulators); i++)
                rmatch[i].name = regulators[i].name;
 
        matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(regulators));
+       of_node_put(np);
        if (matched <= 0)
                return matched;
        *rdata = devm_kzalloc(dev, sizeof(**rdata) * matched, GFP_KERNEL);
@@ -229,7 +225,6 @@ static int max77693_pmic_init_rdata(struct device *dev,
 static int max77693_pmic_probe(struct platform_device *pdev)
 {
        struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
-       struct max77693_pmic_dev *max77693_pmic;
        struct max77693_regulator_data *rdata = NULL;
        int num_rdata, i;
        struct regulator_config config;
@@ -240,39 +235,22 @@ static int max77693_pmic_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       max77693_pmic = devm_kzalloc(&pdev->dev,
-                               sizeof(struct max77693_pmic_dev),
-                               GFP_KERNEL);
-       if (!max77693_pmic)
-               return -ENOMEM;
-
-       max77693_pmic->rdev = devm_kzalloc(&pdev->dev,
-                               sizeof(struct regulator_dev *) * num_rdata,
-                               GFP_KERNEL);
-       if (!max77693_pmic->rdev)
-               return -ENOMEM;
-
-       max77693_pmic->dev = &pdev->dev;
-       max77693_pmic->iodev = iodev;
-       max77693_pmic->num_regulators = num_rdata;
-
        config.dev = &pdev->dev;
        config.regmap = iodev->regmap;
-       config.driver_data = max77693_pmic;
-       platform_set_drvdata(pdev, max77693_pmic);
 
-       for (i = 0; i < max77693_pmic->num_regulators; i++) {
+       for (i = 0; i < num_rdata; i++) {
                int id = rdata[i].id;
+               struct regulator_dev *rdev;
 
                config.init_data = rdata[i].initdata;
                config.of_node = rdata[i].of_node;
 
-               max77693_pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+               rdev = devm_regulator_register(&pdev->dev,
                                                &regulators[id], &config);
-               if (IS_ERR(max77693_pmic->rdev[i])) {
-                       dev_err(max77693_pmic->dev,
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev,
                                "Failed to initialize regulator-%d\n", id);
-                       return PTR_ERR(max77693_pmic->rdev[i]);
+                       return PTR_ERR(rdev);
                }
        }
 
index 7f049c92ee52406bbbc7bd3e08573bdd89f25159..3172da847d248bfbb039162447d282ee99299abd 100644 (file)
@@ -49,7 +49,6 @@
 #define MAX8649_RAMP_DOWN      (1 << 1)
 
 struct max8649_regulator_info {
-       struct regulator_dev    *regulator;
        struct device           *dev;
        struct regmap           *regmap;
 
@@ -154,6 +153,7 @@ static int max8649_regulator_probe(struct i2c_client *client,
 {
        struct max8649_platform_data *pdata = dev_get_platdata(&client->dev);
        struct max8649_regulator_info *info = NULL;
+       struct regulator_dev *regulator;
        struct regulator_config config = { };
        unsigned int val;
        unsigned char data;
@@ -234,12 +234,12 @@ static int max8649_regulator_probe(struct i2c_client *client,
        config.driver_data = info;
        config.regmap = info->regmap;
 
-       info->regulator = devm_regulator_register(&client->dev, &dcdc_desc,
+       regulator = devm_regulator_register(&client->dev, &dcdc_desc,
                                                  &config);
-       if (IS_ERR(info->regulator)) {
+       if (IS_ERR(regulator)) {
                dev_err(info->dev, "failed to register regulator %s\n",
                        dcdc_desc.name);
-               return PTR_ERR(info->regulator);
+               return PTR_ERR(regulator);
        }
 
        return 0;
index 8d94d3d7f97f22336fb973d5659ed50b4a3e6a97..2fc4111887949e01e04b023d6065962853779d42 100644 (file)
@@ -81,16 +81,17 @@ enum {
 struct max8660 {
        struct i2c_client *client;
        u8 shadow_regs[MAX8660_N_REGS];         /* as chip is write only */
-       struct regulator_dev *rdev[];
 };
 
 static int max8660_write(struct max8660 *max8660, u8 reg, u8 mask, u8 val)
 {
-       static const u8 max8660_addresses[MAX8660_N_REGS] =
-         { 0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80 };
+       static const u8 max8660_addresses[MAX8660_N_REGS] = {
+        0x10, 0x12, 0x20, 0x23, 0x24, 0x29, 0x2a, 0x32, 0x33, 0x39, 0x80
+       };
 
        int ret;
        u8 reg_val = (max8660->shadow_regs[reg] & mask) | val;
+
        dev_vdbg(&max8660->client->dev, "Writing reg %02x with %02x\n",
                        max8660_addresses[reg], reg_val);
 
@@ -112,6 +113,7 @@ static int max8660_dcdc_is_enabled(struct regulator_dev *rdev)
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
        u8 val = max8660->shadow_regs[MAX8660_OVER1];
        u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+
        return !!(val & mask);
 }
 
@@ -119,6 +121,7 @@ static int max8660_dcdc_enable(struct regulator_dev *rdev)
 {
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
        u8 bit = (rdev_get_id(rdev) == MAX8660_V3) ? 1 : 4;
+
        return max8660_write(max8660, MAX8660_OVER1, 0xff, bit);
 }
 
@@ -126,15 +129,16 @@ static int max8660_dcdc_disable(struct regulator_dev *rdev)
 {
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
        u8 mask = (rdev_get_id(rdev) == MAX8660_V3) ? ~1 : ~4;
+
        return max8660_write(max8660, MAX8660_OVER1, mask, 0);
 }
 
 static int max8660_dcdc_get_voltage_sel(struct regulator_dev *rdev)
 {
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
-
        u8 reg = (rdev_get_id(rdev) == MAX8660_V3) ? MAX8660_ADTV2 : MAX8660_SDTV2;
        u8 selector = max8660->shadow_regs[reg];
+
        return selector;
 }
 
@@ -207,6 +211,7 @@ static int max8660_ldo67_is_enabled(struct regulator_dev *rdev)
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
        u8 val = max8660->shadow_regs[MAX8660_OVER2];
        u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+
        return !!(val & mask);
 }
 
@@ -214,6 +219,7 @@ static int max8660_ldo67_enable(struct regulator_dev *rdev)
 {
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
        u8 bit = (rdev_get_id(rdev) == MAX8660_V6) ? 2 : 4;
+
        return max8660_write(max8660, MAX8660_OVER2, 0xff, bit);
 }
 
@@ -221,15 +227,16 @@ static int max8660_ldo67_disable(struct regulator_dev *rdev)
 {
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
        u8 mask = (rdev_get_id(rdev) == MAX8660_V6) ? ~2 : ~4;
+
        return max8660_write(max8660, MAX8660_OVER2, mask, 0);
 }
 
 static int max8660_ldo67_get_voltage_sel(struct regulator_dev *rdev)
 {
        struct max8660 *max8660 = rdev_get_drvdata(rdev);
-
        u8 shift = (rdev_get_id(rdev) == MAX8660_V6) ? 0 : 4;
        u8 selector = (max8660->shadow_regs[MAX8660_L12VCR] >> shift) & 0xf;
+
        return selector;
 }
 
@@ -330,7 +337,7 @@ static int max8660_pdata_from_dt(struct device *dev,
        struct max8660_subdev_data *sub;
        struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)];
 
-       np = of_find_node_by_name(dev->of_node, "regulators");
+       np = of_get_child_by_name(dev->of_node, "regulators");
        if (!np) {
                dev_err(dev, "missing 'regulators' subnode in DT\n");
                return -EINVAL;
@@ -340,6 +347,7 @@ static int max8660_pdata_from_dt(struct device *dev,
                rmatch[i].name = max8660_reg[i].name;
 
        matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch));
+       of_node_put(np);
        if (matched <= 0)
                return matched;
 
@@ -373,7 +381,6 @@ static inline int max8660_pdata_from_dt(struct device *dev,
 static int max8660_probe(struct i2c_client *client,
                                   const struct i2c_device_id *i2c_id)
 {
-       struct regulator_dev **rdev;
        struct device *dev = &client->dev;
        struct max8660_platform_data *pdata = dev_get_platdata(dev);
        struct regulator_config config = { };
@@ -406,14 +413,11 @@ static int max8660_probe(struct i2c_client *client,
                return -EINVAL;
        }
 
-       max8660 = devm_kzalloc(dev, sizeof(struct max8660) +
-                       sizeof(struct regulator_dev *) * MAX8660_V_END,
-                       GFP_KERNEL);
+       max8660 = devm_kzalloc(dev, sizeof(struct max8660), GFP_KERNEL);
        if (!max8660)
                return -ENOMEM;
 
        max8660->client = client;
-       rdev = max8660->rdev;
 
        if (pdata->en34_is_high) {
                /* Simulate always on */
@@ -481,6 +485,7 @@ static int max8660_probe(struct i2c_client *client,
 
        /* Finally register devices */
        for (i = 0; i < pdata->num_subdevs; i++) {
+               struct regulator_dev *rdev;
 
                id = pdata->subdevs[i].id;
 
@@ -489,13 +494,13 @@ static int max8660_probe(struct i2c_client *client,
                config.of_node = of_node[i];
                config.driver_data = max8660;
 
-               rdev[i] = devm_regulator_register(&client->dev,
+               rdev = devm_regulator_register(&client->dev,
                                                  &max8660_reg[id], &config);
-               if (IS_ERR(rdev[i])) {
-                       ret = PTR_ERR(rdev[i]);
+               if (IS_ERR(rdev)) {
+                       ret = PTR_ERR(rdev);
                        dev_err(&client->dev, "failed to register %s\n",
                                max8660_reg[id].name);
-                       return PTR_ERR(rdev[i]);
+                       return PTR_ERR(rdev);
                }
        }
 
index 0c5fe6c6ac26eb22ba248c3b64bbe9c98129283b..9623e9e290bf91bb1b7a330946b6783e5bdc7707 100644 (file)
@@ -34,7 +34,6 @@
 
 struct max8907_regulator {
        struct regulator_desc desc[MAX8907_NUM_REGULATORS];
-       struct regulator_dev *rdev[MAX8907_NUM_REGULATORS];
 };
 
 #define REG_MBATT() \
@@ -231,7 +230,7 @@ static int max8907_regulator_parse_dt(struct platform_device *pdev)
        if (!np)
                return 0;
 
-       regulators = of_find_node_by_name(np, "regulators");
+       regulators = of_get_child_by_name(np, "regulators");
        if (!regulators) {
                dev_err(&pdev->dev, "regulators node not found\n");
                return -EINVAL;
@@ -292,10 +291,9 @@ static int max8907_regulator_probe(struct platform_device *pdev)
                return ret;
 
        pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
-       if (!pmic) {
-               dev_err(&pdev->dev, "Failed to alloc pmic\n");
+       if (!pmic)
                return -ENOMEM;
-       }
+
        platform_set_drvdata(pdev, pmic);
 
        memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc));
@@ -311,6 +309,8 @@ static int max8907_regulator_probe(struct platform_device *pdev)
        }
 
        for (i = 0; i < MAX8907_NUM_REGULATORS; i++) {
+               struct regulator_dev *rdev;
+
                config.dev = pdev->dev.parent;
                if (pdata)
                        idata = pdata->init_data[i];
@@ -350,13 +350,13 @@ static int max8907_regulator_probe(struct platform_device *pdev)
                                pmic->desc[i].ops = &max8907_out5v_hwctl_ops;
                }
 
-               pmic->rdev[i] = devm_regulator_register(&pdev->dev,
+               rdev = devm_regulator_register(&pdev->dev,
                                                &pmic->desc[i], &config);
-               if (IS_ERR(pmic->rdev[i])) {
+               if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev,
                                "failed to register %s regulator\n",
                                pmic->desc[i].name);
-                       return PTR_ERR(pmic->rdev[i]);
+                       return PTR_ERR(rdev);
                }
        }
 
index 759510789e71db39b0b287f9849cea88827d50c4..dad2bcd14e962759d9f5e7b9ae579f6c1369b325 100644 (file)
@@ -36,9 +36,7 @@
 
 struct max8925_regulator_info {
        struct regulator_desc   desc;
-       struct regulator_dev    *regulator;
        struct i2c_client       *i2c;
-       struct max8925_chip     *chip;
 
        int     vol_reg;
        int     enable_reg;
@@ -251,10 +249,11 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
 {
        struct device_node *nproot, *np;
        int rcount;
+
        nproot = of_node_get(pdev->dev.parent->of_node);
        if (!nproot)
                return -ENODEV;
-       np = of_find_node_by_name(nproot, "regulators");
+       np = of_get_child_by_name(nproot, "regulators");
        if (!np) {
                dev_err(&pdev->dev, "failed to find regulators node\n");
                return -ENODEV;
@@ -264,7 +263,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
                                &max8925_regulator_matches[ridx], 1);
        of_node_put(np);
        if (rcount < 0)
-               return -ENODEV;
+               return rcount;
        config->init_data =     max8925_regulator_matches[ridx].init_data;
        config->of_node = max8925_regulator_matches[ridx].of_node;
 
@@ -303,7 +302,6 @@ static int max8925_regulator_probe(struct platform_device *pdev)
                return -EINVAL;
        }
        ri->i2c = chip->i2c;
-       ri->chip = chip;
 
        config.dev = &pdev->dev;
        config.driver_data = ri;
index 788e5ae2af1b51464b16dca99c4d190a067cc67a..d920f5a32ec8e935bd5288b571dd9adb3076c319 100644 (file)
@@ -48,9 +48,7 @@ enum {
 
 struct max8952_data {
        struct i2c_client       *client;
-       struct device           *dev;
        struct max8952_platform_data *pdata;
-       struct regulator_dev    *rdev;
 
        bool vid0;
        bool vid1;
@@ -59,6 +57,7 @@ struct max8952_data {
 static int max8952_read_reg(struct max8952_data *max8952, u8 reg)
 {
        int ret = i2c_smbus_read_byte_data(max8952->client, reg);
+
        if (ret > 0)
                ret &= 0xff;
 
@@ -144,10 +143,8 @@ static struct max8952_platform_data *max8952_parse_dt(struct device *dev)
        int i;
 
        pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
-       if (!pd) {
-               dev_err(dev, "Failed to allocate platform data\n");
+       if (!pd)
                return NULL;
-       }
 
        pd->gpio_vid0 = of_get_named_gpio(np, "max8952,vid-gpios", 0);
        pd->gpio_vid1 = of_get_named_gpio(np, "max8952,vid-gpios", 1);
@@ -199,6 +196,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
        struct max8952_platform_data *pdata = dev_get_platdata(&client->dev);
        struct regulator_config config = { };
        struct max8952_data *max8952;
+       struct regulator_dev *rdev;
 
        int ret = 0, err = 0;
 
@@ -219,10 +217,9 @@ static int max8952_pmic_probe(struct i2c_client *client,
                return -ENOMEM;
 
        max8952->client = client;
-       max8952->dev = &client->dev;
        max8952->pdata = pdata;
 
-       config.dev = max8952->dev;
+       config.dev = &client->dev;
        config.init_data = pdata->reg_data;
        config.driver_data = max8952;
        config.of_node = client->dev.of_node;
@@ -231,11 +228,11 @@ static int max8952_pmic_probe(struct i2c_client *client,
        if (pdata->reg_data->constraints.boot_on)
                config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
 
-       max8952->rdev = regulator_register(&regulator, &config);
+       rdev = devm_regulator_register(&client->dev, &regulator, &config);
 
-       if (IS_ERR(max8952->rdev)) {
-               ret = PTR_ERR(max8952->rdev);
-               dev_err(max8952->dev, "regulator init failed (%d)\n", ret);
+       if (IS_ERR(rdev)) {
+               ret = PTR_ERR(rdev);
+               dev_err(&client->dev, "regulator init failed (%d)\n", ret);
                return ret;
        }
 
@@ -263,7 +260,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
                err = 3;
 
        if (err) {
-               dev_warn(max8952->dev, "VID0/1 gpio invalid: "
+               dev_warn(&client->dev, "VID0/1 gpio invalid: "
                                "DVS not available.\n");
                max8952->vid0 = 0;
                max8952->vid1 = 0;
@@ -274,7 +271,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
                /* Disable Pulldown of EN only */
                max8952_write_reg(max8952, MAX8952_REG_CONTROL, 0x60);
 
-               dev_err(max8952->dev, "DVS modes disabled because VID0 and VID1"
+               dev_err(&client->dev, "DVS modes disabled because VID0 and VID1"
                                " do not have proper controls.\n");
        } else {
                /*
@@ -321,9 +318,6 @@ static int max8952_pmic_remove(struct i2c_client *client)
 {
        struct max8952_data *max8952 = i2c_get_clientdata(client);
        struct max8952_platform_data *pdata = max8952->pdata;
-       struct regulator_dev *rdev = max8952->rdev;
-
-       regulator_unregister(rdev);
 
        gpio_free(pdata->gpio_vid0);
        gpio_free(pdata->gpio_vid1);
index 892aa1e5b96c65dc2d879be20b3bb97b290af67a..dbedf1768db0268f4f07f168bb89eb36f0c74429 100644 (file)
@@ -93,7 +93,6 @@
 struct max8973_chip {
        struct device *dev;
        struct regulator_desc desc;
-       struct regulator_dev *rdev;
        struct regmap *regmap;
        bool enable_external_control;
        int dvs_gpio;
@@ -379,10 +378,8 @@ static int max8973_probe(struct i2c_client *client,
        }
 
        max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL);
-       if (!max) {
-               dev_err(&client->dev, "Memory allocation for max failed\n");
+       if (!max)
                return -ENOMEM;
-       }
 
        max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config);
        if (IS_ERR(max->regmap)) {
@@ -474,7 +471,6 @@ static int max8973_probe(struct i2c_client *client,
                return ret;
        }
 
-       max->rdev = rdev;
        return 0;
 }
 
index 2d618fc9c1af51da55ebce43be2e7c62db8c3eab..90b4c530dee530b7255b21d238929d744de5abcd 100644 (file)
@@ -38,7 +38,6 @@ struct max8997_data {
        struct device *dev;
        struct max8997_dev *iodev;
        int num_regulators;
-       struct regulator_dev **rdev;
        int ramp_delay; /* in mV/us */
 
        bool buck1_gpiodvs;
@@ -924,7 +923,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
                return -ENODEV;
        }
 
-       regulators_np = of_find_node_by_name(pmic_np, "regulators");
+       regulators_np = of_get_child_by_name(pmic_np, "regulators");
        if (!regulators_np) {
                dev_err(&pdev->dev, "could not find regulators sub-node\n");
                return -EINVAL;
@@ -937,7 +936,6 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
                                pdata->num_regulators, GFP_KERNEL);
        if (!rdata) {
                of_node_put(regulators_np);
-               dev_err(&pdev->dev, "could not allocate memory for regulator data\n");
                return -ENOMEM;
        }
 
@@ -1030,10 +1028,10 @@ static int max8997_pmic_probe(struct platform_device *pdev)
        struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct max8997_platform_data *pdata = iodev->pdata;
        struct regulator_config config = { };
-       struct regulator_dev **rdev;
+       struct regulator_dev *rdev;
        struct max8997_data *max8997;
        struct i2c_client *i2c;
-       int i, ret, size, nr_dvs;
+       int i, ret, nr_dvs;
        u8 max_buck1 = 0, max_buck2 = 0, max_buck5 = 0;
 
        if (!pdata) {
@@ -1052,12 +1050,6 @@ static int max8997_pmic_probe(struct platform_device *pdev)
        if (!max8997)
                return -ENOMEM;
 
-       size = sizeof(struct regulator_dev *) * pdata->num_regulators;
-       max8997->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-       if (!max8997->rdev)
-               return -ENOMEM;
-
-       rdev = max8997->rdev;
        max8997->dev = &pdev->dev;
        max8997->iodev = iodev;
        max8997->num_regulators = pdata->num_regulators;
@@ -1205,12 +1197,12 @@ static int max8997_pmic_probe(struct platform_device *pdev)
                config.driver_data = max8997;
                config.of_node = pdata->regulators[i].reg_node;
 
-               rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
-                                                 &config);
-               if (IS_ERR(rdev[i])) {
+               rdev = devm_regulator_register(&pdev->dev, &regulators[id],
+                                              &config);
+               if (IS_ERR(rdev)) {
                        dev_err(max8997->dev, "regulator init failed for %d\n",
                                        id);
-                       return PTR_ERR(rdev[i]);
+                       return PTR_ERR(rdev);
                }
        }
 
index ae3f0656feb00c406ca438642c051a6453ab9171..961091b46557a40cc4e20029c9056afd4a34e80b 100644 (file)
@@ -40,7 +40,6 @@ struct max8998_data {
        struct device           *dev;
        struct max8998_dev      *iodev;
        int                     num_regulators;
-       struct regulator_dev    **rdev;
        u8                      buck1_vol[4]; /* voltages for selection */
        u8                      buck2_vol[2];
        unsigned int            buck1_idx; /* index to last changed voltage */
@@ -674,8 +673,10 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
 
        rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
                                pdata->num_regulators, GFP_KERNEL);
-       if (!rdata)
+       if (!rdata) {
+               of_node_put(regulators_np);
                return -ENOMEM;
+       }
 
        pdata->regulators = rdata;
        for (i = 0; i < ARRAY_SIZE(regulators); ++i) {
@@ -692,6 +693,9 @@ static int max8998_pmic_dt_parse_pdata(struct max8998_dev *iodev,
        }
        pdata->num_regulators = rdata - pdata->regulators;
 
+       of_node_put(reg_np);
+       of_node_put(regulators_np);
+
        ret = max8998_pmic_dt_parse_dvs_gpio(iodev, pdata, pmic_np);
        if (ret)
                return -EINVAL;
@@ -741,10 +745,10 @@ static int max8998_pmic_probe(struct platform_device *pdev)
        struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        struct max8998_platform_data *pdata = iodev->pdata;
        struct regulator_config config = { };
-       struct regulator_dev **rdev;
+       struct regulator_dev *rdev;
        struct max8998_data *max8998;
        struct i2c_client *i2c;
-       int i, ret, size;
+       int i, ret;
        unsigned int v;
 
        if (!pdata) {
@@ -763,12 +767,6 @@ static int max8998_pmic_probe(struct platform_device *pdev)
        if (!max8998)
                return -ENOMEM;
 
-       size = sizeof(struct regulator_dev *) * pdata->num_regulators;
-       max8998->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-       if (!max8998->rdev)
-               return -ENOMEM;
-
-       rdev = max8998->rdev;
        max8998->dev = &pdev->dev;
        max8998->iodev = iodev;
        max8998->num_regulators = pdata->num_regulators;
@@ -872,13 +870,12 @@ static int max8998_pmic_probe(struct platform_device *pdev)
                config.init_data = pdata->regulators[i].initdata;
                config.driver_data = max8998;
 
-               rdev[i] = devm_regulator_register(&pdev->dev,
-                                                 &regulators[index], &config);
-               if (IS_ERR(rdev[i])) {
-                       ret = PTR_ERR(rdev[i]);
+               rdev = devm_regulator_register(&pdev->dev, &regulators[index],
+                                              &config);
+               if (IS_ERR(rdev)) {
+                       ret = PTR_ERR(rdev);
                        dev_err(max8998->dev, "regulator %s init failed (%d)\n",
                                                regulators[index].name, ret);
-                       rdev[i] = NULL;
                        return ret;
                }
        }
index da48592823020af8a10b025816a74aaea0226758..05b971726ffaa0700e9147c912c35a83ebef9fc6 100644 (file)
@@ -167,8 +167,10 @@ int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
        struct device_node *parent;
        int num;
 
-       of_node_get(pdev->dev.parent->of_node);
-       parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+       if (!pdev->dev.parent->of_node)
+               return -ENODEV;
+
+       parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
        if (!parent)
                return -ENODEV;
 
@@ -187,8 +189,10 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
        struct device_node *parent, *child;
        int i, parsed = 0;
 
-       of_node_get(pdev->dev.parent->of_node);
-       parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+       if (!pdev->dev.parent->of_node)
+               return NULL;
+
+       parent = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
        if (!parent)
                return NULL;
 
index ab174f20ca11f473c889848b1d7e6b7545a9e3ef..67e678c4301c4d1e6dd0d4b4fc5b6238231557cd 100644 (file)
@@ -56,6 +56,8 @@
 #define PFUZE100_VGEN5VOL      0x70
 #define PFUZE100_VGEN6VOL      0x71
 
+enum chips { PFUZE100, PFUZE200 };
+
 struct pfuze_regulator {
        struct regulator_desc desc;
        unsigned char stby_reg;
@@ -63,6 +65,7 @@ struct pfuze_regulator {
 };
 
 struct pfuze_chip {
+       int     chip_id;
        struct regmap *regmap;
        struct device *dev;
        struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
@@ -78,21 +81,23 @@ static const int pfuze100_vsnvs[] = {
 };
 
 static const struct i2c_device_id pfuze_device_id[] = {
-       {.name = "pfuze100"},
-       {},
+       {.name = "pfuze100", .driver_data = PFUZE100},
+       {.name = "pfuze200", .driver_data = PFUZE200},
+       { }
 };
 MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
 
 static const struct of_device_id pfuze_dt_ids[] = {
-       { .compatible = "fsl,pfuze100" },
-       {},
+       { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
+       { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
+       { }
 };
 MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
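
With two entries in each ID table, probe() has to work out which variant it is driving: from the OF match data on device-tree boots, or from i2c_device_id.driver_data otherwise. A sketch of that selection with hypothetical names (not the pfuze100 driver's actual code):

#include <linux/i2c.h>
#include <linux/of_device.h>

enum example_chips { EXAMPLE100, EXAMPLE200 };

static const struct of_device_id example_dt_ids[] = {
        { .compatible = "vendor,example100", .data = (void *)EXAMPLE100 },
        { .compatible = "vendor,example200", .data = (void *)EXAMPLE200 },
        { }
};

static const struct i2c_device_id example_i2c_ids[] = {
        { .name = "example100", .driver_data = EXAMPLE100 },
        { .name = "example200", .driver_data = EXAMPLE200 },
        { }
};

static int example_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
{
        const struct of_device_id *match =
                of_match_device(example_dt_ids, &client->dev);
        int variant;

        if (match)
                variant = (long)match->data;    /* DT boot */
        else
                variant = id->driver_data;      /* legacy board code */

        dev_info(&client->dev, "probing variant %d\n", variant);
        return 0;
}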
 
 static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
 {
        struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
-       int id = rdev->desc->id;
+       int id = rdev_get_id(rdev);
        unsigned int ramp_bits;
        int ret;
 
@@ -139,14 +144,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
 
 };
 
-#define PFUZE100_FIXED_REG(_name, base, voltage)       \
-       [PFUZE100_ ## _name] = {        \
+#define PFUZE100_FIXED_REG(_chip, _name, base, voltage)        \
+       [_chip ## _ ## _name] = {       \
                .desc = {       \
                        .name = #_name, \
                        .n_voltages = 1,        \
                        .ops = &pfuze100_fixed_regulator_ops,   \
                        .type = REGULATOR_VOLTAGE,      \
-                       .id = PFUZE100_ ## _name,       \
+                       .id = _chip ## _ ## _name,      \
                        .owner = THIS_MODULE,   \
                        .min_uV = (voltage),    \
                        .enable_reg = (base),   \
@@ -154,14 +159,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
                },      \
        }
 
-#define PFUZE100_SW_REG(_name, base, min, max, step)   \
-       [PFUZE100_ ## _name] = {        \
+#define PFUZE100_SW_REG(_chip, _name, base, min, max, step)    \
+       [_chip ## _ ## _name] = {       \
                .desc = {       \
                        .name = #_name,\
                        .n_voltages = ((max) - (min)) / (step) + 1,     \
                        .ops = &pfuze100_sw_regulator_ops,      \
                        .type = REGULATOR_VOLTAGE,      \
-                       .id = PFUZE100_ ## _name,       \
+                       .id = _chip ## _ ## _name,      \
                        .owner = THIS_MODULE,   \
                        .min_uV = (min),        \
                        .uV_step = (step),      \
@@ -172,14 +177,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
                .stby_mask = 0x3f,      \
        }
 
-#define PFUZE100_SWB_REG(_name, base, mask, voltages)  \
-       [PFUZE100_ ## _name] = {        \
+#define PFUZE100_SWB_REG(_chip, _name, base, mask, voltages)   \
+       [_chip ## _ ##  _name] = {      \
                .desc = {       \
                        .name = #_name, \
                        .n_voltages = ARRAY_SIZE(voltages),     \
                        .ops = &pfuze100_swb_regulator_ops,     \
                        .type = REGULATOR_VOLTAGE,      \
-                       .id = PFUZE100_ ## _name,       \
+                       .id = _chip ## _ ## _name,      \
                        .owner = THIS_MODULE,   \
                        .volt_table = voltages, \
                        .vsel_reg = (base),     \
@@ -187,14 +192,14 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
                },      \
        }
 
-#define PFUZE100_VGEN_REG(_name, base, min, max, step) \
-       [PFUZE100_ ## _name] = {        \
+#define PFUZE100_VGEN_REG(_chip, _name, base, min, max, step)  \
+       [_chip ## _ ## _name] = {       \
                .desc = {       \
                        .name = #_name, \
                        .n_voltages = ((max) - (min)) / (step) + 1,     \
                        .ops = &pfuze100_ldo_regulator_ops,     \
                        .type = REGULATOR_VOLTAGE,      \
-                       .id = PFUZE100_ ## _name,       \
+                       .id = _chip ## _ ## _name,      \
                        .owner = THIS_MODULE,   \
                        .min_uV = (min),        \
                        .uV_step = (step),      \
@@ -207,25 +212,45 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
                .stby_mask = 0x20,      \
        }
 
+/* PFUZE100 */
 static struct pfuze_regulator pfuze100_regulators[] = {
-       PFUZE100_SW_REG(SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
-       PFUZE100_SW_REG(SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
-       PFUZE100_SW_REG(SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
-       PFUZE100_SW_REG(SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
-       PFUZE100_SW_REG(SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
-       PFUZE100_SW_REG(SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
-       PFUZE100_SWB_REG(SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
-       PFUZE100_SWB_REG(VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
-       PFUZE100_FIXED_REG(VREFDDR, PFUZE100_VREFDDRCON, 750000),
-       PFUZE100_VGEN_REG(VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
-       PFUZE100_VGEN_REG(VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
-       PFUZE100_VGEN_REG(VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
-       PFUZE100_VGEN_REG(VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
-       PFUZE100_VGEN_REG(VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
-       PFUZE100_VGEN_REG(VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+       PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+       PFUZE100_SW_REG(PFUZE100, SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
+       PFUZE100_SW_REG(PFUZE100, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(PFUZE100, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(PFUZE100, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(PFUZE100, SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
+       PFUZE100_SWB_REG(PFUZE100, SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
+       PFUZE100_SWB_REG(PFUZE100, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+       PFUZE100_FIXED_REG(PFUZE100, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+       PFUZE100_VGEN_REG(PFUZE100, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+       PFUZE100_VGEN_REG(PFUZE100, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+       PFUZE100_VGEN_REG(PFUZE100, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE100, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE100, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE100, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
+static struct pfuze_regulator pfuze200_regulators[] = {
+       PFUZE100_SW_REG(PFUZE200, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+       PFUZE100_SW_REG(PFUZE200, SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(PFUZE200, SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+       PFUZE100_SW_REG(PFUZE200, SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+       PFUZE100_SWB_REG(PFUZE200, SWBST, PFUZE100_SWBSTCON1, 0x3 , pfuze100_swbst),
+       PFUZE100_SWB_REG(PFUZE200, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+       PFUZE100_FIXED_REG(PFUZE200, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+       PFUZE100_VGEN_REG(PFUZE200, VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+       PFUZE100_VGEN_REG(PFUZE200, VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+       PFUZE100_VGEN_REG(PFUZE200, VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE200, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE200, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
 };
 
+static struct pfuze_regulator *pfuze_regulators;
+
 #ifdef CONFIG_OF
+/* PFUZE100 */
 static struct of_regulator_match pfuze100_matches[] = {
        { .name = "sw1ab",      },
        { .name = "sw1c",       },
@@ -244,24 +269,56 @@ static struct of_regulator_match pfuze100_matches[] = {
        { .name = "vgen6",      },
 };
 
+/* PFUZE200 */
+static struct of_regulator_match pfuze200_matches[] = {
+       { .name = "sw1ab",      },
+       { .name = "sw2",        },
+       { .name = "sw3a",       },
+       { .name = "sw3b",       },
+       { .name = "swbst",      },
+       { .name = "vsnvs",      },
+       { .name = "vrefddr",    },
+       { .name = "vgen1",      },
+       { .name = "vgen2",      },
+       { .name = "vgen3",      },
+       { .name = "vgen4",      },
+       { .name = "vgen5",      },
+       { .name = "vgen6",      },
+};
+
+static struct of_regulator_match *pfuze_matches;
+
 static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
 {
        struct device *dev = chip->dev;
        struct device_node *np, *parent;
        int ret;
 
-       np = of_node_get(dev->parent->of_node);
+       np = of_node_get(dev->of_node);
        if (!np)
-               return 0;
+               return -EINVAL;
 
-       parent = of_find_node_by_name(np, "regulators");
+       parent = of_get_child_by_name(np, "regulators");
        if (!parent) {
                dev_err(dev, "regulators node not found\n");
                return -EINVAL;
        }
 
-       ret = of_regulator_match(dev, parent, pfuze100_matches,
-                                ARRAY_SIZE(pfuze100_matches));
+       switch (chip->chip_id) {
+       case PFUZE200:
+               pfuze_matches = pfuze200_matches;
+               ret = of_regulator_match(dev, parent, pfuze200_matches,
+                                        ARRAY_SIZE(pfuze200_matches));
+               break;
+
+       case PFUZE100:
+       default:
+               pfuze_matches = pfuze100_matches;
+               ret = of_regulator_match(dev, parent, pfuze100_matches,
+                                        ARRAY_SIZE(pfuze100_matches));
+               break;
+       }
 
        of_node_put(parent);
        if (ret < 0) {
@@ -275,12 +332,12 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
 
 static inline struct regulator_init_data *match_init_data(int index)
 {
-       return pfuze100_matches[index].init_data;
+       return pfuze_matches[index].init_data;
 }
 
 static inline struct device_node *match_of_node(int index)
 {
-       return pfuze100_matches[index].of_node;
+       return pfuze_matches[index].of_node;
 }
 #else
 static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -308,16 +365,14 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
        if (ret)
                return ret;
 
-       switch (value & 0x0f) {
-       /*
-        * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013
-        * as ID=8
-        */
-       case 0x8:
+       if (((value & 0x0f) == 0x8) && (pfuze_chip->chip_id == PFUZE100)) {
+               /*
+                * Freescale misprogrammed 1-3% of parts prior to week 8 of 2013
+                * as ID=8 in PFUZE100
+                */
                dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
-       case 0x0:
-               break;
-       default:
+       } else if ((value & 0x0f) != pfuze_chip->chip_id) {
+               /* device ID does not match the configured chip */
                dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
                return -ENODEV;
        }
@@ -353,17 +408,31 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
            dev_get_platdata(&client->dev);
        struct regulator_config config = { };
        int i, ret;
+       const struct of_device_id *match;
+       u32 regulator_num;
+       u32 sw_check_start, sw_check_end;
 
        pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
                        GFP_KERNEL);
        if (!pfuze_chip)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, pfuze_chip);
-
-       memcpy(pfuze_chip->regulator_descs, pfuze100_regulators,
-               sizeof(pfuze_chip->regulator_descs));
+       if (client->dev.of_node) {
+               match = of_match_device(of_match_ptr(pfuze_dt_ids),
+                               &client->dev);
+               if (!match) {
+                       dev_err(&client->dev, "Error: No device match found\n");
+                       return -ENODEV;
+               }
+               pfuze_chip->chip_id = (int)(long)match->data;
+       } else if (id) {
+               pfuze_chip->chip_id = id->driver_data;
+       } else {
+               dev_err(&client->dev, "No dts match or id table match found\n");
+               return -ENODEV;
+       }
 
+       i2c_set_clientdata(client, pfuze_chip);
        pfuze_chip->dev = &client->dev;
 
        pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config);
@@ -380,11 +449,34 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                return ret;
        }
 
+       /* select the regulator table now that the device has been identified */
+       switch (pfuze_chip->chip_id) {
+       case PFUZE200:
+               pfuze_regulators = pfuze200_regulators;
+               regulator_num = ARRAY_SIZE(pfuze200_regulators);
+               sw_check_start = PFUZE200_SW2;
+               sw_check_end = PFUZE200_SW3B;
+               break;
+
+       case PFUZE100:
+       default:
+               pfuze_regulators = pfuze100_regulators;
+               regulator_num = ARRAY_SIZE(pfuze100_regulators);
+               sw_check_start = PFUZE100_SW2;
+               sw_check_end = PFUZE100_SW4;
+               break;
+       }
+       dev_info(&client->dev, "pfuze%s found.\n",
+               (pfuze_chip->chip_id == PFUZE100) ? "100" : "200");
+
+       memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
+               sizeof(pfuze_chip->regulator_descs));
+
        ret = pfuze_parse_regulators_dt(pfuze_chip);
        if (ret)
                return ret;
 
-       for (i = 0; i < PFUZE100_MAX_REGULATOR; i++) {
+       for (i = 0; i < regulator_num; i++) {
                struct regulator_init_data *init_data;
                struct regulator_desc *desc;
                int val;
@@ -397,7 +489,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                        init_data = match_init_data(i);
 
                /* SW2~SW4 high bit check and modify the voltage value table */
-               if (i > PFUZE100_SW1C && i < PFUZE100_SWBST) {
+               if (i >= sw_check_start && i <= sw_check_end) {
                        regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
                        if (val & 0x40) {
                                desc->min_uV = 800000;
@@ -415,7 +507,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                        devm_regulator_register(&client->dev, desc, &config);
                if (IS_ERR(pfuze_chip->regulators[i])) {
                        dev_err(&client->dev, "register regulator%s failed\n",
-                               pfuze100_regulators[i].desc.name);
+                               pfuze_regulators[i].desc.name);
                        return PTR_ERR(pfuze_chip->regulators[i]);
                }
        }
@@ -435,6 +527,6 @@ static struct i2c_driver pfuze_driver = {
 module_i2c_driver(pfuze_driver);
 
 MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
-MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100 PMIC");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("i2c:pfuze100-regulator");
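
The pfuze100 changes above follow one pattern: both the I2C id table (.driver_data) and the OF match table (.data) carry a chip identifier, and probe() uses it to pick the regulator array and the SW voltage-check range. A minimal user-space sketch of that lookup; the structures below are illustrative stand-ins, not the kernel's of_match_device() machinery:

#include <stdio.h>
#include <string.h>

enum chip_id { PFUZE100, PFUZE200 };

/* Stand-in for the OF match table: compatible string plus chip id. */
struct of_match { const char *compatible; enum chip_id chip; };

static const struct of_match dt_ids[] = {
	{ "fsl,pfuze100", PFUZE100 },
	{ "fsl,pfuze200", PFUZE200 },
};

static int lookup_chip(const char *compatible, enum chip_id *chip)
{
	unsigned int i;

	for (i = 0; i < sizeof(dt_ids) / sizeof(dt_ids[0]); i++) {
		if (strcmp(dt_ids[i].compatible, compatible) == 0) {
			*chip = dt_ids[i].chip;
			return 0;
		}
	}
	return -1;	/* probe would return -ENODEV here */
}

int main(void)
{
	enum chip_id chip;

	if (lookup_chip("fsl,pfuze200", &chip) == 0)
		printf("matched chip %d -> use the %s regulator table\n",
		       chip, chip == PFUZE100 ? "pfuze100" : "pfuze200");
	return 0;
}
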
index b58affb33143903d9044a8de2a614a8f20ee0e5f..4c414ae109ae5eb6edc44094e0c608a325a9cb63 100644 (file)
@@ -119,7 +119,6 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
 {
        struct rc5t583 *rc5t583 = dev_get_drvdata(pdev->dev.parent);
        struct rc5t583_platform_data *pdata = dev_get_platdata(rc5t583->dev);
-       struct regulator_init_data *reg_data;
        struct regulator_config config = { };
        struct rc5t583_regulator *reg = NULL;
        struct rc5t583_regulator *regs;
@@ -135,19 +134,11 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
 
        regs = devm_kzalloc(&pdev->dev, RC5T583_REGULATOR_MAX *
                        sizeof(struct rc5t583_regulator), GFP_KERNEL);
-       if (!regs) {
-               dev_err(&pdev->dev, "Memory allocation failed exiting..\n");
+       if (!regs)
                return -ENOMEM;
-       }
 
 
        for (id = 0; id < RC5T583_REGULATOR_MAX; ++id) {
-               reg_data = pdata->reg_init_data[id];
-
-               /* No need to register if there is no regulator data */
-               if (!reg_data)
-                       continue;
-
                reg = &regs[id];
                ri = &rc5t583_reg_info[id];
                reg->reg_info = ri;
@@ -169,7 +160,7 @@ static int rc5t583_regulator_probe(struct platform_device *pdev)
 
 skip_ext_pwr_config:
                config.dev = &pdev->dev;
-               config.init_data = reg_data;
+               config.init_data = pdata->reg_init_data[id];
                config.driver_data = reg;
                config.regmap = rc5t583->regmap;
 
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
new file mode 100644 (file)
index 0000000..808b3aa
--- /dev/null
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ *             http://www.samsung.com
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mpa01.h>
+
+#define S2MPA01_REGULATOR_CNT ARRAY_SIZE(regulators)
+
+struct s2mpa01_info {
+       int ramp_delay24;
+       int ramp_delay3;
+       int ramp_delay5;
+       int ramp_delay16;
+       int ramp_delay7;
+       int ramp_delay8910;
+};
+
+static int get_ramp_delay(int ramp_delay)
+{
+       unsigned char cnt = 0;
+
+       ramp_delay /= 6250;
+
+       while (true) {
+               ramp_delay = ramp_delay >> 1;
+               if (ramp_delay == 0)
+                       break;
+               cnt++;
+       }
+
+       if (cnt > 3)
+               cnt = 3;
+
+       return cnt;
+}
+
+static int s2mpa01_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+                                  unsigned int old_selector,
+                                  unsigned int new_selector)
+{
+       struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
+       unsigned int ramp_delay = 0;
+       int old_volt, new_volt;
+
+       switch (rdev->desc->id) {
+       case S2MPA01_BUCK2:
+       case S2MPA01_BUCK4:
+               ramp_delay = s2mpa01->ramp_delay24;
+               break;
+       case S2MPA01_BUCK3:
+               ramp_delay = s2mpa01->ramp_delay3;
+               break;
+       case S2MPA01_BUCK5:
+               ramp_delay = s2mpa01->ramp_delay5;
+               break;
+       case S2MPA01_BUCK1:
+       case S2MPA01_BUCK6:
+               ramp_delay = s2mpa01->ramp_delay16;
+               break;
+       case S2MPA01_BUCK7:
+               ramp_delay = s2mpa01->ramp_delay7;
+               break;
+       case S2MPA01_BUCK8:
+       case S2MPA01_BUCK9:
+       case S2MPA01_BUCK10:
+               ramp_delay = s2mpa01->ramp_delay8910;
+               break;
+       }
+
+       if (ramp_delay == 0)
+               ramp_delay = rdev->desc->ramp_delay;
+
+       old_volt = rdev->desc->min_uV + (rdev->desc->uV_step * old_selector);
+       new_volt = rdev->desc->min_uV + (rdev->desc->uV_step * new_selector);
+
+       return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+}
+
+static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+       struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev);
+       unsigned int ramp_val, ramp_shift, ramp_reg = S2MPA01_REG_RAMP2;
+       unsigned int ramp_enable = 1, enable_shift = 0;
+       int ret;
+
+       switch (rdev->desc->id) {
+       case S2MPA01_BUCK1:
+               enable_shift = S2MPA01_BUCK1_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               if (ramp_delay > s2mpa01->ramp_delay16)
+                       s2mpa01->ramp_delay16 = ramp_delay;
+               else
+                       ramp_delay = s2mpa01->ramp_delay16;
+
+               ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
+               ramp_reg = S2MPA01_REG_RAMP1;
+               break;
+       case S2MPA01_BUCK2:
+               enable_shift = S2MPA01_BUCK2_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               if (ramp_delay > s2mpa01->ramp_delay24)
+                       s2mpa01->ramp_delay24 = ramp_delay;
+               else
+                       ramp_delay = s2mpa01->ramp_delay24;
+
+               ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
+               ramp_reg = S2MPA01_REG_RAMP1;
+               break;
+       case S2MPA01_BUCK3:
+               enable_shift = S2MPA01_BUCK3_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               s2mpa01->ramp_delay3 = ramp_delay;
+               ramp_shift = S2MPA01_BUCK3_RAMP_SHIFT;
+               ramp_reg = S2MPA01_REG_RAMP1;
+               break;
+       case S2MPA01_BUCK4:
+               enable_shift = S2MPA01_BUCK4_RAMP_EN_SHIFT;
+               if (!ramp_delay) {
+                       ramp_enable = 0;
+                       break;
+               }
+
+               if (ramp_delay > s2mpa01->ramp_delay24)
+                       s2mpa01->ramp_delay24 = ramp_delay;
+               else
+                       ramp_delay = s2mpa01->ramp_delay24;
+
+               ramp_shift = S2MPA01_BUCK24_RAMP_SHIFT;
+               ramp_reg = S2MPA01_REG_RAMP1;
+               break;
+       case S2MPA01_BUCK5:
+               s2mpa01->ramp_delay5 = ramp_delay;
+               ramp_shift = S2MPA01_BUCK5_RAMP_SHIFT;
+               break;
+       case S2MPA01_BUCK6:
+               if (ramp_delay > s2mpa01->ramp_delay16)
+                       s2mpa01->ramp_delay16 = ramp_delay;
+               else
+                       ramp_delay = s2mpa01->ramp_delay16;
+
+               ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
+               break;
+       case S2MPA01_BUCK7:
+               s2mpa01->ramp_delay7 = ramp_delay;
+               ramp_shift = S2MPA01_BUCK7_RAMP_SHIFT;
+               break;
+       case S2MPA01_BUCK8:
+       case S2MPA01_BUCK9:
+       case S2MPA01_BUCK10:
+               if (ramp_delay > s2mpa01->ramp_delay8910)
+                       s2mpa01->ramp_delay8910 = ramp_delay;
+               else
+                       ramp_delay = s2mpa01->ramp_delay8910;
+
+               ramp_shift = S2MPA01_BUCK8910_RAMP_SHIFT;
+               break;
+       default:
+               return 0;
+       }
+
+       if (!ramp_enable)
+               goto ramp_disable;
+
+       if (enable_shift) {
+               ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+                                       1 << enable_shift, 1 << enable_shift);
+               if (ret) {
+                       dev_err(&rdev->dev, "failed to enable ramp rate\n");
+                       return ret;
+               }
+       }
+
+       ramp_val = get_ramp_delay(ramp_delay);
+
+       return regmap_update_bits(rdev->regmap, ramp_reg, 0x3 << ramp_shift,
+                                 ramp_val << ramp_shift);
+
+ramp_disable:
+       return regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+                                 1 << enable_shift, 0);
+}
+
+static struct regulator_ops s2mpa01_ldo_ops = {
+       .list_voltage           = regulator_list_voltage_linear,
+       .map_voltage            = regulator_map_voltage_linear,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .set_voltage_time_sel   = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops s2mpa01_buck_ops = {
+       .list_voltage           = regulator_list_voltage_linear,
+       .map_voltage            = regulator_map_voltage_linear,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .set_voltage_time_sel   = s2mpa01_regulator_set_voltage_time_sel,
+       .set_ramp_delay         = s2mpa01_set_ramp_delay,
+};
+
+#define regulator_desc_ldo1(num)       {               \
+       .name           = "LDO"#num,                    \
+       .id             = S2MPA01_LDO##num,             \
+       .ops            = &s2mpa01_ldo_ops,             \
+       .type           = REGULATOR_VOLTAGE,            \
+       .owner          = THIS_MODULE,                  \
+       .min_uV         = S2MPA01_LDO_MIN,              \
+       .uV_step        = S2MPA01_LDO_STEP1,            \
+       .n_voltages     = S2MPA01_LDO_N_VOLTAGES,       \
+       .vsel_reg       = S2MPA01_REG_L1CTRL + num - 1, \
+       .vsel_mask      = S2MPA01_LDO_VSEL_MASK,        \
+       .enable_reg     = S2MPA01_REG_L1CTRL + num - 1, \
+       .enable_mask    = S2MPA01_ENABLE_MASK           \
+}
+#define regulator_desc_ldo2(num)       {               \
+       .name           = "LDO"#num,                    \
+       .id             = S2MPA01_LDO##num,             \
+       .ops            = &s2mpa01_ldo_ops,             \
+       .type           = REGULATOR_VOLTAGE,            \
+       .owner          = THIS_MODULE,                  \
+       .min_uV         = S2MPA01_LDO_MIN,              \
+       .uV_step        = S2MPA01_LDO_STEP2,            \
+       .n_voltages     = S2MPA01_LDO_N_VOLTAGES,       \
+       .vsel_reg       = S2MPA01_REG_L1CTRL + num - 1, \
+       .vsel_mask      = S2MPA01_LDO_VSEL_MASK,        \
+       .enable_reg     = S2MPA01_REG_L1CTRL + num - 1, \
+       .enable_mask    = S2MPA01_ENABLE_MASK           \
+}
+
+#define regulator_desc_buck1_4(num)    {                       \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPA01_BUCK##num,                    \
+       .ops            = &s2mpa01_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPA01_BUCK_MIN1,                    \
+       .uV_step        = S2MPA01_BUCK_STEP1,                   \
+       .n_voltages     = S2MPA01_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPA01_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPA01_REG_B1CTRL2 + (num - 1) * 2,  \
+       .vsel_mask      = S2MPA01_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPA01_REG_B1CTRL1 + (num - 1) * 2,  \
+       .enable_mask    = S2MPA01_ENABLE_MASK                   \
+}
+
+#define regulator_desc_buck5   {                               \
+       .name           = "BUCK5",                              \
+       .id             = S2MPA01_BUCK5,                        \
+       .ops            = &s2mpa01_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPA01_BUCK_MIN2,                    \
+       .uV_step        = S2MPA01_BUCK_STEP1,                   \
+       .n_voltages     = S2MPA01_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPA01_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPA01_REG_B5CTRL2,                  \
+       .vsel_mask      = S2MPA01_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPA01_REG_B5CTRL1,                  \
+       .enable_mask    = S2MPA01_ENABLE_MASK                   \
+}
+
+#define regulator_desc_buck6_7(num)    {                       \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPA01_BUCK##num,                    \
+       .ops            = &s2mpa01_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPA01_BUCK_MIN1,                    \
+       .uV_step        = S2MPA01_BUCK_STEP1,                   \
+       .n_voltages     = S2MPA01_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPA01_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPA01_REG_B6CTRL2 + (num - 6) * 2,  \
+       .vsel_mask      = S2MPA01_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPA01_REG_B6CTRL1 + (num - 6) * 2,  \
+       .enable_mask    = S2MPA01_ENABLE_MASK                   \
+}
+
+#define regulator_desc_buck8   {                               \
+       .name           = "BUCK8",                              \
+       .id             = S2MPA01_BUCK8,                        \
+       .ops            = &s2mpa01_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPA01_BUCK_MIN2,                    \
+       .uV_step        = S2MPA01_BUCK_STEP2,                   \
+       .n_voltages     = S2MPA01_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPA01_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPA01_REG_B8CTRL2,                  \
+       .vsel_mask      = S2MPA01_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPA01_REG_B8CTRL1,                  \
+       .enable_mask    = S2MPA01_ENABLE_MASK                   \
+}
+
+#define regulator_desc_buck9   {                               \
+       .name           = "BUCK9",                              \
+       .id             = S2MPA01_BUCK9,                        \
+       .ops            = &s2mpa01_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPA01_BUCK_MIN4,                    \
+       .uV_step        = S2MPA01_BUCK_STEP2,                   \
+       .n_voltages     = S2MPA01_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPA01_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPA01_REG_B9CTRL2,                  \
+       .vsel_mask      = S2MPA01_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPA01_REG_B9CTRL1,                  \
+       .enable_mask    = S2MPA01_ENABLE_MASK                   \
+}
+
+#define regulator_desc_buck10  {                               \
+       .name           = "BUCK10",                             \
+       .id             = S2MPA01_BUCK10,                       \
+       .ops            = &s2mpa01_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPA01_BUCK_MIN3,                    \
+       .uV_step        = S2MPA01_BUCK_STEP2,                   \
+       .n_voltages     = S2MPA01_BUCK_N_VOLTAGES,              \
+       .ramp_delay     = S2MPA01_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPA01_REG_B10CTRL2,                 \
+       .vsel_mask      = S2MPA01_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPA01_REG_B10CTRL1,                 \
+       .enable_mask    = S2MPA01_ENABLE_MASK                   \
+}
+
+static struct regulator_desc regulators[] = {
+       regulator_desc_ldo2(1),
+       regulator_desc_ldo1(2),
+       regulator_desc_ldo1(3),
+       regulator_desc_ldo1(4),
+       regulator_desc_ldo1(5),
+       regulator_desc_ldo2(6),
+       regulator_desc_ldo1(7),
+       regulator_desc_ldo1(8),
+       regulator_desc_ldo1(9),
+       regulator_desc_ldo1(10),
+       regulator_desc_ldo2(11),
+       regulator_desc_ldo1(12),
+       regulator_desc_ldo1(13),
+       regulator_desc_ldo1(14),
+       regulator_desc_ldo1(15),
+       regulator_desc_ldo1(16),
+       regulator_desc_ldo1(17),
+       regulator_desc_ldo1(18),
+       regulator_desc_ldo1(19),
+       regulator_desc_ldo1(20),
+       regulator_desc_ldo1(21),
+       regulator_desc_ldo2(22),
+       regulator_desc_ldo2(23),
+       regulator_desc_ldo1(24),
+       regulator_desc_ldo1(25),
+       regulator_desc_ldo1(26),
+       regulator_desc_buck1_4(1),
+       regulator_desc_buck1_4(2),
+       regulator_desc_buck1_4(3),
+       regulator_desc_buck1_4(4),
+       regulator_desc_buck5,
+       regulator_desc_buck6_7(6),
+       regulator_desc_buck6_7(7),
+       regulator_desc_buck8,
+       regulator_desc_buck9,
+       regulator_desc_buck10,
+};
+
+static int s2mpa01_pmic_probe(struct platform_device *pdev)
+{
+       struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+       struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+       struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX];
+       struct device_node *reg_np = NULL;
+       struct regulator_config config = { };
+       struct s2mpa01_info *s2mpa01;
+       int i;
+
+       s2mpa01 = devm_kzalloc(&pdev->dev, sizeof(*s2mpa01), GFP_KERNEL);
+       if (!s2mpa01)
+               return -ENOMEM;
+
+       for (i = 0; i < S2MPA01_REGULATOR_CNT; i++)
+               rdata[i].name = regulators[i].name;
+
+       if (iodev->dev->of_node) {
+               reg_np = of_get_child_by_name(iodev->dev->of_node,
+                                                       "regulators");
+               if (!reg_np) {
+                       dev_err(&pdev->dev,
+                               "could not find regulators sub-node\n");
+                       return -EINVAL;
+               }
+
+               of_regulator_match(&pdev->dev, reg_np, rdata,
+                                               S2MPA01_REGULATOR_MAX);
+               of_node_put(reg_np);
+       }
+
+       platform_set_drvdata(pdev, s2mpa01);
+
+       config.dev = &pdev->dev;
+       config.regmap = iodev->regmap_pmic;
+       config.driver_data = s2mpa01;
+
+       for (i = 0; i < S2MPA01_REGULATOR_MAX; i++) {
+               struct regulator_dev *rdev;
+               if (pdata)
+                       config.init_data = pdata->regulators[i].initdata;
+               else
+                       config.init_data = rdata[i].init_data;
+
+               if (reg_np)
+                       config.of_node = rdata[i].of_node;
+
+               rdev = devm_regulator_register(&pdev->dev,
+                                               &regulators[i], &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev, "regulator init failed for %d\n",
+                               i);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id s2mpa01_pmic_id[] = {
+       { "s2mpa01-pmic", 0},
+       { },
+};
+MODULE_DEVICE_TABLE(platform, s2mpa01_pmic_id);
+
+static struct platform_driver s2mpa01_pmic_driver = {
+       .driver = {
+               .name = "s2mpa01-pmic",
+               .owner = THIS_MODULE,
+       },
+       .probe = s2mpa01_pmic_probe,
+       .id_table = s2mpa01_pmic_id,
+};
+
+module_platform_driver(s2mpa01_pmic_driver);
+
+/* Module information */
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>");
+MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
+MODULE_LICENSE("GPL");
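
In the new s2mpa01 driver, get_ramp_delay() encodes a requested ramp rate into a 2-bit register field: divide by the 6250 base step (microvolts per microsecond by the usual regulator convention — an assumption here), then take a clamped floor(log2). A standalone sketch that reproduces the mapping:

#include <stdio.h>

/* Same mapping as get_ramp_delay() above: ramp_delay / 6250, then
 * floor(log2) by repeated halving, clamped to the 2-bit range 0..3. */
static int encode_ramp_delay(int ramp_delay)
{
	unsigned char cnt = 0;

	ramp_delay /= 6250;
	while (ramp_delay >> 1) {
		ramp_delay >>= 1;
		cnt++;
	}
	return cnt > 3 ? 3 : cnt;
}

int main(void)
{
	int rates[] = { 6250, 12500, 25000, 50000, 100000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%6d -> field value %d\n", rates[i],
		       encode_ramp_delay(rates[i]));
	return 0;
}
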
index cd0b9e35a56d90ccc4fd78fe5fe781f2f009cd77..68fd54702edbf12436fbd5f7b7ebd2a6cb68e7ef 100644 (file)
@@ -1,13 +1,18 @@
 /*
  * s2mps11.c
  *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * Copyright (c) 2012-2014 Samsung Electronics Co., Ltd
  *              http://www.samsung.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  *
  */
 
 #include <linux/regulator/of_regulator.h>
 #include <linux/mfd/samsung/core.h>
 #include <linux/mfd/samsung/s2mps11.h>
-
-#define S2MPS11_REGULATOR_CNT ARRAY_SIZE(regulators)
+#include <linux/mfd/samsung/s2mps14.h>
 
 struct s2mps11_info {
-       struct regulator_dev *rdev[S2MPS11_REGULATOR_MAX];
-
+       unsigned int rdev_num;
        int ramp_delay2;
        int ramp_delay34;
        int ramp_delay5;
        int ramp_delay16;
        int ramp_delay7810;
        int ramp_delay9;
+       /*
+        * One bit for each S2MPS14 regulator, indicating whether suspend
+        * mode was enabled for it.
+        */
+       unsigned int s2mps14_suspend_state:30;
 };
 
 static int get_ramp_delay(int ramp_delay)
@@ -65,7 +73,7 @@ static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
        unsigned int ramp_delay = 0;
        int old_volt, new_volt;
 
-       switch (rdev->desc->id) {
+       switch (rdev_get_id(rdev)) {
        case S2MPS11_BUCK2:
                ramp_delay = s2mps11->ramp_delay2;
                break;
@@ -105,7 +113,7 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
        unsigned int ramp_enable = 1, enable_shift = 0;
        int ret;
 
-       switch (rdev->desc->id) {
+       switch (rdev_get_id(rdev)) {
        case S2MPS11_BUCK1:
                if (ramp_delay > s2mps11->ramp_delay16)
                        s2mps11->ramp_delay16 = ramp_delay;
@@ -236,7 +244,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .set_ramp_delay         = s2mps11_set_ramp_delay,
 };
 
-#define regulator_desc_ldo1(num)       {               \
+#define regulator_desc_s2mps11_ldo1(num)       {               \
        .name           = "LDO"#num,                    \
        .id             = S2MPS11_LDO##num,             \
        .ops            = &s2mps11_ldo_ops,             \
@@ -250,7 +258,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_reg     = S2MPS11_REG_L1CTRL + num - 1, \
        .enable_mask    = S2MPS11_ENABLE_MASK           \
 }
-#define regulator_desc_ldo2(num)       {               \
+#define regulator_desc_s2mps11_ldo2(num) {             \
        .name           = "LDO"#num,                    \
        .id             = S2MPS11_LDO##num,             \
        .ops            = &s2mps11_ldo_ops,             \
@@ -265,7 +273,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK           \
 }
 
-#define regulator_desc_buck1_4(num)    {                       \
+#define regulator_desc_s2mps11_buck1_4(num) {                  \
        .name           = "BUCK"#num,                           \
        .id             = S2MPS11_BUCK##num,                    \
        .ops            = &s2mps11_buck_ops,                    \
@@ -281,7 +289,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
-#define regulator_desc_buck5   {                               \
+#define regulator_desc_s2mps11_buck5 {                         \
        .name           = "BUCK5",                              \
        .id             = S2MPS11_BUCK5,                        \
        .ops            = &s2mps11_buck_ops,                    \
@@ -297,7 +305,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
-#define regulator_desc_buck6_8(num)    {                       \
+#define regulator_desc_s2mps11_buck6_8(num) {                  \
        .name           = "BUCK"#num,                           \
        .id             = S2MPS11_BUCK##num,                    \
        .ops            = &s2mps11_buck_ops,                    \
@@ -313,7 +321,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
-#define regulator_desc_buck9   {                               \
+#define regulator_desc_s2mps11_buck9 {                         \
        .name           = "BUCK9",                              \
        .id             = S2MPS11_BUCK9,                        \
        .ops            = &s2mps11_buck_ops,                    \
@@ -329,7 +337,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
-#define regulator_desc_buck10  {                               \
+#define regulator_desc_s2mps11_buck10 {                                \
        .name           = "BUCK10",                             \
        .id             = S2MPS11_BUCK10,                       \
        .ops            = &s2mps11_buck_ops,                    \
@@ -345,72 +353,252 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
-static struct regulator_desc regulators[] = {
-       regulator_desc_ldo2(1),
-       regulator_desc_ldo1(2),
-       regulator_desc_ldo1(3),
-       regulator_desc_ldo1(4),
-       regulator_desc_ldo1(5),
-       regulator_desc_ldo2(6),
-       regulator_desc_ldo1(7),
-       regulator_desc_ldo1(8),
-       regulator_desc_ldo1(9),
-       regulator_desc_ldo1(10),
-       regulator_desc_ldo2(11),
-       regulator_desc_ldo1(12),
-       regulator_desc_ldo1(13),
-       regulator_desc_ldo1(14),
-       regulator_desc_ldo1(15),
-       regulator_desc_ldo1(16),
-       regulator_desc_ldo1(17),
-       regulator_desc_ldo1(18),
-       regulator_desc_ldo1(19),
-       regulator_desc_ldo1(20),
-       regulator_desc_ldo1(21),
-       regulator_desc_ldo2(22),
-       regulator_desc_ldo2(23),
-       regulator_desc_ldo1(24),
-       regulator_desc_ldo1(25),
-       regulator_desc_ldo1(26),
-       regulator_desc_ldo2(27),
-       regulator_desc_ldo1(28),
-       regulator_desc_ldo1(29),
-       regulator_desc_ldo1(30),
-       regulator_desc_ldo1(31),
-       regulator_desc_ldo1(32),
-       regulator_desc_ldo1(33),
-       regulator_desc_ldo1(34),
-       regulator_desc_ldo1(35),
-       regulator_desc_ldo1(36),
-       regulator_desc_ldo1(37),
-       regulator_desc_ldo1(38),
-       regulator_desc_buck1_4(1),
-       regulator_desc_buck1_4(2),
-       regulator_desc_buck1_4(3),
-       regulator_desc_buck1_4(4),
-       regulator_desc_buck5,
-       regulator_desc_buck6_8(6),
-       regulator_desc_buck6_8(7),
-       regulator_desc_buck6_8(8),
-       regulator_desc_buck9,
-       regulator_desc_buck10,
+static const struct regulator_desc s2mps11_regulators[] = {
+       regulator_desc_s2mps11_ldo2(1),
+       regulator_desc_s2mps11_ldo1(2),
+       regulator_desc_s2mps11_ldo1(3),
+       regulator_desc_s2mps11_ldo1(4),
+       regulator_desc_s2mps11_ldo1(5),
+       regulator_desc_s2mps11_ldo2(6),
+       regulator_desc_s2mps11_ldo1(7),
+       regulator_desc_s2mps11_ldo1(8),
+       regulator_desc_s2mps11_ldo1(9),
+       regulator_desc_s2mps11_ldo1(10),
+       regulator_desc_s2mps11_ldo2(11),
+       regulator_desc_s2mps11_ldo1(12),
+       regulator_desc_s2mps11_ldo1(13),
+       regulator_desc_s2mps11_ldo1(14),
+       regulator_desc_s2mps11_ldo1(15),
+       regulator_desc_s2mps11_ldo1(16),
+       regulator_desc_s2mps11_ldo1(17),
+       regulator_desc_s2mps11_ldo1(18),
+       regulator_desc_s2mps11_ldo1(19),
+       regulator_desc_s2mps11_ldo1(20),
+       regulator_desc_s2mps11_ldo1(21),
+       regulator_desc_s2mps11_ldo2(22),
+       regulator_desc_s2mps11_ldo2(23),
+       regulator_desc_s2mps11_ldo1(24),
+       regulator_desc_s2mps11_ldo1(25),
+       regulator_desc_s2mps11_ldo1(26),
+       regulator_desc_s2mps11_ldo2(27),
+       regulator_desc_s2mps11_ldo1(28),
+       regulator_desc_s2mps11_ldo1(29),
+       regulator_desc_s2mps11_ldo1(30),
+       regulator_desc_s2mps11_ldo1(31),
+       regulator_desc_s2mps11_ldo1(32),
+       regulator_desc_s2mps11_ldo1(33),
+       regulator_desc_s2mps11_ldo1(34),
+       regulator_desc_s2mps11_ldo1(35),
+       regulator_desc_s2mps11_ldo1(36),
+       regulator_desc_s2mps11_ldo1(37),
+       regulator_desc_s2mps11_ldo1(38),
+       regulator_desc_s2mps11_buck1_4(1),
+       regulator_desc_s2mps11_buck1_4(2),
+       regulator_desc_s2mps11_buck1_4(3),
+       regulator_desc_s2mps11_buck1_4(4),
+       regulator_desc_s2mps11_buck5,
+       regulator_desc_s2mps11_buck6_8(6),
+       regulator_desc_s2mps11_buck6_8(7),
+       regulator_desc_s2mps11_buck6_8(8),
+       regulator_desc_s2mps11_buck9,
+       regulator_desc_s2mps11_buck10,
+};
+
+static int s2mps14_regulator_enable(struct regulator_dev *rdev)
+{
+       struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+       unsigned int val;
+
+       if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+               val = S2MPS14_ENABLE_SUSPEND;
+       else
+               val = rdev->desc->enable_mask;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+                       rdev->desc->enable_mask, val);
+}
+
+static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
+{
+       int ret;
+       unsigned int val;
+       struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+
+       /* LDO3 should always be on and does not support suspend mode */
+       if (rdev_get_id(rdev) == S2MPS14_LDO3)
+               return 0;
+
+       ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+       if (ret < 0)
+               return ret;
+
+       s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+       /*
+        * Don't enable suspend mode if the regulator is already disabled,
+        * because that would briefly turn the regulator on after resume.
+        * However, still set the suspend_state bit for the regulator in
+        * case it gets enabled before the system suspends.
+        */
+       if (!(val & rdev->desc->enable_mask))
+               return 0;
+
+       return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+                       rdev->desc->enable_mask, S2MPS14_ENABLE_SUSPEND);
+}
+
+static struct regulator_ops s2mps14_reg_ops = {
+       .list_voltage           = regulator_list_voltage_linear,
+       .map_voltage            = regulator_map_voltage_linear,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = s2mps14_regulator_enable,
+       .disable                = regulator_disable_regmap,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .set_voltage_time_sel   = regulator_set_voltage_time_sel,
+       .set_suspend_disable    = s2mps14_regulator_set_suspend_disable,
+};
+
+#define regulator_desc_s2mps14_ldo1(num) {             \
+       .name           = "LDO"#num,                    \
+       .id             = S2MPS14_LDO##num,             \
+       .ops            = &s2mps14_reg_ops,             \
+       .type           = REGULATOR_VOLTAGE,            \
+       .owner          = THIS_MODULE,                  \
+       .min_uV         = S2MPS14_LDO_MIN_800MV,        \
+       .uV_step        = S2MPS14_LDO_STEP_25MV,        \
+       .n_voltages     = S2MPS14_LDO_N_VOLTAGES,       \
+       .vsel_reg       = S2MPS14_REG_L1CTRL + num - 1, \
+       .vsel_mask      = S2MPS14_LDO_VSEL_MASK,        \
+       .enable_reg     = S2MPS14_REG_L1CTRL + num - 1, \
+       .enable_mask    = S2MPS14_ENABLE_MASK           \
+}
+#define regulator_desc_s2mps14_ldo2(num) {             \
+       .name           = "LDO"#num,                    \
+       .id             = S2MPS14_LDO##num,             \
+       .ops            = &s2mps14_reg_ops,             \
+       .type           = REGULATOR_VOLTAGE,            \
+       .owner          = THIS_MODULE,                  \
+       .min_uV         = S2MPS14_LDO_MIN_1800MV,       \
+       .uV_step        = S2MPS14_LDO_STEP_25MV,        \
+       .n_voltages     = S2MPS14_LDO_N_VOLTAGES,       \
+       .vsel_reg       = S2MPS14_REG_L1CTRL + num - 1, \
+       .vsel_mask      = S2MPS14_LDO_VSEL_MASK,        \
+       .enable_reg     = S2MPS14_REG_L1CTRL + num - 1, \
+       .enable_mask    = S2MPS14_ENABLE_MASK           \
+}
+#define regulator_desc_s2mps14_ldo3(num) {             \
+       .name           = "LDO"#num,                    \
+       .id             = S2MPS14_LDO##num,             \
+       .ops            = &s2mps14_reg_ops,             \
+       .type           = REGULATOR_VOLTAGE,            \
+       .owner          = THIS_MODULE,                  \
+       .min_uV         = S2MPS14_LDO_MIN_800MV,        \
+       .uV_step        = S2MPS14_LDO_STEP_12_5MV,      \
+       .n_voltages     = S2MPS14_LDO_N_VOLTAGES,       \
+       .vsel_reg       = S2MPS14_REG_L1CTRL + num - 1, \
+       .vsel_mask      = S2MPS14_LDO_VSEL_MASK,        \
+       .enable_reg     = S2MPS14_REG_L1CTRL + num - 1, \
+       .enable_mask    = S2MPS14_ENABLE_MASK           \
+}
+#define regulator_desc_s2mps14_buck1235(num) {                 \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPS14_BUCK##num,                    \
+       .ops            = &s2mps14_reg_ops,                     \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPS14_BUCK1235_MIN_600MV,           \
+       .uV_step        = S2MPS14_BUCK1235_STEP_6_25MV,         \
+       .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
+       .linear_min_sel = S2MPS14_BUCK1235_START_SEL,           \
+       .ramp_delay     = S2MPS14_BUCK_RAMP_DELAY,              \
+       .vsel_reg       = S2MPS14_REG_B1CTRL2 + (num - 1) * 2,  \
+       .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPS14_REG_B1CTRL1 + (num - 1) * 2,  \
+       .enable_mask    = S2MPS14_ENABLE_MASK                   \
+}
+#define regulator_desc_s2mps14_buck4(num) {                    \
+       .name           = "BUCK"#num,                           \
+       .id             = S2MPS14_BUCK##num,                    \
+       .ops            = &s2mps14_reg_ops,                     \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = S2MPS14_BUCK4_MIN_1400MV,             \
+       .uV_step        = S2MPS14_BUCK4_STEP_12_5MV,            \
+       .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
+       .linear_min_sel = S2MPS14_BUCK4_START_SEL,              \
+       .ramp_delay     = S2MPS14_BUCK_RAMP_DELAY,              \
+       .vsel_reg       = S2MPS14_REG_B1CTRL2 + (num - 1) * 2,  \
+       .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
+       .enable_reg     = S2MPS14_REG_B1CTRL1 + (num - 1) * 2,  \
+       .enable_mask    = S2MPS14_ENABLE_MASK                   \
+}
+
+static const struct regulator_desc s2mps14_regulators[] = {
+       regulator_desc_s2mps14_ldo3(1),
+       regulator_desc_s2mps14_ldo3(2),
+       regulator_desc_s2mps14_ldo1(3),
+       regulator_desc_s2mps14_ldo1(4),
+       regulator_desc_s2mps14_ldo3(5),
+       regulator_desc_s2mps14_ldo3(6),
+       regulator_desc_s2mps14_ldo1(7),
+       regulator_desc_s2mps14_ldo2(8),
+       regulator_desc_s2mps14_ldo3(9),
+       regulator_desc_s2mps14_ldo3(10),
+       regulator_desc_s2mps14_ldo1(11),
+       regulator_desc_s2mps14_ldo2(12),
+       regulator_desc_s2mps14_ldo2(13),
+       regulator_desc_s2mps14_ldo2(14),
+       regulator_desc_s2mps14_ldo2(15),
+       regulator_desc_s2mps14_ldo2(16),
+       regulator_desc_s2mps14_ldo2(17),
+       regulator_desc_s2mps14_ldo2(18),
+       regulator_desc_s2mps14_ldo1(19),
+       regulator_desc_s2mps14_ldo1(20),
+       regulator_desc_s2mps14_ldo1(21),
+       regulator_desc_s2mps14_ldo3(22),
+       regulator_desc_s2mps14_ldo1(23),
+       regulator_desc_s2mps14_ldo2(24),
+       regulator_desc_s2mps14_ldo2(25),
+       regulator_desc_s2mps14_buck1235(1),
+       regulator_desc_s2mps14_buck1235(2),
+       regulator_desc_s2mps14_buck1235(3),
+       regulator_desc_s2mps14_buck4(4),
+       regulator_desc_s2mps14_buck1235(5),
 };
 
 static int s2mps11_pmic_probe(struct platform_device *pdev)
 {
        struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
-       struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
-       struct of_regulator_match rdata[S2MPS11_REGULATOR_MAX];
+       struct sec_platform_data *pdata = iodev->pdata;
+       struct of_regulator_match *rdata = NULL;
        struct device_node *reg_np = NULL;
        struct regulator_config config = { };
        struct s2mps11_info *s2mps11;
-       int i, ret;
+       int i, ret = 0;
+       const struct regulator_desc *regulators;
+       enum sec_device_type dev_type;
 
        s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
                                GFP_KERNEL);
        if (!s2mps11)
                return -ENOMEM;
 
+       dev_type = platform_get_device_id(pdev)->driver_data;
+       switch (dev_type) {
+       case S2MPS11X:
+               s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
+               regulators = s2mps11_regulators;
+               break;
+       case S2MPS14X:
+               s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
+               regulators = s2mps14_regulators;
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device type: %u\n", dev_type);
+               return -EINVAL;
+       }
+
        if (!iodev->dev->of_node) {
                if (pdata) {
                        goto common_reg;
@@ -421,16 +609,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
                }
        }
 
-       for (i = 0; i < S2MPS11_REGULATOR_CNT; i++)
+       rdata = kzalloc(sizeof(*rdata) * s2mps11->rdev_num, GFP_KERNEL);
+       if (!rdata)
+               return -ENOMEM;
+
+       for (i = 0; i < s2mps11->rdev_num; i++)
                rdata[i].name = regulators[i].name;
 
-       reg_np = of_find_node_by_name(iodev->dev->of_node, "regulators");
+       reg_np = of_get_child_by_name(iodev->dev->of_node, "regulators");
        if (!reg_np) {
                dev_err(&pdev->dev, "could not find regulators sub-node\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
-       of_regulator_match(&pdev->dev, reg_np, rdata, S2MPS11_REGULATOR_MAX);
+       of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num);
+       of_node_put(reg_np);
 
 common_reg:
        platform_set_drvdata(pdev, s2mps11);
@@ -438,7 +632,9 @@ common_reg:
        config.dev = &pdev->dev;
        config.regmap = iodev->regmap_pmic;
        config.driver_data = s2mps11;
-       for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+       for (i = 0; i < s2mps11->rdev_num; i++) {
+               struct regulator_dev *regulator;
+
                if (!reg_np) {
                        config.init_data = pdata->regulators[i].initdata;
                        config.of_node = pdata->regulators[i].reg_node;
@@ -447,21 +643,25 @@ common_reg:
                        config.of_node = rdata[i].of_node;
                }
 
-               s2mps11->rdev[i] = devm_regulator_register(&pdev->dev,
+               regulator = devm_regulator_register(&pdev->dev,
                                                &regulators[i], &config);
-               if (IS_ERR(s2mps11->rdev[i])) {
-                       ret = PTR_ERR(s2mps11->rdev[i]);
+               if (IS_ERR(regulator)) {
+                       ret = PTR_ERR(regulator);
                        dev_err(&pdev->dev, "regulator init failed for %d\n",
                                i);
-                       return ret;
+                       goto out;
                }
        }
 
-       return 0;
+out:
+       kfree(rdata);
+
+       return ret;
 }
 
 static const struct platform_device_id s2mps11_pmic_id[] = {
-       { "s2mps11-pmic", 0},
+       { "s2mps11-pmic", S2MPS11X},
+       { "s2mps14-pmic", S2MPS14X},
        { },
 };
 MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
@@ -489,5 +689,5 @@ module_exit(s2mps11_pmic_exit);
 
 /* Module information */
 MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
-MODULE_DESCRIPTION("SAMSUNG S2MPS11 Regulator Driver");
+MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14 Regulator Driver");
 MODULE_LICENSE("GPL");
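
For S2MPS14, suspend handling is tracked with one bit per regulator in the 30-bit s2mps14_suspend_state field: set_suspend_disable() records the bit, and enable() then writes the suspend-enable value instead of the normal enable mask. A small sketch of that bookkeeping; the mask values are placeholders, not the chip's real register layout:

#include <stdio.h>

#define ENABLE_MASK	0xc0	/* placeholder for S2MPS14_ENABLE_MASK */
#define ENABLE_SUSPEND	0x40	/* placeholder for S2MPS14_ENABLE_SUSPEND */

static unsigned int suspend_state;	/* one bit per regulator id */

/* Called on set_suspend_disable: remember this regulator for suspend. */
static void mark_suspend_disable(int id)
{
	suspend_state |= 1u << id;
}

/* On enable, a regulator flagged for suspend gets the suspend value
 * written to its control field instead of the full enable mask. */
static unsigned int enable_value(int id)
{
	return (suspend_state & (1u << id)) ? ENABLE_SUSPEND : ENABLE_MASK;
}

int main(void)
{
	mark_suspend_disable(5);
	printf("reg 5 enable value: 0x%02x\n", enable_value(5));
	printf("reg 7 enable value: 0x%02x\n", enable_value(7));
	return 0;
}
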
index d958dfa051254866808fe6c36cf9db7184627b94..f05badabd69e99169a0adcb9f4d4d335891cafb8 100644 (file)
  *
  */
 
-#include <linux/bug.h>
 #include <linux/err.h>
-#include <linux/gpio.h>
 #include <linux/of_gpio.h>
-#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
@@ -170,12 +167,11 @@ static unsigned int s5m8767_opmode_reg[][4] = {
        {0x0, 0x3, 0x1, 0x1}, /* BUCK9 */
 };
 
-static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
-                               int *enable_ctrl)
+static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
+                               int *reg, int *enable_ctrl)
 {
-       int i, reg_id = rdev_get_id(rdev);
+       int i;
        unsigned int mode;
-       struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
 
        switch (reg_id) {
        case S5M8767_LDO1 ... S5M8767_LDO2:
@@ -214,53 +210,6 @@ static int s5m8767_get_register(struct regulator_dev *rdev, int *reg,
        return 0;
 }
 
-static int s5m8767_reg_is_enabled(struct regulator_dev *rdev)
-{
-       struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
-       int ret, reg;
-       int enable_ctrl;
-       unsigned int val;
-
-       ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
-       if (ret == -EINVAL)
-               return 1;
-       else if (ret)
-               return ret;
-
-       ret = regmap_read(s5m8767->iodev->regmap_pmic, reg, &val);
-       if (ret)
-               return ret;
-
-       return (val & S5M8767_ENCTRL_MASK) == enable_ctrl;
-}
-
-static int s5m8767_reg_enable(struct regulator_dev *rdev)
-{
-       struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
-       int ret, reg;
-       int enable_ctrl;
-
-       ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
-       if (ret)
-               return ret;
-
-       return regmap_update_bits(s5m8767->iodev->regmap_pmic, reg,
-                       S5M8767_ENCTRL_MASK, enable_ctrl);
-}
-
-static int s5m8767_reg_disable(struct regulator_dev *rdev)
-{
-       struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
-       int ret, reg, enable_ctrl;
-
-       ret = s5m8767_get_register(rdev, &reg, &enable_ctrl);
-       if (ret)
-               return ret;
-
-       return regmap_update_bits(s5m8767->iodev->regmap_pmic, reg,
-                       S5M8767_ENCTRL_MASK, ~S5M8767_ENCTRL_MASK);
-}
-
 static int s5m8767_get_vsel_reg(int reg_id, struct s5m8767_info *s5m8767)
 {
        int reg;
@@ -410,9 +359,9 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
 
 static struct regulator_ops s5m8767_ops = {
        .list_voltage           = regulator_list_voltage_linear,
-       .is_enabled             = s5m8767_reg_is_enabled,
-       .enable                 = s5m8767_reg_enable,
-       .disable                = s5m8767_reg_disable,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = s5m8767_set_voltage_sel,
        .set_voltage_time_sel   = s5m8767_set_voltage_time_sel,
@@ -420,9 +369,9 @@ static struct regulator_ops s5m8767_ops = {
 
 static struct regulator_ops s5m8767_buck78_ops = {
        .list_voltage           = regulator_list_voltage_linear,
-       .is_enabled             = s5m8767_reg_is_enabled,
-       .enable                 = s5m8767_reg_enable,
-       .disable                = s5m8767_reg_disable,
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
 };
@@ -483,6 +432,66 @@ static struct regulator_desc regulators[] = {
        s5m8767_regulator_desc(BUCK9),
 };
 
+/*
+ * Enable GPIO control over BUCK9 in regulator_config for that regulator.
+ */
+static void s5m8767_regulator_config_ext_control(struct s5m8767_info *s5m8767,
+               struct sec_regulator_data *rdata,
+               struct regulator_config *config)
+{
+       int i, mode = 0;
+
+       if (rdata->id != S5M8767_BUCK9)
+               return;
+
+       /* Check if opmode for regulator matches S5M8767_ENCTRL_USE_GPIO */
+       for (i = 0; i < s5m8767->num_regulators; i++) {
+               const struct sec_opmode_data *opmode = &s5m8767->opmode[i];
+               if (opmode->id == rdata->id) {
+                       mode = s5m8767_opmode_reg[rdata->id][opmode->mode];
+                       break;
+               }
+       }
+       if (mode != S5M8767_ENCTRL_USE_GPIO) {
+               dev_warn(s5m8767->dev,
+                               "ext-control for %s: mismatched op_mode (%x), ignoring\n",
+                               rdata->reg_node->name, mode);
+               return;
+       }
+
+       if (!gpio_is_valid(rdata->ext_control_gpio)) {
+               dev_warn(s5m8767->dev,
+                               "ext-control for %s: GPIO not valid, ignoring\n",
+                               rdata->reg_node->name);
+               return;
+       }
+
+       config->ena_gpio = rdata->ext_control_gpio;
+       config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+}
+
+/*
+ * Turn on GPIO control over BUCK9.
+ */
+static int s5m8767_enable_ext_control(struct s5m8767_info *s5m8767,
+               struct regulator_dev *rdev)
+{
+       int id = rdev_get_id(rdev);
+       int ret, reg, enable_ctrl;
+
+       if (id != S5M8767_BUCK9)
+               return -EINVAL;
+
+       ret = s5m8767_get_register(s5m8767, id, &reg, &enable_ctrl);
+       if (ret)
+               return ret;
+
+       return regmap_update_bits(s5m8767->iodev->regmap_pmic,
+                       reg, S5M8767_ENCTRL_MASK,
+                       S5M8767_ENCTRL_USE_GPIO << S5M8767_ENCTRL_SHIFT);
+}
+
+
 #ifdef CONFIG_OF
 static int s5m8767_pmic_dt_parse_dvs_gpio(struct sec_pmic_dev *iodev,
                        struct sec_platform_data *pdata,
@@ -520,6 +529,16 @@ static int s5m8767_pmic_dt_parse_ds_gpio(struct sec_pmic_dev *iodev,
        return 0;
 }
 
+static void s5m8767_pmic_dt_parse_ext_control_gpio(struct sec_pmic_dev *iodev,
+               struct sec_regulator_data *rdata,
+               struct device_node *reg_np)
+{
+       rdata->ext_control_gpio = of_get_named_gpio(reg_np,
+                       "s5m8767,pmic-ext-control-gpios", 0);
+       if (!gpio_is_valid(rdata->ext_control_gpio))
+               rdata->ext_control_gpio = 0;
+}
+
 static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
                                        struct sec_platform_data *pdata)
 {
@@ -546,19 +565,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
 
        rdata = devm_kzalloc(&pdev->dev, sizeof(*rdata) *
                                pdata->num_regulators, GFP_KERNEL);
-       if (!rdata) {
-               dev_err(iodev->dev,
-                       "could not allocate memory for regulator data\n");
+       if (!rdata)
                return -ENOMEM;
-       }
 
        rmode = devm_kzalloc(&pdev->dev, sizeof(*rmode) *
                                pdata->num_regulators, GFP_KERNEL);
-       if (!rmode) {
-               dev_err(iodev->dev,
-                       "could not allocate memory for regulator mode\n");
+       if (!rmode)
                return -ENOMEM;
-       }
 
        pdata->regulators = rdata;
        pdata->opmode = rmode;
@@ -574,6 +587,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
                        continue;
                }
 
+               s5m8767_pmic_dt_parse_ext_control_gpio(iodev, rdata, reg_np);
+
                rdata->id = i;
                rdata->initdata = of_get_regulator_init_data(
                                                &pdev->dev, reg_np);
@@ -922,6 +937,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
        for (i = 0; i < pdata->num_regulators; i++) {
                const struct sec_voltage_desc *desc;
                int id = pdata->regulators[i].id;
+               int enable_reg, enable_val;
 
                desc = reg_voltage_map[id];
                if (desc) {
@@ -935,6 +951,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
                                regulators[id].vsel_mask = 0x3f;
                        else
                                regulators[id].vsel_mask = 0xff;
+
+                       s5m8767_get_register(s5m8767, id, &enable_reg,
+                                            &enable_val);
+                       regulators[id].enable_reg = enable_reg;
+                       regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
+                       regulators[id].enable_val = enable_val;
                }
 
                config.dev = s5m8767->dev;
@@ -942,6 +964,9 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
                config.driver_data = s5m8767;
                config.regmap = iodev->regmap_pmic;
                config.of_node = pdata->regulators[i].reg_node;
+               if (pdata->regulators[i].ext_control_gpio)
+                       s5m8767_regulator_config_ext_control(s5m8767,
+                                       &pdata->regulators[i], &config);
 
                rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
                                                  &config);
@@ -951,6 +976,16 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
                                        id);
                        return ret;
                }
+
+               if (pdata->regulators[i].ext_control_gpio) {
+                       ret = s5m8767_enable_ext_control(s5m8767, rdev[i]);
+                       if (ret < 0) {
+                               dev_err(s5m8767->dev,
+                                               "failed to enable gpio control over %s: %d\n",
+                                               rdev[i]->desc->name, ret);
+                               return ret;
+                       }
+               }
        }
 
        return 0;
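
The switch above to the generic regmap helpers works because probe() now fills enable_reg, enable_mask and enable_val for each regulator from s5m8767_get_register(). Roughly, for a non-inverted enable field with enable_val set, regulator_is_enabled_regmap() reduces to a mask-and-compare; a standalone sketch of that semantics with made-up field values (not the chip's real register layout):

#include <stdio.h>

/* Roughly what regulator_is_enabled_regmap() does for a non-inverted
 * enable field once enable_val is set in the regulator_desc. */
static int is_enabled(unsigned int reg_val, unsigned int enable_mask,
                      unsigned int enable_val)
{
        return (reg_val & enable_mask) == enable_val;
}

int main(void)
{
        unsigned int enctrl_mask = 0x3 << 6;    /* hypothetical 2-bit field */
        unsigned int enable_val = 0x1 << 6;     /* hypothetical "on" encoding */

        printf("0x40 -> %d, 0x80 -> %d\n",
               is_enabled(0x40, enctrl_mask, enable_val),
               is_enabled(0x80, enctrl_mask, enable_val));
        return 0;
}
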
diff --git a/drivers/regulator/st-pwm.c b/drivers/regulator/st-pwm.c
new file mode 100644 (file)
index 0000000..e367af1
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Regulator driver for ST's PWM Regulators
+ *
+ * Copyright (C) 2014 - STMicroelectronics Inc.
+ *
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pwm.h>
+
+#define ST_PWM_REG_PERIOD 8448
+
+struct st_pwm_regulator_pdata {
+       const struct regulator_desc *desc;
+       struct st_pwm_voltages *duty_cycle_table;
+};
+
+struct st_pwm_regulator_data {
+       const struct st_pwm_regulator_pdata *pdata;
+       struct pwm_device *pwm;
+       bool enabled;
+       int state;
+};
+
+struct st_pwm_voltages {
+       unsigned int uV;
+       unsigned int dutycycle;
+};
+
+static int st_pwm_regulator_get_voltage_sel(struct regulator_dev *dev)
+{
+       struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+       return drvdata->state;
+}
+
+static int st_pwm_regulator_set_voltage_sel(struct regulator_dev *dev,
+                                           unsigned selector)
+{
+       struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+       int dutycycle;
+       int ret;
+
+       dutycycle = (ST_PWM_REG_PERIOD / 100) *
+               drvdata->pdata->duty_cycle_table[selector].dutycycle;
+
+       ret = pwm_config(drvdata->pwm, dutycycle, ST_PWM_REG_PERIOD);
+       if (ret) {
+               dev_err(&dev->dev, "Failed to configure PWM\n");
+               return ret;
+       }
+
+       drvdata->state = selector;
+
+       if (!drvdata->enabled) {
+               ret = pwm_enable(drvdata->pwm);
+               if (ret) {
+                       dev_err(&dev->dev, "Failed to enable PWM\n");
+                       return ret;
+               }
+               drvdata->enabled = true;
+       }
+
+       return 0;
+}
+
+static int st_pwm_regulator_list_voltage(struct regulator_dev *dev,
+                                        unsigned selector)
+{
+       struct st_pwm_regulator_data *drvdata = rdev_get_drvdata(dev);
+
+       if (selector >= dev->desc->n_voltages)
+               return -EINVAL;
+
+       return drvdata->pdata->duty_cycle_table[selector].uV;
+}
+
+static struct regulator_ops st_pwm_regulator_voltage_ops = {
+       .set_voltage_sel = st_pwm_regulator_set_voltage_sel,
+       .get_voltage_sel = st_pwm_regulator_get_voltage_sel,
+       .list_voltage    = st_pwm_regulator_list_voltage,
+       .map_voltage     = regulator_map_voltage_iterate,
+};
+
+static struct st_pwm_voltages b2105_duty_cycle_table[] = {
+       { .uV = 1114000, .dutycycle = 0,  },
+       { .uV = 1095000, .dutycycle = 10, },
+       { .uV = 1076000, .dutycycle = 20, },
+       { .uV = 1056000, .dutycycle = 30, },
+       { .uV = 1036000, .dutycycle = 40, },
+       { .uV = 1016000, .dutycycle = 50, },
+       /* WARNING: Values above 50% duty-cycle cause boot failures. */
+};
+
+static const struct regulator_desc b2105_desc = {
+       .name           = "b2105-pwm-regulator",
+       .ops            = &st_pwm_regulator_voltage_ops,
+       .type           = REGULATOR_VOLTAGE,
+       .owner          = THIS_MODULE,
+       .n_voltages     = ARRAY_SIZE(b2105_duty_cycle_table),
+       .supply_name    = "pwm",
+};
+
+static const struct st_pwm_regulator_pdata b2105_info = {
+       .desc             = &b2105_desc,
+       .duty_cycle_table = b2105_duty_cycle_table,
+};
+
+static struct of_device_id st_pwm_of_match[] = {
+       { .compatible = "st,b2105-pwm-regulator", .data = &b2105_info, },
+       { },
+};
+MODULE_DEVICE_TABLE(of, st_pwm_of_match);
+
+static int st_pwm_regulator_probe(struct platform_device *pdev)
+{
+       struct st_pwm_regulator_data *drvdata;
+       struct regulator_dev *regulator;
+       struct regulator_config config = { };
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *of_match;
+
+       if (!np) {
+               dev_err(&pdev->dev, "Device Tree node missing\n");
+               return -EINVAL;
+       }
+
+       drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       of_match = of_match_device(st_pwm_of_match, &pdev->dev);
+       if (!of_match) {
+               dev_err(&pdev->dev, "failed to match of device\n");
+               return -ENODEV;
+       }
+       drvdata->pdata = of_match->data;
+
+       config.init_data = of_get_regulator_init_data(&pdev->dev, np);
+       if (!config.init_data)
+               return -ENOMEM;
+
+       config.of_node = np;
+       config.dev = &pdev->dev;
+       config.driver_data = drvdata;
+
+       drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
+       if (IS_ERR(drvdata->pwm)) {
+               dev_err(&pdev->dev, "Failed to get PWM\n");
+               return PTR_ERR(drvdata->pwm);
+       }
+
+       regulator = devm_regulator_register(&pdev->dev,
+                                           drvdata->pdata->desc, &config);
+       if (IS_ERR(regulator)) {
+               dev_err(&pdev->dev, "Failed to register regulator %s\n",
+                       drvdata->pdata->desc->name);
+               return PTR_ERR(regulator);
+       }
+
+       return 0;
+}
+
+static struct platform_driver st_pwm_regulator_driver = {
+       .driver = {
+               .name           = "st-pwm-regulator",
+               .owner          = THIS_MODULE,
+               .of_match_table = of_match_ptr(st_pwm_of_match),
+       },
+       .probe = st_pwm_regulator_probe,
+};
+
+module_platform_driver(st_pwm_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lee Jones <lee.jones@linaro.org>");
+MODULE_DESCRIPTION("ST PWM Regulator Driver");
+MODULE_ALIAS("platform:st_pwm-regulator");
index b187b6bba7ad485a9a8d97ba248b0a70de89c5c8..a2dabb575b97d6260837da1deca24e821accc4f0 100644 (file)
@@ -54,8 +54,8 @@ struct ti_abb_info {
 
 /**
  * struct ti_abb_reg - Register description for ABB block
- * @setup_reg:                 setup register offset from base
- * @control_reg:               control register offset from base
+ * @setup_off:                 setup register offset from base
+ * @control_off:               control register offset from base
  * @sr2_wtcnt_value_mask:      setup register- sr2_wtcnt_value mask
  * @fbb_sel_mask:              setup register- FBB sel mask
  * @rbb_sel_mask:              setup register- RBB sel mask
@@ -64,8 +64,8 @@ struct ti_abb_info {
  * @opp_sel_mask:              control register - mask for mode to operate
  */
 struct ti_abb_reg {
-       u32 setup_reg;
-       u32 control_reg;
+       u32 setup_off;
+       u32 control_off;
 
        /* Setup register fields */
        u32 sr2_wtcnt_value_mask;
@@ -83,6 +83,8 @@ struct ti_abb_reg {
  * @rdesc:                     regulator descriptor
  * @clk:                       clock(usually sysclk) supplying ABB block
  * @base:                      base address of ABB block
+ * @setup_reg:                 setup register of ABB block
+ * @control_reg:               control register of ABB block
  * @int_base:                  interrupt register base address
  * @efuse_base:                        (optional) efuse base address for ABB modes
  * @ldo_base:                  (optional) LDOVBB vset override base address
@@ -99,6 +101,8 @@ struct ti_abb {
        struct regulator_desc rdesc;
        struct clk *clk;
        void __iomem *base;
+       void __iomem *setup_reg;
+       void __iomem *control_reg;
        void __iomem *int_base;
        void __iomem *efuse_base;
        void __iomem *ldo_base;
@@ -118,20 +122,18 @@ struct ti_abb {
  * ti_abb_rmw() - handy wrapper to set specific register bits
  * @mask:      mask for register field
  * @value:     value shifted to mask location and written
- * @offset:    offset of register
- * @base:      base address
+ * @reg:       register address
  *
  * Return: final register value (may be unused)
  */
-static inline u32 ti_abb_rmw(u32 mask, u32 value, u32 offset,
-                            void __iomem *base)
+static inline u32 ti_abb_rmw(u32 mask, u32 value, void __iomem *reg)
 {
        u32 val;
 
-       val = readl(base + offset);
+       val = readl(reg);
        val &= ~mask;
        val |= (value << __ffs(mask)) & mask;
-       writel(val, base + offset);
+       writel(val, reg);
 
        return val;
 }
@@ -263,21 +265,19 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
        if (ret)
                goto out;
 
-       ti_abb_rmw(regs->fbb_sel_mask | regs->rbb_sel_mask, 0, regs->setup_reg,
-                  abb->base);
+       ti_abb_rmw(regs->fbb_sel_mask | regs->rbb_sel_mask, 0, abb->setup_reg);
 
        switch (info->opp_sel) {
        case TI_ABB_SLOW_OPP:
-               ti_abb_rmw(regs->rbb_sel_mask, 1, regs->setup_reg, abb->base);
+               ti_abb_rmw(regs->rbb_sel_mask, 1, abb->setup_reg);
                break;
        case TI_ABB_FAST_OPP:
-               ti_abb_rmw(regs->fbb_sel_mask, 1, regs->setup_reg, abb->base);
+               ti_abb_rmw(regs->fbb_sel_mask, 1, abb->setup_reg);
                break;
        }
 
        /* program next state of ABB ldo */
-       ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, regs->control_reg,
-                  abb->base);
+       ti_abb_rmw(regs->opp_sel_mask, info->opp_sel, abb->control_reg);
 
        /*
         * program LDO VBB vset override if needed for !bypass mode
@@ -288,7 +288,7 @@ static int ti_abb_set_opp(struct regulator_dev *rdev, struct ti_abb *abb,
                ti_abb_program_ldovbb(dev, abb, info);
 
        /* Initiate ABB ldo change */
-       ti_abb_rmw(regs->opp_change_mask, 1, regs->control_reg, abb->base);
+       ti_abb_rmw(regs->opp_change_mask, 1, abb->control_reg);
 
        /* Wait for ABB LDO to complete transition to new Bias setting */
        ret = ti_abb_wait_txdone(dev, abb);
@@ -490,8 +490,7 @@ static int ti_abb_init_timings(struct device *dev, struct ti_abb *abb)
        dev_dbg(dev, "%s: Clk_rate=%ld, sr2_cnt=0x%08x\n", __func__,
                clk_get_rate(abb->clk), sr2_wt_cnt_val);
 
-       ti_abb_rmw(regs->sr2_wtcnt_value_mask, sr2_wt_cnt_val, regs->setup_reg,
-                  abb->base);
+       ti_abb_rmw(regs->sr2_wtcnt_value_mask, sr2_wt_cnt_val, abb->setup_reg);
 
        return 0;
 }
@@ -508,32 +507,24 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
                             struct regulator_init_data *rinit_data)
 {
        struct ti_abb_info *info;
-       const struct property *prop;
-       const __be32 *abb_info;
        const u32 num_values = 6;
        char *pname = "ti,abb_info";
-       u32 num_entries, i;
+       u32 i;
        unsigned int *volt_table;
-       int min_uV = INT_MAX, max_uV = 0;
+       int num_entries, min_uV = INT_MAX, max_uV = 0;
        struct regulation_constraints *c = &rinit_data->constraints;
 
-       prop = of_find_property(dev->of_node, pname, NULL);
-       if (!prop) {
-               dev_err(dev, "No '%s' property?\n", pname);
-               return -ENODEV;
-       }
-
-       if (!prop->value) {
-               dev_err(dev, "Empty '%s' property?\n", pname);
-               return -ENODATA;
-       }
-
        /*
         * Each abb_info is a set of n-tuple, where n is num_values, consisting
         * of voltage and a set of detection logic for ABB information for that
         * voltage to apply.
         */
-       num_entries = prop->length / sizeof(u32);
+       num_entries = of_property_count_u32_elems(dev->of_node, pname);
+       if (num_entries < 0) {
+               dev_err(dev, "No '%s' property?\n", pname);
+               return num_entries;
+       }
+
        if (!num_entries || (num_entries % num_values)) {
                dev_err(dev, "All '%s' list entries need %d vals\n", pname,
                        num_values);
@@ -542,38 +533,38 @@ static int ti_abb_init_table(struct device *dev, struct ti_abb *abb,
        num_entries /= num_values;
 
        info = devm_kzalloc(dev, sizeof(*info) * num_entries, GFP_KERNEL);
-       if (!info) {
-               dev_err(dev, "Can't allocate info table for '%s' property\n",
-                       pname);
+       if (!info)
                return -ENOMEM;
-       }
+
        abb->info = info;
 
        volt_table = devm_kzalloc(dev, sizeof(unsigned int) * num_entries,
                                  GFP_KERNEL);
-       if (!volt_table) {
-               dev_err(dev, "Can't allocate voltage table for '%s' property\n",
-                       pname);
+       if (!volt_table)
                return -ENOMEM;
-       }
 
        abb->rdesc.n_voltages = num_entries;
        abb->rdesc.volt_table = volt_table;
        /* We do not know where the OPP voltage is at the moment */
        abb->current_info_idx = -EINVAL;
 
-       abb_info = prop->value;
        for (i = 0; i < num_entries; i++, info++, volt_table++) {
                u32 efuse_offset, rbb_mask, fbb_mask, vset_mask;
                u32 efuse_val;
 
                /* NOTE: num_values should equal to entries picked up here */
-               *volt_table = be32_to_cpup(abb_info++);
-               info->opp_sel = be32_to_cpup(abb_info++);
-               efuse_offset = be32_to_cpup(abb_info++);
-               rbb_mask = be32_to_cpup(abb_info++);
-               fbb_mask = be32_to_cpup(abb_info++);
-               vset_mask = be32_to_cpup(abb_info++);
+               of_property_read_u32_index(dev->of_node, pname, i * num_values,
+                                          volt_table);
+               of_property_read_u32_index(dev->of_node, pname,
+                                          i * num_values + 1, &info->opp_sel);
+               of_property_read_u32_index(dev->of_node, pname,
+                                          i * num_values + 2, &efuse_offset);
+               of_property_read_u32_index(dev->of_node, pname,
+                                          i * num_values + 3, &rbb_mask);
+               of_property_read_u32_index(dev->of_node, pname,
+                                          i * num_values + 4, &fbb_mask);
+               of_property_read_u32_index(dev->of_node, pname,
+                                          i * num_values + 5, &vset_mask);
 
                dev_dbg(dev,
                        "[%d]v=%d ABB=%d ef=0x%x rbb=0x%x fbb=0x%x vset=0x%x\n",
@@ -648,8 +639,8 @@ static struct regulator_ops ti_abb_reg_ops = {
 /* Default ABB block offsets, IF this changes in future, create new one */
 static const struct ti_abb_reg abb_regs_v1 = {
        /* WARNING: registers are wrongly documented in TRM */
-       .setup_reg              = 0x04,
-       .control_reg            = 0x00,
+       .setup_off              = 0x04,
+       .control_off            = 0x00,
 
        .sr2_wtcnt_value_mask   = (0xff << 8),
        .fbb_sel_mask           = (0x01 << 2),
@@ -661,8 +652,8 @@ static const struct ti_abb_reg abb_regs_v1 = {
 };
 
 static const struct ti_abb_reg abb_regs_v2 = {
-       .setup_reg              = 0x00,
-       .control_reg            = 0x04,
+       .setup_off              = 0x00,
+       .control_off            = 0x04,
 
        .sr2_wtcnt_value_mask   = (0xff << 8),
        .fbb_sel_mask           = (0x01 << 2),
@@ -673,9 +664,20 @@ static const struct ti_abb_reg abb_regs_v2 = {
        .opp_sel_mask           = (0x03 << 0),
 };
 
+static const struct ti_abb_reg abb_regs_generic = {
+       .sr2_wtcnt_value_mask   = (0xff << 8),
+       .fbb_sel_mask           = (0x01 << 2),
+       .rbb_sel_mask           = (0x01 << 1),
+       .sr2_en_mask            = (0x01 << 0),
+
+       .opp_change_mask        = (0x01 << 2),
+       .opp_sel_mask           = (0x03 << 0),
+};
+
 static const struct of_device_id ti_abb_of_match[] = {
        {.compatible = "ti,abb-v1", .data = &abb_regs_v1},
        {.compatible = "ti,abb-v2", .data = &abb_regs_v2},
+       {.compatible = "ti,abb-v3", .data = &abb_regs_generic},
        { },
 };
 
@@ -722,11 +724,29 @@ static int ti_abb_probe(struct platform_device *pdev)
        abb->regs = match->data;
 
        /* Map ABB resources */
-       pname = "base-address";
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
-       abb->base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(abb->base))
-               return PTR_ERR(abb->base);
+       if (abb->regs->setup_off || abb->regs->control_off) {
+               pname = "base-address";
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+               abb->base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(abb->base))
+                       return PTR_ERR(abb->base);
+
+               abb->setup_reg = abb->base + abb->regs->setup_off;
+               abb->control_reg = abb->base + abb->regs->control_off;
+
+       } else {
+               pname = "control-address";
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+               abb->control_reg = devm_ioremap_resource(dev, res);
+               if (IS_ERR(abb->control_reg))
+                       return PTR_ERR(abb->control_reg);
+
+               pname = "setup-address";
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
+               abb->setup_reg = devm_ioremap_resource(dev, res);
+               if (IS_ERR(abb->setup_reg))
+                       return PTR_ERR(abb->setup_reg);
+       }
 
        pname = "int-address";
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
@@ -860,7 +880,7 @@ skip_opt:
        platform_set_drvdata(pdev, rdev);
 
        /* Enable the ldo if not already done by bootloader */
-       ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->regs->setup_reg, abb->base);
+       ti_abb_rmw(abb->regs->sr2_en_mask, 1, abb->setup_reg);
 
        return 0;
 }
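
After the refactor above, ti_abb_rmw() receives the already-mapped register address, but the field update itself is unchanged: clear the masked field, then shift the new value to the mask's lowest set bit and merge it in. A standalone sketch of that read-modify-write, with __ffs() replaced by a small loop and the mask values borrowed from the control register layout above:

#include <stdio.h>

/* Loop equivalent of __ffs(mask); only valid for a non-zero mask. */
static unsigned int field_shift(unsigned int mask)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return shift;
}

/* The same update ti_abb_rmw() applies to the register it is handed. */
static unsigned int rmw(unsigned int reg, unsigned int mask, unsigned int value)
{
        reg &= ~mask;
        reg |= (value << field_shift(mask)) & mask;
        return reg;
}

int main(void)
{
        unsigned int control = 0;
        unsigned int opp_sel_mask = 0x03 << 0;
        unsigned int opp_change_mask = 0x01 << 2;

        control = rmw(control, opp_sel_mask, 2);        /* pick an OPP */
        control = rmw(control, opp_change_mask, 1);     /* start the change */
        printf("control = 0x%x\n", control);            /* prints 0x6 */
        return 0;
}
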
index b3764f594ee933703394ebacd0e6610aa7c809bc..f31f22e3e1bd56c0b6f1e36a07152f8c3d0039bc 100644 (file)
@@ -227,10 +227,8 @@ static struct tps51632_regulator_platform_data *
        struct device_node *np = dev->of_node;
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata) {
-               dev_err(dev, "Memory alloc failed for platform data\n");
+       if (!pdata)
                return NULL;
-       }
 
        pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
        if (!pdata->reg_init_data) {
@@ -299,10 +297,8 @@ static int tps51632_probe(struct i2c_client *client,
        }
 
        tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
-       if (!tps) {
-               dev_err(&client->dev, "Memory allocation failed\n");
+       if (!tps)
                return -ENOMEM;
-       }
 
        tps->dev = &client->dev;
        tps->desc.name = client->name;
index c3fa15a299b16fb4ea48cca9f4173a8e0b4d9c8a..a1672044e5195304095f5de99e5c4e01218cb828 100644 (file)
@@ -299,10 +299,8 @@ static struct tps62360_regulator_platform_data *
        struct device_node *np = dev->of_node;
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata) {
-               dev_err(dev, "Memory alloc failed for platform data\n");
+       if (!pdata)
                return NULL;
-       }
 
        pdata->reg_init_data = of_get_regulator_init_data(dev, dev->of_node);
        if (!pdata->reg_init_data) {
@@ -377,11 +375,8 @@ static int tps62360_probe(struct i2c_client *client,
        }
 
        tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
-       if (!tps) {
-               dev_err(&client->dev, "%s(): Memory allocation failed\n",
-                                               __func__);
+       if (!tps)
                return -ENOMEM;
-       }
 
        tps->en_discharge = pdata->en_discharge;
        tps->en_internal_pulldn = pdata->en_internal_pulldn;
index 162a0fae20b317bd0b4899e07520a45f496e43c6..98e66ce26723fac64c2cbd7b836f03c878465e28 100644 (file)
@@ -359,7 +359,6 @@ static struct regulator_ops tps6507x_pmic_ops = {
        .map_voltage = regulator_map_voltage_ascend,
 };
 
-#ifdef CONFIG_OF
 static struct of_regulator_match tps6507x_matches[] = {
        { .name = "VDCDC1"},
        { .name = "VDCDC2"},
@@ -381,12 +380,10 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
 
        tps_board = devm_kzalloc(&pdev->dev, sizeof(*tps_board),
                                        GFP_KERNEL);
-       if (!tps_board) {
-               dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+       if (!tps_board)
                return NULL;
-       }
 
-       regulators = of_find_node_by_name(np, "regulators");
+       regulators = of_get_child_by_name(np, "regulators");
        if (!regulators) {
                dev_err(&pdev->dev, "regulator node not found\n");
                return NULL;
@@ -396,6 +393,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
        matches = tps6507x_matches;
 
        ret = of_regulator_match(&pdev->dev, regulators, matches, count);
+       of_node_put(regulators);
        if (ret < 0) {
                dev_err(&pdev->dev, "Error parsing regulator init data: %d\n",
                        ret);
@@ -406,10 +404,8 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
 
        reg_data = devm_kzalloc(&pdev->dev, (sizeof(struct regulator_init_data)
                                        * TPS6507X_NUM_REGULATOR), GFP_KERNEL);
-       if (!reg_data) {
-               dev_err(&pdev->dev, "Failure to alloc init data for regulators.\n");
+       if (!reg_data)
                return NULL;
-       }
 
        tps_board->tps6507x_pmic_init_data = reg_data;
 
@@ -424,15 +420,7 @@ static struct tps6507x_board *tps6507x_parse_dt_reg_data(
 
        return tps_board;
 }
-#else
-static inline struct tps6507x_board *tps6507x_parse_dt_reg_data(
-                       struct platform_device *pdev,
-                       struct of_regulator_match **tps6507x_reg_matches)
-{
-       *tps6507x_reg_matches = NULL;
-       return NULL;
-}
-#endif
+
 static int tps6507x_pmic_probe(struct platform_device *pdev)
 {
        struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
@@ -453,9 +441,10 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
         */
 
        tps_board = dev_get_platdata(tps6507x_dev->dev);
-       if (!tps_board && tps6507x_dev->dev->of_node)
+       if (IS_ENABLED(CONFIG_OF) && !tps_board &&
+               tps6507x_dev->dev->of_node)
                tps_board = tps6507x_parse_dt_reg_data(pdev,
-                                               &tps6507x_reg_matches);
+                               &tps6507x_reg_matches);
        if (!tps_board)
                return -EINVAL;
 
@@ -481,7 +470,7 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
                tps->info[i] = info;
                if (init_data->driver_data) {
                        struct tps6507x_reg_platform_data *data =
-                                                       init_data->driver_data;
+                                       init_data->driver_data;
                        tps->info[i]->defdcdc_default = data->defdcdc_default;
                }
 
index 676f75548f0028435b6795bc0371490c462a641e..2e92ef68574da733e6fd81d3aed1760e627d5bc4 100644 (file)
@@ -168,17 +168,13 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
 
        tps65090_pdata = devm_kzalloc(&pdev->dev, sizeof(*tps65090_pdata),
                                GFP_KERNEL);
-       if (!tps65090_pdata) {
-               dev_err(&pdev->dev, "Memory alloc for tps65090_pdata failed\n");
+       if (!tps65090_pdata)
                return ERR_PTR(-ENOMEM);
-       }
 
        reg_pdata = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX *
                                sizeof(*reg_pdata), GFP_KERNEL);
-       if (!reg_pdata) {
-               dev_err(&pdev->dev, "Memory alloc for reg_pdata failed\n");
+       if (!reg_pdata)
                return ERR_PTR(-ENOMEM);
-       }
 
        regulators = of_get_child_by_name(np, "regulators");
        if (!regulators) {
@@ -188,6 +184,7 @@ static struct tps65090_platform_data *tps65090_parse_dt_reg_data(
 
        ret = of_regulator_match(&pdev->dev, regulators, tps65090_matches,
                        ARRAY_SIZE(tps65090_matches));
+       of_node_put(regulators);
        if (ret < 0) {
                dev_err(&pdev->dev,
                        "Error parsing regulator init data: %d\n", ret);
@@ -252,10 +249,8 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
 
        pmic = devm_kzalloc(&pdev->dev, TPS65090_REGULATOR_MAX * sizeof(*pmic),
                        GFP_KERNEL);
-       if (!pmic) {
-               dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+       if (!pmic)
                return -ENOMEM;
-       }
 
        for (num = 0; num < TPS65090_REGULATOR_MAX; num++) {
                tps_pdata = tps65090_pdata->reg_pdata[num];
index 9ea1bf26bd137b4b4e33dcede0c6c1551ff6d79c..10b78d2b766aa3394b1ee51b2bf47e616f6f95d4 100644 (file)
@@ -187,7 +187,7 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
        struct device_node *regs;
        int i, count;
 
-       regs = of_find_node_by_name(node, "regulators");
+       regs = of_get_child_by_name(node, "regulators");
        if (!regs)
                return NULL;
 
@@ -202,7 +202,7 @@ static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev)
                return NULL;
 
        for (i = 0; i < count; i++) {
-               if (!reg_matches[i].init_data || !reg_matches[i].of_node)
+               if (!reg_matches[i].of_node)
                        continue;
 
                pdata->tps65217_init_data[i] = reg_matches[i].init_data;
@@ -222,7 +222,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
 {
        struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
        struct tps65217_board *pdata = dev_get_platdata(tps->dev);
-       struct regulator_init_data *reg_data;
        struct regulator_dev *rdev;
        struct regulator_config config = { };
        int i;
@@ -243,19 +242,9 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, tps);
 
        for (i = 0; i < TPS65217_NUM_REGULATOR; i++) {
-
-               reg_data = pdata->tps65217_init_data[i];
-
-               /*
-                * Regulator API handles empty constraints but not NULL
-                * constraints
-                */
-               if (!reg_data)
-                       continue;
-
                /* Register the regulators */
                config.dev = tps->dev;
-               config.init_data = reg_data;
+               config.init_data = pdata->tps65217_init_data[i];
                config.driver_data = tps;
                config.regmap = tps->regmap;
                if (tps->dev->of_node)
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
new file mode 100644 (file)
index 0000000..cec72fa
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * tps65218-regulator.c
+ *
+ * Regulator driver for TPS65218 PMIC
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/tps65218.h>
+
+static unsigned int tps65218_ramp_delay = 4000;
+
+enum tps65218_regulators { DCDC1, DCDC2, DCDC3, DCDC4, DCDC5, DCDC6, LDO1 };
+
+#define TPS65218_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _er, _em, _t, \
+                           _lr, _nlr)                          \
+       {                                                       \
+               .name                   = _name,                \
+               .id                     = _id,                  \
+               .ops                    = &_ops,                \
+               .n_voltages             = _n,                   \
+               .type                   = REGULATOR_VOLTAGE,    \
+               .owner                  = THIS_MODULE,          \
+               .vsel_reg               = _vr,                  \
+               .vsel_mask              = _vm,                  \
+               .enable_reg             = _er,                  \
+               .enable_mask            = _em,                  \
+               .volt_table             = _t,                   \
+               .linear_ranges          = _lr,                  \
+               .n_linear_ranges        = _nlr,                 \
+       }                                                       \
+
+#define TPS65218_INFO(_id, _nm, _min, _max)    \
+       {                                               \
+               .id             = _id,                  \
+               .name           = _nm,                  \
+               .min_uV         = _min,                 \
+               .max_uV         = _max,                 \
+       }
+
+static const struct regulator_linear_range dcdc1_dcdc2_ranges[] = {
+       REGULATOR_LINEAR_RANGE(850000, 0x0, 0x32, 10000),
+       REGULATOR_LINEAR_RANGE(1375000, 0x33, 0x3f, 25000),
+};
+
+static const struct regulator_linear_range ldo1_dcdc3_ranges[] = {
+       REGULATOR_LINEAR_RANGE(900000, 0x0, 0x1a, 25000),
+       REGULATOR_LINEAR_RANGE(1600000, 0x1b, 0x3f, 50000),
+};
+
+static const struct regulator_linear_range dcdc4_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1175000, 0x0, 0xf, 25000),
+       REGULATOR_LINEAR_RANGE(1550000, 0x10, 0x34, 50000),
+};
+
+static struct tps_info tps65218_pmic_regs[] = {
+       TPS65218_INFO(0, "DCDC1", 850000, 167500),
+       TPS65218_INFO(1, "DCDC2", 850000, 1675000),
+       TPS65218_INFO(2, "DCDC3", 900000, 3400000),
+       TPS65218_INFO(3, "DCDC4", 1175000, 3400000),
+       TPS65218_INFO(4, "DCDC5", 1000000, 1000000),
+       TPS65218_INFO(5, "DCDC6", 1800000, 1800000),
+       TPS65218_INFO(6, "LDO1", 900000, 3400000),
+};
+
+#define TPS65218_OF_MATCH(comp, label) \
+       { \
+               .compatible = comp, \
+               .data = &label, \
+       }
+
+static const struct of_device_id tps65218_of_match[] = {
+       TPS65218_OF_MATCH("ti,tps65218-dcdc1", tps65218_pmic_regs[DCDC1]),
+       TPS65218_OF_MATCH("ti,tps65218-dcdc2", tps65218_pmic_regs[DCDC2]),
+       TPS65218_OF_MATCH("ti,tps65218-dcdc3", tps65218_pmic_regs[DCDC3]),
+       TPS65218_OF_MATCH("ti,tps65218-dcdc4", tps65218_pmic_regs[DCDC4]),
+       TPS65218_OF_MATCH("ti,tps65218-dcdc5", tps65218_pmic_regs[DCDC5]),
+       TPS65218_OF_MATCH("ti,tps65218-dcdc6", tps65218_pmic_regs[DCDC6]),
+       TPS65218_OF_MATCH("ti,tps65218-ldo1", tps65218_pmic_regs[LDO1]),
+       { }
+};
+MODULE_DEVICE_TABLE(of, tps65218_of_match);
+
+static int tps65218_pmic_set_voltage_sel(struct regulator_dev *dev,
+                                        unsigned selector)
+{
+       int ret;
+       struct tps65218 *tps = rdev_get_drvdata(dev);
+       unsigned int rid = rdev_get_id(dev);
+
+       /* Set the voltage based on the vsel value (write protect level 2) */
+       ret = tps65218_set_bits(tps, dev->desc->vsel_reg, dev->desc->vsel_mask,
+                               selector, TPS65218_PROTECT_L1);
+
+       /* Set GO bit for DCDC1/2 to initiate voltage transition */
+       switch (rid) {
+       case TPS65218_DCDC_1:
+       case TPS65218_DCDC_2:
+               ret = tps65218_set_bits(tps, TPS65218_REG_CONTRL_SLEW_RATE,
+                                       TPS65218_SLEW_RATE_GO,
+                                       TPS65218_SLEW_RATE_GO,
+                                       TPS65218_PROTECT_L1);
+               break;
+       }
+
+       return ret;
+}
+
+static int tps65218_pmic_enable(struct regulator_dev *dev)
+{
+       struct tps65218 *tps = rdev_get_drvdata(dev);
+       unsigned int rid = rdev_get_id(dev);
+
+       if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+               return -EINVAL;
+
+       /* Enable the regulator; the register write uses level 1 password protection */
+       return tps65218_set_bits(tps, dev->desc->enable_reg,
+                                dev->desc->enable_mask, dev->desc->enable_mask,
+                                TPS65218_PROTECT_L1);
+}
+
+static int tps65218_pmic_disable(struct regulator_dev *dev)
+{
+       struct tps65218 *tps = rdev_get_drvdata(dev);
+       unsigned int rid = rdev_get_id(dev);
+
+       if (rid < TPS65218_DCDC_1 || rid > TPS65218_LDO_1)
+               return -EINVAL;
+
+       /* Disable the regulator; the register write uses level 1 password protection */
+       return tps65218_clear_bits(tps, dev->desc->enable_reg,
+                                  dev->desc->enable_mask, TPS65218_PROTECT_L1);
+}
+
+static int tps65218_set_voltage_time_sel(struct regulator_dev *rdev,
+       unsigned int old_selector, unsigned int new_selector)
+{
+       int old_uv, new_uv;
+
+       old_uv = regulator_list_voltage_linear_range(rdev, old_selector);
+       if (old_uv < 0)
+               return old_uv;
+
+       new_uv = regulator_list_voltage_linear_range(rdev, new_selector);
+       if (new_uv < 0)
+               return new_uv;
+
+       return DIV_ROUND_UP(abs(old_uv - new_uv), tps65218_ramp_delay);
+}
+
+/* Operations permitted on DCDC1, DCDC2 */
+static struct regulator_ops tps65218_dcdc12_ops = {
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = tps65218_pmic_enable,
+       .disable                = tps65218_pmic_disable,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = tps65218_pmic_set_voltage_sel,
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
+       .set_voltage_time_sel   = tps65218_set_voltage_time_sel,
+};
+
+/* Operations permitted on DCDC3, DCDC4 and LDO1 */
+static struct regulator_ops tps65218_ldo1_dcdc34_ops = {
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = tps65218_pmic_enable,
+       .disable                = tps65218_pmic_disable,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = tps65218_pmic_set_voltage_sel,
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
+};
+
+/* Operations permitted on DCDC5, DCDC6 */
+static struct regulator_ops tps65218_dcdc56_pmic_ops = {
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = tps65218_pmic_enable,
+       .disable                = tps65218_pmic_disable,
+};
+
+static const struct regulator_desc regulators[] = {
+       TPS65218_REGULATOR("DCDC1", TPS65218_DCDC_1, tps65218_dcdc12_ops, 64,
+                          TPS65218_REG_CONTROL_DCDC1,
+                          TPS65218_CONTROL_DCDC1_MASK,
+                          TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC1_EN, NULL,
+                          dcdc1_dcdc2_ranges, 2),
+       TPS65218_REGULATOR("DCDC2", TPS65218_DCDC_2, tps65218_dcdc12_ops, 64,
+                          TPS65218_REG_CONTROL_DCDC2,
+                          TPS65218_CONTROL_DCDC2_MASK,
+                          TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC2_EN, NULL,
+                          dcdc1_dcdc2_ranges, 2),
+       TPS65218_REGULATOR("DCDC3", TPS65218_DCDC_3, tps65218_ldo1_dcdc34_ops,
+                          64, TPS65218_REG_CONTROL_DCDC3,
+                          TPS65218_CONTROL_DCDC3_MASK, TPS65218_REG_ENABLE1,
+                          TPS65218_ENABLE1_DC3_EN, NULL,
+                          ldo1_dcdc3_ranges, 2),
+       TPS65218_REGULATOR("DCDC4", TPS65218_DCDC_4, tps65218_ldo1_dcdc34_ops,
+                          53, TPS65218_REG_CONTROL_DCDC4,
+                          TPS65218_CONTROL_DCDC4_MASK,
+                          TPS65218_REG_ENABLE1, TPS65218_ENABLE1_DC4_EN, NULL,
+                          dcdc4_ranges, 2),
+       TPS65218_REGULATOR("DCDC5", TPS65218_DCDC_5, tps65218_dcdc56_pmic_ops,
+                          1, -1, -1, TPS65218_REG_ENABLE1,
+                          TPS65218_ENABLE1_DC5_EN, NULL, NULL, 0),
+       TPS65218_REGULATOR("DCDC6", TPS65218_DCDC_6, tps65218_dcdc56_pmic_ops,
+                          1, -1, -1, TPS65218_REG_ENABLE1,
+                          TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0),
+       TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
+                          TPS65218_REG_CONTROL_DCDC4,
+                          TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
+                          TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
+                          2),
+};
+
+static int tps65218_regulator_probe(struct platform_device *pdev)
+{
+       struct tps65218 *tps = dev_get_drvdata(pdev->dev.parent);
+       struct regulator_init_data *init_data;
+       const struct tps_info   *template;
+       struct regulator_dev *rdev;
+       const struct of_device_id       *match;
+       struct regulator_config config = { };
+       int id;
+
+       match = of_match_device(tps65218_of_match, &pdev->dev);
+       if (!match)
+               return -ENODEV;
+
+       template = match->data;
+       id = template->id;
+       init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+
+       platform_set_drvdata(pdev, tps);
+
+       tps->info[id] = &tps65218_pmic_regs[id];
+       config.dev = &pdev->dev;
+       config.init_data = init_data;
+       config.driver_data = tps;
+       config.regmap = tps->regmap;
+
+       rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
+       if (IS_ERR(rdev)) {
+               dev_err(tps->dev, "failed to register %s regulator\n",
+                       pdev->name);
+               return PTR_ERR(rdev);
+       }
+
+       return 0;
+}
+
+static struct platform_driver tps65218_regulator_driver = {
+       .driver = {
+               .name = "tps65218-pmic",
+               .owner = THIS_MODULE,
+               .of_match_table = tps65218_of_match,
+       },
+       .probe = tps65218_regulator_probe,
+};
+
+module_platform_driver(tps65218_regulator_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("TPS65218 voltage regulator driver");
+MODULE_ALIAS("platform:tps65218-pmic");
+MODULE_LICENSE("GPL v2");
index 9f6bfda711b73a6dae9cdc9b8be3b69b2f5e88cc..5b494db9f95c90aba642c44f8af58910c71b1711 100644 (file)
@@ -593,10 +593,9 @@ static int pmic_probe(struct spi_device *spi)
        }
 
        hw = devm_kzalloc(&spi->dev, sizeof(struct tps6524x), GFP_KERNEL);
-       if (!hw) {
-               dev_err(dev, "cannot allocate regulator private data\n");
+       if (!hw)
                return -ENOMEM;
-       }
+
        spi_set_drvdata(spi, hw);
 
        memset(hw, 0, sizeof(struct tps6524x));
index 0485d47f0d8a82d9de595a8df5bd73c16111d4f2..32f38a63d944eaaf148a5a1f7c4034ab372e7f0d 100644 (file)
@@ -363,10 +363,8 @@ static struct tps6586x_platform_data *tps6586x_parse_regulator_dt(
        }
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-       if (!pdata) {
-               dev_err(&pdev->dev, "Memory alloction failed\n");
+       if (!pdata)
                return NULL;
-       }
 
        for (i = 0; i < num; i++) {
                int id;
@@ -398,7 +396,7 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
 {
        struct tps6586x_regulator *ri = NULL;
        struct regulator_config config = { };
-       struct regulator_dev **rdev;
+       struct regulator_dev *rdev;
        struct regulator_init_data *reg_data;
        struct tps6586x_platform_data *pdata;
        struct of_regulator_match *tps6586x_reg_matches = NULL;
@@ -418,13 +416,6 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       rdev = devm_kzalloc(&pdev->dev, TPS6586X_ID_MAX_REGULATOR *
-                               sizeof(*rdev), GFP_KERNEL);
-       if (!rdev) {
-               dev_err(&pdev->dev, "Mmemory alloc failed\n");
-               return -ENOMEM;
-       }
-
        version = tps6586x_get_version(pdev->dev.parent);
 
        for (id = 0; id < TPS6586X_ID_MAX_REGULATOR; ++id) {
@@ -451,12 +442,11 @@ static int tps6586x_regulator_probe(struct platform_device *pdev)
                if (tps6586x_reg_matches)
                        config.of_node = tps6586x_reg_matches[id].of_node;
 
-               rdev[id] = devm_regulator_register(&pdev->dev, &ri->desc,
-                                                  &config);
-               if (IS_ERR(rdev[id])) {
+               rdev = devm_regulator_register(&pdev->dev, &ri->desc, &config);
+               if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev, "failed to register regulator %s\n",
                                        ri->desc.name);
-                       return PTR_ERR(rdev[id]);
+                       return PTR_ERR(rdev);
                }
 
                if (reg_data) {
index f50dd847eebc9f1e38508ee1ab67ba0be5e6bdd2..fa7db8847578abd61787d0820661e675c3cd80b0 100644 (file)
@@ -1011,11 +1011,8 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
 
        pmic_plat_data = devm_kzalloc(&pdev->dev, sizeof(*pmic_plat_data),
                                        GFP_KERNEL);
-
-       if (!pmic_plat_data) {
-               dev_err(&pdev->dev, "Failure to alloc pdata for regulators.\n");
+       if (!pmic_plat_data)
                return NULL;
-       }
 
        np = of_node_get(pdev->dev.parent->of_node);
        regulators = of_get_child_by_name(np, "regulators");
@@ -1098,10 +1095,8 @@ static int tps65910_probe(struct platform_device *pdev)
        }
 
        pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
-       if (!pmic) {
-               dev_err(&pdev->dev, "Memory allocation failed for pmic\n");
+       if (!pmic)
                return -ENOMEM;
-       }
 
        pmic->mfd = tps65910;
        platform_set_drvdata(pdev, pmic);
@@ -1130,24 +1125,18 @@ static int tps65910_probe(struct platform_device *pdev)
 
        pmic->desc = devm_kzalloc(&pdev->dev, pmic->num_regulators *
                        sizeof(struct regulator_desc), GFP_KERNEL);
-       if (!pmic->desc) {
-               dev_err(&pdev->dev, "Memory alloc fails for desc\n");
+       if (!pmic->desc)
                return -ENOMEM;
-       }
 
        pmic->info = devm_kzalloc(&pdev->dev, pmic->num_regulators *
                        sizeof(struct tps_info *), GFP_KERNEL);
-       if (!pmic->info) {
-               dev_err(&pdev->dev, "Memory alloc fails for info\n");
+       if (!pmic->info)
                return -ENOMEM;
-       }
 
        pmic->rdev = devm_kzalloc(&pdev->dev, pmic->num_regulators *
                        sizeof(struct regulator_dev *), GFP_KERNEL);
-       if (!pmic->rdev) {
-               dev_err(&pdev->dev, "Memory alloc fails for rdev\n");
+       if (!pmic->rdev)
                return -ENOMEM;
-       }
 
        for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
                        i++, info++) {
index 71f457a42623f602541ae5d69b90521d673c9e2b..26aa6d9c308fdf1415f55a20f6d987b4260f92f8 100644 (file)
@@ -115,7 +115,7 @@ static int tps80031_reg_is_enabled(struct regulator_dev *rdev)
                        ri->rinfo->state_reg, ret);
                return ret;
        }
-       return ((reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON);
+       return (reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON;
 }
 
 static int tps80031_reg_enable(struct regulator_dev *rdev)
@@ -693,10 +693,8 @@ static int tps80031_regulator_probe(struct platform_device *pdev)
 
        pmic = devm_kzalloc(&pdev->dev,
                        TPS80031_REGULATOR_MAX * sizeof(*pmic), GFP_KERNEL);
-       if (!pmic) {
-               dev_err(&pdev->dev, "mem alloc for pmic failed\n");
+       if (!pmic)
                return -ENOMEM;
-       }
 
        for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
                tps_pdata = pdata->regulator_pdata[num];
index 04cf9c16ef233eae69fbdf209d09e0865f947a67..0d88a82ab2a227f835fa7d6b2b318b939e9e8737 100644 (file)
@@ -469,10 +469,8 @@ static int wm831x_buckv_probe(struct platform_device *pdev)
 
        dcdc = devm_kzalloc(&pdev->dev,  sizeof(struct wm831x_dcdc),
                            GFP_KERNEL);
-       if (dcdc == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!dcdc)
                return -ENOMEM;
-       }
 
        dcdc->wm831x = wm831x;
 
@@ -622,10 +620,8 @@ static int wm831x_buckp_probe(struct platform_device *pdev)
 
        dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
                            GFP_KERNEL);
-       if (dcdc == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!dcdc)
                return -ENOMEM;
-       }
 
        dcdc->wm831x = wm831x;
 
@@ -752,10 +748,8 @@ static int wm831x_boostp_probe(struct platform_device *pdev)
                return -ENODEV;
 
        dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
-       if (dcdc == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!dcdc)
                return -ENOMEM;
-       }
 
        dcdc->wm831x = wm831x;
 
@@ -842,10 +836,8 @@ static int wm831x_epe_probe(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "Probing EPE%d\n", id + 1);
 
        dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL);
-       if (dcdc == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!dcdc)
                return -ENOMEM;
-       }
 
        dcdc->wm831x = wm831x;
 
index 0339b886df5dbf2a6bde26111a0a4c6a60f58732..72e385e76a9d75df7146e0bb42589d32c034f28b 100644 (file)
@@ -165,10 +165,8 @@ static int wm831x_isink_probe(struct platform_device *pdev)
 
        isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
                             GFP_KERNEL);
-       if (isink == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!isink)
                return -ENOMEM;
-       }
 
        isink->wm831x = wm831x;
 
index 46d6700467b57007fbb277283207cfbbc4456fc0..eca0eeb78acd66209dcb39ca94ac5aeb3937f3bf 100644 (file)
@@ -235,10 +235,8 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
        ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
-       if (ldo == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!ldo)
                return -ENOMEM;
-       }
 
        ldo->wm831x = wm831x;
 
@@ -447,10 +445,8 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
        ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
-       if (ldo == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!ldo)
                return -ENOMEM;
-       }
 
        ldo->wm831x = wm831x;
 
@@ -594,10 +590,8 @@ static int wm831x_alive_ldo_probe(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
        ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
-       if (ldo == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!ldo)
                return -ENOMEM;
-       }
 
        ldo->wm831x = wm831x;
 
index de7b9c73e3fa09f3b14db1c5d8949ca99a37c352..7ec7c390eedaa1085eeb03d4adfce33f7f1b4531 100644 (file)
@@ -361,7 +361,7 @@ static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
 
        sel = regulator_map_voltage_linear(rdev, uV, uV);
        if (sel < 0)
-               return -EINVAL;
+               return sel;
 
        /* all DCDCs have same mV bits */
        val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
@@ -574,7 +574,7 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
 
        sel = regulator_map_voltage_linear_range(rdev, uV, uV);
        if (sel < 0)
-               return -EINVAL;
+               return sel;
 
        /* all LDOs have same mV bits */
        val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
index 71c5911f2e7130b2114d19b7619a30e69672a94a..c24346db8a71139e202c60464317e4fc03123253 100644 (file)
@@ -134,10 +134,8 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
 
        ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm8994_ldo), GFP_KERNEL);
-       if (ldo == NULL) {
-               dev_err(&pdev->dev, "Unable to allocate private data\n");
+       if (!ldo)
                return -ENOMEM;
-       }
 
        ldo->wm8994 = wm8994;
        ldo->supply = wm8994_ldo_consumer[id];
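
The regulator hunks above repeat two small cleanups: the explicit "unable to allocate" message after a failed devm_kzalloc() goes away (the allocator already reports the failure), and the wm8350 suspend-voltage setters return the mapping helper's own error code instead of flattening it to -EINVAL. A minimal userspace sketch of the resulting shape, with calloc()/free() and made-up names standing in for the kernel helpers:

#include <errno.h>
#include <stdlib.h>

struct dcdc {
	int sel;
};

/* stand-in for regulator_map_voltage_linear(): negative errno on failure */
static int map_voltage(int uV)
{
	return uV >= 0 ? uV / 1000 : -ERANGE;
}

static int probe_like(int uV, struct dcdc **out)
{
	struct dcdc *dcdc = calloc(1, sizeof(*dcdc));
	int sel;

	if (!dcdc)		/* no extra error message; the OOM is already reported */
		return -ENOMEM;

	sel = map_voltage(uV);
	if (sel < 0) {
		free(dcdc);
		return sel;	/* propagate the helper's code, don't flatten to -EINVAL */
	}

	dcdc->sel = sel;
	*out = dcdc;
	return 0;
}

int main(void)
{
	struct dcdc *d = NULL;

	if (probe_like(1800000, &d) < 0)
		return 1;
	free(d);
	return 0;
}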
index eb5d22795c47a55744381ee3f9e70c1666701835..5af7f0bd6125702358cb1fbab5830c5a77506e7b 100644 (file)
@@ -922,7 +922,7 @@ static int __init con3215_init(void)
                raw3215_freelist = req;
        }
 
-       cdev = ccw_device_probe_console();
+       cdev = ccw_device_create_console(&raw3215_ccw_driver);
        if (IS_ERR(cdev))
                return -ENODEV;
 
@@ -932,6 +932,12 @@ static int __init con3215_init(void)
        cdev->handler = raw3215_irq;
 
        raw->flags |= RAW3215_FIXED;
+       if (ccw_device_enable_console(cdev)) {
+               ccw_device_destroy_console(cdev);
+               raw3215_free_info(raw);
+               raw3215[0] = NULL;
+               return -ENODEV;
+       }
 
        /* Request the console irq */
        if (raw3215_startup(raw) != 0) {
index 699fd3e363dfba2f7b0fe8dbab3f43961ec1a108..75ffe9980c3e1cb8e141c6e29d01cef98aaf862f 100644 (file)
@@ -7,6 +7,7 @@
  *     Copyright IBM Corp. 2003, 2009
  */
 
+#include <linux/module.h>
 #include <linux/console.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -30,6 +31,9 @@
 
 static struct raw3270_fn con3270_fn;
 
+static bool auto_update = 1;
+module_param(auto_update, bool, 0);
+
 /*
  * Main 3270 console view data structure.
  */
@@ -204,6 +208,8 @@ con3270_update(struct con3270 *cp)
        struct string *s, *n;
        int rc;
 
+       if (!auto_update && !raw3270_view_active(&cp->view))
+               return;
        if (cp->view.dev)
                raw3270_activate_view(&cp->view);
 
@@ -529,6 +535,7 @@ con3270_flush(void)
        if (!cp->view.dev)
                return;
        raw3270_pm_unfreeze(&cp->view);
+       raw3270_activate_view(&cp->view);
        spin_lock_irqsave(&cp->view.lock, flags);
        con3270_wait_write(cp);
        cp->nr_up = 0;
@@ -576,7 +583,6 @@ static struct console con3270 = {
 static int __init
 con3270_init(void)
 {
-       struct ccw_device *cdev;
        struct raw3270 *rp;
        void *cbuf;
        int i;
@@ -591,10 +597,7 @@ con3270_init(void)
                cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
        }
 
-       cdev = ccw_device_probe_console();
-       if (IS_ERR(cdev))
-               return -ENODEV;
-       rp = raw3270_setup_console(cdev);
+       rp = raw3270_setup_console();
        if (IS_ERR(rp))
                return PTR_ERR(rp);
 
index 2cdec21e8924ea7b0403aafd1b0bae172a76416e..9f849df4381e1388c87b89bfc33f2b350ce54dda 100644 (file)
@@ -275,6 +275,15 @@ __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
        return 0;
 }
 
+int
+raw3270_view_active(struct raw3270_view *view)
+{
+       struct raw3270 *rp = view->dev;
+
+       return rp && rp->view == view &&
+               !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags);
+}
+
 int
 raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
 {
@@ -776,22 +785,37 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
 }
 
 #ifdef CONFIG_TN3270_CONSOLE
+/* Tentative definition - see below for actual definition. */
+static struct ccw_driver raw3270_ccw_driver;
+
 /*
  * Setup 3270 device configured as console.
  */
-struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
+struct raw3270 __init *raw3270_setup_console(void)
 {
+       struct ccw_device *cdev;
        unsigned long flags;
        struct raw3270 *rp;
        char *ascebc;
        int rc;
 
+       cdev = ccw_device_create_console(&raw3270_ccw_driver);
+       if (IS_ERR(cdev))
+               return ERR_CAST(cdev);
+
        rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
        ascebc = kzalloc(256, GFP_KERNEL);
        rc = raw3270_setup_device(cdev, rp, ascebc);
        if (rc)
                return ERR_PTR(rc);
        set_bit(RAW3270_FLAGS_CONSOLE, &rp->flags);
+
+       rc = ccw_device_enable_console(cdev);
+       if (rc) {
+               ccw_device_destroy_console(cdev);
+               return ERR_PTR(rc);
+       }
+
        spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
        do {
                __raw3270_reset_device(rp);
index 7b73ff8c1bd7a59a5c60fe9d7886371c48d4d2c4..e1e41c2861fbb34db1d035b4f51965cb39b41aa7 100644 (file)
@@ -173,6 +173,7 @@ int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
 int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
 int raw3270_reset(struct raw3270_view *);
 struct raw3270_view *raw3270_view(struct raw3270_view *);
+int raw3270_view_active(struct raw3270_view *);
 
 /* Reference count inliner for view structures. */
 static inline void
@@ -190,7 +191,7 @@ raw3270_put_view(struct raw3270_view *view)
                wake_up(&raw3270_wait_queue);
 }
 
-struct raw3270 *raw3270_setup_console(struct ccw_device *cdev);
+struct raw3270 *raw3270_setup_console(void);
 void raw3270_wait_cons_dev(struct raw3270 *);
 
 /* Notifier for device addition/removal */
index 82f2c389b4d1f51e882de45cd5e0fadfa3405f3e..14196ea0fdf3ad878b6fe527733d8390f0ba6a25 100644 (file)
@@ -20,7 +20,9 @@ struct read_info_sccb {
        struct  sccb_header header;     /* 0-7 */
        u16     rnmax;                  /* 8-9 */
        u8      rnsize;                 /* 10 */
-       u8      _reserved0[24 - 11];    /* 11-15 */
+       u8      _reserved0[16 - 11];    /* 11-15 */
+       u16     ncpurl;                 /* 16-17 */
+       u8      _reserved7[24 - 18];    /* 18-23 */
        u8      loadparm[8];            /* 24-31 */
        u8      _reserved1[48 - 32];    /* 32-47 */
        u64     facilities;             /* 48-55 */
@@ -32,13 +34,16 @@ struct read_info_sccb {
        u8      _reserved4[100 - 92];   /* 92-99 */
        u32     rnsize2;                /* 100-103 */
        u64     rnmax2;                 /* 104-111 */
-       u8      _reserved5[4096 - 112]; /* 112-4095 */
+       u8      _reserved5[120 - 112];  /* 112-119 */
+       u16     hcpua;                  /* 120-121 */
+       u8      _reserved6[4096 - 122]; /* 122-4095 */
 } __packed __aligned(PAGE_SIZE);
 
 static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
 static unsigned int sclp_con_has_vt220 __initdata;
 static unsigned int sclp_con_has_linemode __initdata;
 static unsigned long sclp_hsa_size;
+static unsigned int sclp_max_cpu;
 static struct sclp_ipl_info sclp_ipl_info;
 
 u64 sclp_facilities;
@@ -102,6 +107,15 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
        sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        sclp_rzm <<= 20;
 
+       if (!sccb->hcpua) {
+               if (MACHINE_IS_VM)
+                       sclp_max_cpu = 64;
+               else
+                       sclp_max_cpu = sccb->ncpurl;
+       } else {
+               sclp_max_cpu = sccb->hcpua + 1;
+       }
+
        /* Save IPL information */
        sclp_ipl_info.is_valid = 1;
        if (sccb->flags & 0x2)
@@ -129,6 +143,11 @@ unsigned long long sclp_get_rzm(void)
        return sclp_rzm;
 }
 
+unsigned int sclp_get_max_cpu(void)
+{
+       return sclp_max_cpu;
+}
+
 /*
  * This function will be called after sclp_facilities_detect(), which gets
  * called from early.c code. The sclp_facilities_detect() function retrieves
@@ -184,9 +203,9 @@ static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
        sccb_init_eq_size(sccb);
        if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
                return -EIO;
-       if (sccb->evbuf.blk_cnt != 0)
-               return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
-       return 0;
+       if (sccb->evbuf.blk_cnt == 0)
+               return 0;
+       return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
 }
 
 static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
@@ -195,6 +214,8 @@ static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
        sccb->length = PAGE_SIZE;
        if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
                return -EIO;
+       if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
+               return 0;
        return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
 }
 
index f055df0b167fc83e1f2f6f17e0aef47e129007b4..445564c790f65ddc587d2a8185468c28b9e711a7 100644 (file)
@@ -186,55 +186,71 @@ void airq_iv_release(struct airq_iv *iv)
 EXPORT_SYMBOL(airq_iv_release);
 
 /**
- * airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
+ * airq_iv_alloc - allocate irq bits from an interrupt vector
  * @iv: pointer to an interrupt vector structure
+ * @num: number of consecutive irq bits to allocate
  *
- * Returns the bit number of the allocated irq, or -1UL if no bit
- * is available or the AIRQ_IV_ALLOC flag has not been specified
+ * Returns the bit number of the first irq in the allocated block of irqs,
+ * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
+ * specified
  */
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
 {
-       unsigned long bit;
+       unsigned long bit, i;
 
-       if (!iv->avail)
+       if (!iv->avail || num == 0)
                return -1UL;
        spin_lock(&iv->lock);
        bit = find_first_bit_inv(iv->avail, iv->bits);
-       if (bit < iv->bits) {
-               clear_bit_inv(bit, iv->avail);
-               if (bit >= iv->end)
-                       iv->end = bit + 1;
-       } else
+       while (bit + num <= iv->bits) {
+               for (i = 1; i < num; i++)
+                       if (!test_bit_inv(bit + i, iv->avail))
+                               break;
+               if (i >= num) {
+                       /* Found a suitable block of irqs */
+                       for (i = 0; i < num; i++)
+                               clear_bit_inv(bit + i, iv->avail);
+                       if (bit + num >= iv->end)
+                               iv->end = bit + num + 1;
+                       break;
+               }
+               bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
+       }
+       if (bit + num > iv->bits)
                bit = -1UL;
        spin_unlock(&iv->lock);
        return bit;
 
 }
-EXPORT_SYMBOL(airq_iv_alloc_bit);
+EXPORT_SYMBOL(airq_iv_alloc);
 
 /**
- * airq_iv_free_bit - free an irq bit of an interrupt vector
+ * airq_iv_free - free irq bits of an interrupt vector
  * @iv: pointer to interrupt vector structure
- * @bit: number of the irq bit to free
+ * @bit: number of the first irq bit to free
+ * @num: number of consecutive irq bits to free
  */
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
 {
-       if (!iv->avail)
+       unsigned long i;
+
+       if (!iv->avail || num == 0)
                return;
        spin_lock(&iv->lock);
-       /* Clear (possibly left over) interrupt bit */
-       clear_bit_inv(bit, iv->vector);
-       /* Make the bit position available again */
-       set_bit_inv(bit, iv->avail);
-       if (bit == iv->end - 1) {
+       for (i = 0; i < num; i++) {
+               /* Clear (possibly left over) interrupt bit */
+               clear_bit_inv(bit + i, iv->vector);
+               /* Make the bit positions available again */
+               set_bit_inv(bit + i, iv->avail);
+       }
+       if (bit + num >= iv->end) {
                /* Find new end of bit-field */
-               while (--iv->end > 0)
-                       if (!test_bit_inv(iv->end - 1, iv->avail))
-                               break;
+               while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
+                       iv->end--;
        }
        spin_unlock(&iv->lock);
 }
-EXPORT_SYMBOL(airq_iv_free_bit);
+EXPORT_SYMBOL(airq_iv_free);
 
 /**
  * airq_iv_scan - scan interrupt vector for non-zero bits
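
The rework above turns the single-bit allocator into one that hands out a block of consecutive irq bits. A small userspace sketch of that search, with a plain char-per-bit array standing in for the kernel's MSB-first bitops and with the locking and iv->end bookkeeping left out:

#include <stdio.h>
#include <string.h>

#define NBITS 32

/* returns the first index of a free block of 'num' bits, or -1 if none */
static int alloc_block(char *avail, int nbits, int num)
{
	if (num <= 0)
		return -1;
	for (int bit = 0; bit + num <= nbits; bit++) {
		int i;

		for (i = 0; i < num; i++)
			if (!avail[bit + i])
				break;
		if (i == num) {			/* found a suitable block */
			memset(&avail[bit], 0, num);
			return bit;
		}
		bit += i;	/* jump past the in-use bit we ran into */
	}
	return -1;
}

int main(void)
{
	char avail[NBITS];

	memset(avail, 1, sizeof(avail));	/* 1 = available, as in iv->avail */
	avail[2] = 0;				/* pretend bit 2 is already taken */

	printf("block of 4 starts at bit %d\n", alloc_block(avail, NBITS, 4));
	printf("next block of 4 starts at bit %d\n", alloc_block(avail, NBITS, 4));
	return 0;
}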
index 7b29d0be0ca33b610443a2059374a54fb0802f26..1d3661af7bd83c47afc4a468446278f121df056c 100644 (file)
@@ -173,8 +173,7 @@ static struct css_driver chsc_subchannel_driver = {
 
 static int __init chsc_init_dbfs(void)
 {
-       chsc_debug_msg_id = debug_register("chsc_msg", 16, 1,
-                                          16 * sizeof(long));
+       chsc_debug_msg_id = debug_register("chsc_msg", 8, 1, 4 * sizeof(long));
        if (!chsc_debug_msg_id)
                goto out;
        debug_register_view(chsc_debug_msg_id, &debug_sprintf_view);
index 8ee88c4ebd83e8dcdd78f45a2adc8500205e850b..9e058c4657a3fdef34a26f00f3e1fc3601d23841 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/device.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <asm/cio.h>
 #include <asm/delay.h>
 #include <asm/irq.h>
@@ -28,7 +29,7 @@
 #include <asm/chpid.h>
 #include <asm/airq.h>
 #include <asm/isc.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 #include <asm/fcx.h>
 #include <asm/nmi.h>
 #include <asm/crw.h>
@@ -54,7 +55,7 @@ debug_info_t *cio_debug_crw_id;
  */
 static int __init cio_debug_init(void)
 {
-       cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
+       cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
        if (!cio_debug_msg_id)
                goto out_unregister;
        debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
@@ -64,7 +65,7 @@ static int __init cio_debug_init(void)
                goto out_unregister;
        debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(cio_debug_trace_id, 2);
-       cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
+       cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
        if (!cio_debug_crw_id)
                goto out_unregister;
        debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
@@ -584,8 +585,6 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
        return IRQ_HANDLED;
 }
 
-static struct irq_desc *irq_desc_io;
-
 static struct irqaction io_interrupt = {
        .name    = "IO",
        .handler = do_cio_interrupt,
@@ -596,7 +595,6 @@ void __init init_cio_interrupts(void)
        irq_set_chip_and_handler(IO_INTERRUPT,
                                 &dummy_irq_chip, handle_percpu_irq);
        setup_irq(IO_INTERRUPT, &io_interrupt);
-       irq_desc_io = irq_to_desc(IO_INTERRUPT);
 }
 
 #ifdef CONFIG_CCW_CONSOLE
@@ -623,7 +621,7 @@ void cio_tsch(struct subchannel *sch)
                local_bh_disable();
                irq_enter();
        }
-       kstat_incr_irqs_this_cpu(IO_INTERRUPT, irq_desc_io);
+       kstat_incr_irq_this_cpu(IO_INTERRUPT);
        if (sch->driver && sch->driver->irq)
                sch->driver->irq(sch);
        else
index e9d783563cbb8894182efcfde40376eb731d286b..d8d9b5b5cc56f9508cd9bae511c592867775a3cc 100644 (file)
@@ -1571,12 +1571,27 @@ out:
        return rc;
 }
 
+static void ccw_device_set_int_class(struct ccw_device *cdev)
+{
+       struct ccw_driver *cdrv = cdev->drv;
+
+       /* Note: we interpret class 0 in this context as an uninitialized
+        * field since it translates to a non-I/O interrupt class. */
+       if (cdrv->int_class != 0)
+               cdev->private->int_class = cdrv->int_class;
+       else
+               cdev->private->int_class = IRQIO_CIO;
+}
+
 #ifdef CONFIG_CCW_CONSOLE
-static int ccw_device_console_enable(struct ccw_device *cdev,
-                                    struct subchannel *sch)
+int __init ccw_device_enable_console(struct ccw_device *cdev)
 {
+       struct subchannel *sch = to_subchannel(cdev->dev.parent);
        int rc;
 
+       if (!cdev->drv || !cdev->handler)
+               return -EINVAL;
+
        io_subchannel_init_fields(sch);
        rc = cio_commit_config(sch);
        if (rc)
@@ -1609,12 +1624,11 @@ out_unlock:
        return rc;
 }
 
-struct ccw_device *ccw_device_probe_console(void)
+struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
 {
        struct io_subchannel_private *io_priv;
        struct ccw_device *cdev;
        struct subchannel *sch;
-       int ret;
 
        sch = cio_probe_console();
        if (IS_ERR(sch))
@@ -1631,18 +1645,23 @@ struct ccw_device *ccw_device_probe_console(void)
                kfree(io_priv);
                return cdev;
        }
+       cdev->drv = drv;
        set_io_private(sch, io_priv);
-       ret = ccw_device_console_enable(cdev, sch);
-       if (ret) {
-               set_io_private(sch, NULL);
-               put_device(&sch->dev);
-               put_device(&cdev->dev);
-               kfree(io_priv);
-               return ERR_PTR(ret);
-       }
+       ccw_device_set_int_class(cdev);
        return cdev;
 }
 
+void __init ccw_device_destroy_console(struct ccw_device *cdev)
+{
+       struct subchannel *sch = to_subchannel(cdev->dev.parent);
+       struct io_subchannel_private *io_priv = to_io_private(sch);
+
+       set_io_private(sch, NULL);
+       put_device(&sch->dev);
+       put_device(&cdev->dev);
+       kfree(io_priv);
+}
+
 /**
  * ccw_device_wait_idle() - busy wait for device to become idle
  * @cdev: ccw device
@@ -1726,15 +1745,8 @@ ccw_device_probe (struct device *dev)
        int ret;
 
        cdev->drv = cdrv; /* to let the driver call _set_online */
-       /* Note: we interpret class 0 in this context as an uninitialized
-        * field since it translates to a non-I/O interrupt class. */
-       if (cdrv->int_class != 0)
-               cdev->private->int_class = cdrv->int_class;
-       else
-               cdev->private->int_class = IRQIO_CIO;
-
+       ccw_device_set_int_class(cdev);
        ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
-
        if (ret) {
                cdev->drv = NULL;
                cdev->private->int_class = IRQIO_CIO;
index 795ed61a549632adc16b808622f75c911f66634d..a0aff2eb247c2fb9657aff821299404aa2d20ddc 100644 (file)
@@ -33,8 +33,8 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
        /*                   N  P  A    M  L  V                      H  */
        [QETH_DBF_SETUP] = {"qeth_setup",
                                8, 1,   8, 5, &debug_hex_ascii_view, NULL},
-       [QETH_DBF_MSG]   = {"qeth_msg",
-                               8, 1, 128, 3, &debug_sprintf_view,   NULL},
+       [QETH_DBF_MSG]   = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
+                           &debug_sprintf_view, NULL},
        [QETH_DBF_CTRL]  = {"qeth_control",
                8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
 };
index a3e6c8a3ff0faa852f163185c46e98d80fb49a43..296c936cc03cc43397ba4905d53ac1fd7b050f89 100644 (file)
@@ -90,6 +90,7 @@
 #include <linux/init.h>
 #include <linux/nvram.h>
 #include <linux/bitops.h>
+#include <linux/wait.h>
 
 #include <asm/setup.h>
 #include <asm/atarihw.h>
@@ -549,8 +550,10 @@ static void falcon_get_lock(void)
 
        local_irq_save(flags);
 
-       while (!in_irq() && falcon_got_lock && stdma_others_waiting())
-               sleep_on(&falcon_fairness_wait);
+       wait_event_cmd(falcon_fairness_wait,
+               in_interrupt() || !falcon_got_lock || !stdma_others_waiting(),
+               local_irq_restore(flags),
+               local_irq_save(flags));
 
        while (!falcon_got_lock) {
                if (in_irq())
@@ -562,7 +565,10 @@ static void falcon_get_lock(void)
                        falcon_trying_lock = 0;
                        wake_up(&falcon_try_wait);
                } else {
-                       sleep_on(&falcon_try_wait);
+                       wait_event_cmd(falcon_try_wait,
+                               falcon_got_lock && !falcon_trying_lock,
+                               local_irq_restore(flags),
+                               local_irq_save(flags));
                }
        }
 
index d2895836f9fa4c00fec1a46d993074ecb3edeaea..766098af4eb79aebf8f080b6906db8aeade896e7 100644 (file)
@@ -700,46 +700,26 @@ void sas_probe_sata(struct asd_sas_port *port)
 
 }
 
-static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
+static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
 {
        struct domain_device *dev, *n;
-       bool retry = false;
 
        list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
-               int rc;
-
                if (!dev_is_sata(dev))
                        continue;
 
                sas_ata_wait_eh(dev);
-               rc = dev->sata_dev.pm_result;
-               if (rc == -EAGAIN)
-                       retry = true;
-               else if (rc) {
-                       /* since we don't have a
-                        * ->port_{suspend|resume} routine in our
-                        *  ata_port ops, and no entanglements with
-                        *  acpi, suspend should just be mechanical trip
-                        *  through eh, catch cases where these
-                        *  assumptions are invalidated
-                        */
-                       WARN_ONCE(1, "failed %s %s error: %d\n", func,
-                                dev_name(&dev->rphy->dev), rc);
-               }
 
                /* if libata failed to power manage the device, tear it down */
                if (ata_dev_disabled(sas_to_ata_dev(dev)))
                        sas_fail_probe(dev, func, -ENODEV);
        }
-
-       return retry;
 }
 
 void sas_suspend_sata(struct asd_sas_port *port)
 {
        struct domain_device *dev;
 
- retry:
        mutex_lock(&port->ha->disco_mutex);
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
                struct sata_device *sata;
@@ -751,20 +731,17 @@ void sas_suspend_sata(struct asd_sas_port *port)
                if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
                        continue;
 
-               sata->pm_result = -EIO;
-               ata_sas_port_async_suspend(sata->ap, &sata->pm_result);
+               ata_sas_port_suspend(sata->ap);
        }
        mutex_unlock(&port->ha->disco_mutex);
 
-       if (sas_ata_flush_pm_eh(port, __func__))
-               goto retry;
+       sas_ata_flush_pm_eh(port, __func__);
 }
 
 void sas_resume_sata(struct asd_sas_port *port)
 {
        struct domain_device *dev;
 
- retry:
        mutex_lock(&port->ha->disco_mutex);
        list_for_each_entry(dev, &port->dev_list, dev_list_node) {
                struct sata_device *sata;
@@ -776,13 +753,11 @@ void sas_resume_sata(struct asd_sas_port *port)
                if (sata->ap->pm_mesg.event == PM_EVENT_ON)
                        continue;
 
-               sata->pm_result = -EIO;
-               ata_sas_port_async_resume(sata->ap, &sata->pm_result);
+               ata_sas_port_resume(sata->ap);
        }
        mutex_unlock(&port->ha->disco_mutex);
 
-       if (sas_ata_flush_pm_eh(port, __func__))
-               goto retry;
+       sas_ata_flush_pm_eh(port, __func__);
 }
 
 /**
index 8af136e9c9dc16e1ab5f5f5cecf7dfe1579c528b..b22142ee52625c8bce4130b24cf0e9046544a050 100644 (file)
@@ -2036,6 +2036,13 @@ static void fwserial_auto_connect(struct work_struct *work)
                schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
 }
 
+static void fwserial_peer_workfn(struct work_struct *work)
+{
+       struct fwtty_peer *peer = to_peer(work, work);
+
+       peer->workfn(work);
+}
+
 /**
  * fwserial_add_peer - add a newly probed 'serial' unit device as a 'peer'
  * @serial: aggregate representing the specific fw_card to add the peer to
@@ -2100,7 +2107,7 @@ static int fwserial_add_peer(struct fw_serial *serial, struct fw_unit *unit)
        peer->port = NULL;
 
        init_timer(&peer->timer);
-       INIT_WORK(&peer->work, NULL);
+       INIT_WORK(&peer->work, fwserial_peer_workfn);
        INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
 
        /* associate peer with specific fw_card */
@@ -2702,7 +2709,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
 
                } else {
                        peer->work_params.plug_req = pkt->plug_req;
-                       PREPARE_WORK(&peer->work, fwserial_handle_plug_req);
+                       peer->workfn = fwserial_handle_plug_req;
                        queue_work(system_unbound_wq, &peer->work);
                }
                break;
@@ -2731,7 +2738,7 @@ static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
                        fwtty_err(&peer->unit, "unplug req: busy\n");
                        rcode = RCODE_CONFLICT_ERROR;
                } else {
-                       PREPARE_WORK(&peer->work, fwserial_handle_unplug_req);
+                       peer->workfn = fwserial_handle_unplug_req;
                        queue_work(system_unbound_wq, &peer->work);
                }
                break;
index 54f7f9b9b2123a120a48595a878723b5d89d465e..98b853d4acbcf16181984bf3f529c24f4bab3a1c 100644 (file)
@@ -91,6 +91,7 @@ struct fwtty_peer {
        struct rcu_head         rcu;
 
        spinlock_t              lock;
+       work_func_t             workfn;
        struct work_struct      work;
        struct peer_work_params work_params;
        struct timer_list       timer;
index be33d2b0613bb95a51092b1173c9d1291082a211..7e0b626026322c9319d6588de604231d496e76fc 100644 (file)
@@ -1041,8 +1041,7 @@ static int sci_notifier(struct notifier_block *self,
 
        sci_port = container_of(self, struct sci_port, freq_transition);
 
-       if ((phase == CPUFREQ_POSTCHANGE) ||
-           (phase == CPUFREQ_RESUMECHANGE)) {
+       if (phase == CPUFREQ_POSTCHANGE) {
                struct uart_port *port = &sci_port->port;
 
                spin_lock_irqsave(&port->lock, flags);
index d8a55e87877f06f3141602e4f08cdcb668c465b0..0ffb0cbe28237239ffd093877a363c7957c8e328 100644 (file)
                                lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
 # define __rel(l, n, i)                                \
                                lock_release(&(l)->dep_map, n, i)
-# ifdef CONFIG_PROVE_LOCKING
-#  define lockdep_acquire(l, s, t, i)          __acq(l, s, t, 0, 2, NULL, i)
-#  define lockdep_acquire_nest(l, s, t, n, i)  __acq(l, s, t, 0, 2, n, i)
-#  define lockdep_acquire_read(l, s, t, i)     __acq(l, s, t, 1, 2, NULL, i)
-#  define lockdep_release(l, n, i)             __rel(l, n, i)
-# else
-#  define lockdep_acquire(l, s, t, i)          __acq(l, s, t, 0, 1, NULL, i)
-#  define lockdep_acquire_nest(l, s, t, n, i)  __acq(l, s, t, 0, 1, n, i)
-#  define lockdep_acquire_read(l, s, t, i)     __acq(l, s, t, 1, 1, NULL, i)
-#  define lockdep_release(l, n, i)             __rel(l, n, i)
-# endif
+#define lockdep_acquire(l, s, t, i)            __acq(l, s, t, 0, 1, NULL, i)
+#define lockdep_acquire_nest(l, s, t, n, i)    __acq(l, s, t, 0, 1, n, i)
+#define lockdep_acquire_read(l, s, t, i)       __acq(l, s, t, 1, 1, NULL, i)
+#define lockdep_release(l, n, i)               __rel(l, n, i)
 #else
 # define lockdep_acquire(l, s, t, i)           do { } while (0)
 # define lockdep_acquire_nest(l, s, t, n, i)   do { } while (0)
index 64ea21971be23f770986b25cf05890553cf8d195..5cbf78d0be2539c868dab16a815b15e4bfe28b99 100644 (file)
@@ -1040,7 +1040,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                 */
                if (type == HUB_INIT) {
                        delay = hub_power_on(hub, false);
-                       PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func2);
+                       INIT_DELAYED_WORK(&hub->init_work, hub_init_func2);
                        schedule_delayed_work(&hub->init_work,
                                        msecs_to_jiffies(delay));
 
@@ -1194,7 +1194,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 
                /* Don't do a long sleep inside a workqueue routine */
                if (type == HUB_INIT2) {
-                       PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
+                       INIT_DELAYED_WORK(&hub->init_work, hub_init_func3);
                        schedule_delayed_work(&hub->init_work,
                                        msecs_to_jiffies(delay));
                        return;         /* Continues at init3: below */
index a0fa5de210cf57ac6842625ceb21499ac07a7969..e1e22e0f01e881fe2961dbf1c43cb56f4e22cade 100644 (file)
@@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
                        r = -ENOBUFS;
                        goto err;
                }
-               d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
+               r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
                                      ARRAY_SIZE(vq->iov) - seg, &out,
                                      &in, log, log_num);
+               if (unlikely(r < 0))
+                       goto err;
+
+               d = r;
                if (d == vq->num) {
                        r = 0;
                        goto err;
@@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;
+
+       /* Detect overrun */
+       if (unlikely(datalen > 0)) {
+               r = UIO_MAXIOV + 1;
+               goto err;
+       }
        return headcount;
 err:
        vhost_discard_vq_desc(vq, headcount);
@@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net)
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        break;
+               /* On overrun, truncate and discard */
+               if (unlikely(headcount > UIO_MAXIOV)) {
+                       msg.msg_iovlen = 1;
+                       err = sock->ops->recvmsg(NULL, sock, &msg,
+                                                1, MSG_DONTWAIT | MSG_TRUNC);
+                       pr_debug("Discarded rx packet: len %zd\n", sock_len);
+                       continue;
+               }
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
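
The vhost-net change above makes get_rx_bufs() report two failures distinctly: a negative value from vhost_get_vq_desc() is propagated as an error, while running out of descriptor space for the packet is signalled with UIO_MAXIOV + 1 so that handle_rx() can truncate and discard just that packet instead of stopping. A standalone sketch of the "one past the legal maximum means overrun" convention, using plain counters rather than the vhost structures:

#include <stdio.h>

#define MAX_BUFS 8

/* each available buffer holds 'bufsize' bytes; gather enough for 'datalen' */
static int get_rx_bufs_like(int avail, int bufsize, int datalen)
{
	int count = 0;

	while (datalen > 0 && count < avail && count < MAX_BUFS) {
		datalen -= bufsize;
		count++;
	}
	if (datalen > 0)
		return MAX_BUFS + 1;	/* overrun: signal past the legal range */
	return count;
}

int main(void)
{
	int headcount = get_rx_bufs_like(4, 1500, 9000);

	if (headcount > MAX_BUFS)
		puts("overrun: truncate and discard this packet");
	else
		printf("packet fits in %d buffers\n", headcount);
	return 0;
}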
index dade5b7699bc240e81225e8d489136af9e0d71e0..97a8f3a12a7b2bafb7ee704fcba1ee13e40f37ab 100644 (file)
@@ -27,12 +27,6 @@ config VGASTATE
        tristate
        default n
 
-config VIDEO_OUTPUT_CONTROL
-       tristate "Lowlevel video output switch controls"
-       help
-         This framework adds support for low-level control of the video 
-         output switch.
-
 config VIDEOMODE_HELPERS
        bool
 
index ae17ddf49a00d39c04a31570cd1f4955c9630af5..08d6a4ab3ace29a6ac2c96cd4526ffda9d27be12 100644 (file)
@@ -172,8 +172,6 @@ obj-$(CONFIG_FB_SIMPLE)           += simplefb.o
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL)          += vfb.o
 
-#video output switch sysfs driver
-obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
 obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
 ifeq ($(CONFIG_OF),y)
 obj-$(CONFIG_VIDEOMODE_HELPERS) += of_display_timing.o of_videomode.o
diff --git a/drivers/video/output.c b/drivers/video/output.c
deleted file mode 100644 (file)
index 1446c49..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *  output.c - Display Output Switch driver
- *
- *  Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/module.h>
-#include <linux/video_output.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/ctype.h>
-
-
-MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       ssize_t ret_size = 0;
-       struct output_device *od = to_output_device(dev);
-       if (od->props)
-               ret_size = sprintf(buf,"%.8x\n",od->props->get_status(od));
-       return ret_size;
-}
-
-static ssize_t state_store(struct device *dev, struct device_attribute *attr,
-                          const char *buf,size_t count)
-{
-       char *endp;
-       struct output_device *od = to_output_device(dev);
-       int request_state = simple_strtoul(buf,&endp,0);
-       size_t size = endp - buf;
-
-       if (isspace(*endp))
-               size++;
-       if (size != count)
-               return -EINVAL;
-
-       if (od->props) {
-               od->request_state = request_state;
-               od->props->set_state(od);
-       }
-       return count;
-}
-static DEVICE_ATTR_RW(state);
-
-static void video_output_release(struct device *dev)
-{
-       struct output_device *od = to_output_device(dev);
-       kfree(od);
-}
-
-static struct attribute *video_output_attrs[] = {
-       &dev_attr_state.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(video_output);
-
-static struct class video_output_class = {
-       .name = "video_output",
-       .dev_release = video_output_release,
-       .dev_groups = video_output_groups,
-};
-
-struct output_device *video_output_register(const char *name,
-       struct device *dev,
-       void *devdata,
-       struct output_properties *op)
-{
-       struct output_device *new_dev;
-       int ret_code = 0;
-
-       new_dev = kzalloc(sizeof(struct output_device),GFP_KERNEL);
-       if (!new_dev) {
-               ret_code = -ENOMEM;
-               goto error_return;
-       }
-       new_dev->props = op;
-       new_dev->dev.class = &video_output_class;
-       new_dev->dev.parent = dev;
-       dev_set_name(&new_dev->dev, "%s", name);
-       dev_set_drvdata(&new_dev->dev, devdata);
-       ret_code = device_register(&new_dev->dev);
-       if (ret_code) {
-               kfree(new_dev);
-               goto error_return;
-       }
-       return new_dev;
-
-error_return:
-       return ERR_PTR(ret_code);
-}
-EXPORT_SYMBOL(video_output_register);
-
-void video_output_unregister(struct output_device *dev)
-{
-       if (!dev)
-               return;
-       device_unregister(&dev->dev);
-}
-EXPORT_SYMBOL(video_output_unregister);
-
-static void __exit video_output_class_exit(void)
-{
-       class_unregister(&video_output_class);
-}
-
-static int __init video_output_class_init(void)
-{
-       return class_register(&video_output_class);
-}
-
-postcore_initcall(video_output_class_init);
-module_exit(video_output_class_exit);
index 37d06ea624aa953d40448bcd2a2d4943baf79fa9..61a6ac8fa8fc7ab00dcc7c33cea47981f2509d4b 100644 (file)
@@ -399,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                        state = BP_EAGAIN;
                        break;
                }
+               scrub_page(page);
 
-               pfn = page_to_pfn(page);
-               frame_list[i] = pfn_to_mfn(pfn);
+               frame_list[i] = page_to_pfn(page);
+       }
 
-               scrub_page(page);
+       /*
+        * Ensure that ballooned highmem pages don't have kmaps.
+        *
+        * Do this before changing the p2m as kmap_flush_unused()
+        * reads PTEs to obtain pages (and hence needs the original
+        * p2m entry).
+        */
+       kmap_flush_unused();
+
+       /* Update direct mapping, invalidate P2M, and add to balloon. */
+       for (i = 0; i < nr_pages; i++) {
+               pfn = frame_list[i];
+               frame_list[i] = pfn_to_mfn(pfn);
+               page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
                /*
@@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                }
 #endif
 
-               balloon_append(pfn_to_page(pfn));
+               balloon_append(page);
        }
 
-       /* Ensure that ballooned highmem pages don't have kmaps. */
-       kmap_flush_unused();
        flush_tlb_all();
 
        set_xen_guest_handle(reservation.extent_start, frame_list);
index d7ff9175730747488aac13569ed6125c3ae1a6ca..5db43fc100a413bf0ee55676d2b359e4db94e7ec 100644 (file)
@@ -166,7 +166,6 @@ static void evtchn_2l_handle_events(unsigned cpu)
        int start_word_idx, start_bit_idx;
        int word_idx, bit_idx;
        int i;
-       struct irq_desc *desc;
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
@@ -176,11 +175,8 @@ static void evtchn_2l_handle_events(unsigned cpu)
                unsigned int evtchn = evtchn_from_irq(irq);
                word_idx = evtchn / BITS_PER_LONG;
                bit_idx = evtchn % BITS_PER_LONG;
-               if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx)) {
-                       desc = irq_to_desc(irq);
-                       if (desc)
-                               generic_handle_irq_desc(irq, desc);
-               }
+               if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
+                       generic_handle_irq(irq);
        }
 
        /*
@@ -245,11 +241,8 @@ static void evtchn_2l_handle_events(unsigned cpu)
                        port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
                        irq = get_evtchn_to_irq(port);
 
-                       if (irq != -1) {
-                               desc = irq_to_desc(irq);
-                               if (desc)
-                                       generic_handle_irq_desc(irq, desc);
-                       }
+                       if (irq != -1)
+                               generic_handle_irq(irq);
 
                        bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;
 
index f4a9e3311297b7b562f9235dae03e92b9266a9cc..c3458f58de905efb9eb8ae6a611532abda763f9e 100644 (file)
@@ -336,9 +336,8 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
        BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-       cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
+       cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
 #endif
-
        xen_evtchn_port_bind_to_cpu(info, cpu);
 
        info->cpu = cpu;
@@ -373,10 +372,8 @@ static void xen_irq_init(unsigned irq)
 {
        struct irq_info *info;
 #ifdef CONFIG_SMP
-       struct irq_desc *desc = irq_to_desc(irq);
-
        /* By default all event channels notify CPU#0. */
-       cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+       cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
 #endif
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -490,13 +487,6 @@ static void pirq_query_unmask(int irq)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
 }
 
-static bool probing_irq(int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       return desc && desc->action == NULL;
-}
-
 static void eoi_pirq(struct irq_data *data)
 {
        int evtchn = evtchn_from_irq(data->irq);
@@ -538,8 +528,7 @@ static unsigned int __startup_pirq(unsigned int irq)
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
-               if (!probing_irq(irq))
-                       pr_info("Failed to obtain physical IRQ %d\n", irq);
+               pr_warn("Failed to obtain physical IRQ %d\n", irq);
                return 0;
        }
        evtchn = bind_pirq.port;
@@ -772,17 +761,12 @@ error_irq:
 
 int xen_destroy_irq(int irq)
 {
-       struct irq_desc *desc;
        struct physdev_unmap_pirq unmap_irq;
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;
 
        mutex_lock(&irq_mapping_update_lock);
 
-       desc = irq_to_desc(irq);
-       if (!desc)
-               goto out;
-
        if (xen_initial_domain()) {
                unmap_irq.pirq = info->u.pirq.pirq;
                unmap_irq.domid = info->u.pirq.domid;
@@ -1251,6 +1235,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 #ifdef CONFIG_X86
        exit_idle();
 #endif
+       inc_irq_stat(irq_hv_callback_count);
 
        __xen_evtchn_do_upcall();
 
@@ -1339,7 +1324,7 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
 {
-       unsigned tcpu = cpumask_first(dest);
+       unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
 
        return rebind_irq_to_cpu(data->irq, tcpu);
 }
index 1de2a191b395b342491ce10398f70d5bc7d395d3..96109a9972b6113cdb88d1861bf00353a00a0a93 100644 (file)
@@ -235,14 +235,10 @@ static uint32_t clear_linked(volatile event_word_t *word)
 static void handle_irq_for_port(unsigned port)
 {
        int irq;
-       struct irq_desc *desc;
 
        irq = get_evtchn_to_irq(port);
-       if (irq != -1) {
-               desc = irq_to_desc(irq);
-               if (desc)
-                       generic_handle_irq_desc(irq, desc);
-       }
+       if (irq != -1)
+               generic_handle_irq(irq);
 }
 
 static void consume_one_event(unsigned cpu,
index 80875fb770ed931681c75db5aa4209c15acfc7a7..3e62ee4b3b6641208e6833b639e25e5203f75ec9 100644 (file)
@@ -313,7 +313,7 @@ static void acpi_processor_hotplug_notify(acpi_handle handle,
                goto out;
        }
 
-       (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+       (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
 
 out:
        acpi_scan_lock_release();
index f8d18626969a48819ae5810f1d424dc4eacf3f31..34e40b733f9a8f27cdfb142f8ce67f081188d63b 100644 (file)
@@ -285,7 +285,7 @@ static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
                return;
        }
 
-       (void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
+       (void) acpi_evaluate_ost(handle, event, ost_code, NULL);
        return;
 }
 
index 40c4bc06b5fa0929af0b9b8ffddd9d59e6bcbc36..f83b754505f83e1f0906f7d0e4d628bf2ad16e83 100644 (file)
@@ -77,27 +77,14 @@ static int acpi_pad_pur(acpi_handle handle)
        return num;
 }
 
-/* Notify firmware how many CPUs are idle */
-static void acpi_pad_ost(acpi_handle handle, int stat,
-       uint32_t idle_nums)
-{
-       union acpi_object params[3] = {
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_INTEGER,},
-               {.type = ACPI_TYPE_BUFFER,},
-       };
-       struct acpi_object_list arg_list = {3, params};
-
-       params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
-       params[1].integer.value =  stat;
-       params[2].buffer.length = 4;
-       params[2].buffer.pointer = (void *)&idle_nums;
-       acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
-}
-
 static void acpi_pad_handle_notify(acpi_handle handle)
 {
        int idle_nums;
+       struct acpi_buffer param = {
+               .length = 4,
+               .pointer = (void *)&idle_nums,
+       };
+
 
        mutex_lock(&xen_cpu_lock);
        idle_nums = acpi_pad_pur(handle);
@@ -109,7 +96,8 @@ static void acpi_pad_handle_notify(acpi_handle handle)
        idle_nums = xen_acpi_pad_idle_cpus(idle_nums)
                    ?: xen_acpi_pad_idle_cpus_num();
        if (idle_nums >= 0)
-               acpi_pad_ost(handle, 0, idle_nums);
+               acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY,
+                                 0, &param);
        mutex_unlock(&xen_cpu_lock);
 }
 
index 6621f800812287f6f0fc27cdc7f71a2a32287af2..be75b500005d0d4f68b2d6e3855e71660016353d 100644 (file)
@@ -75,6 +75,7 @@ struct afs_call {
        const struct afs_call_type *type;       /* type of call */
        const struct afs_wait_mode *wait_mode;  /* completion wait mode */
        wait_queue_head_t       waitq;          /* processes awaiting completion */
+       work_func_t             async_workfn;
        struct work_struct      async_work;     /* asynchronous work processor */
        struct work_struct      work;           /* actual work processor */
        struct sk_buff_head     rx_queue;       /* received packets */
index 8ad8c2a0703a120c2dde7f425225f1d515b13f0c..ef943df73b8cdee2c6964439b81417a9c1110b12 100644 (file)
@@ -644,7 +644,7 @@ static void afs_process_async_call(struct work_struct *work)
 
                /* we can't just delete the call because the work item may be
                 * queued */
-               PREPARE_WORK(&call->async_work, afs_delete_async_call);
+               call->async_workfn = afs_delete_async_call;
                queue_work(afs_async_calls, &call->async_work);
        }
 
@@ -663,6 +663,13 @@ void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb)
        call->reply_size += len;
 }
 
+static void afs_async_workfn(struct work_struct *work)
+{
+       struct afs_call *call = container_of(work, struct afs_call, async_work);
+
+       call->async_workfn(work);
+}
+
 /*
  * accept the backlog of incoming calls
  */
@@ -685,7 +692,8 @@ static void afs_collect_incoming_call(struct work_struct *work)
                                return;
                        }
 
-                       INIT_WORK(&call->async_work, afs_process_async_call);
+                       call->async_workfn = afs_process_async_call;
+                       INIT_WORK(&call->async_work, afs_async_workfn);
                        call->wait_mode = &afs_async_incoming_call;
                        call->type = &afs_RXCMxxxx;
                        init_waitqueue_head(&call->waitq);
index 24084732b1d0b264b5c3262796f1ec3dd5fc505f..80ef38c73e5a16af0f9443d94bec3aae700370df 100644 (file)
@@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
 static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
                                int flags, const char *dev_name, void *data)
 {
-       struct dentry *root;
-       root = mount_pseudo(fs_type, "anon_inode:", NULL,
+       return mount_pseudo(fs_type, "anon_inode:", NULL,
                        &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
-       if (!IS_ERR(root)) {
-               struct super_block *s = root->d_sb;
-               anon_inode_inode = alloc_anon_inode(s);
-               if (IS_ERR(anon_inode_inode)) {
-                       dput(root);
-                       deactivate_locked_super(s);
-                       root = ERR_CAST(anon_inode_inode);
-               }
-       }
-       return root;
 }
 
 static struct file_system_type anon_inode_fs_type = {
@@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
 
 static int __init anon_inode_init(void)
 {
-       int error;
-
-       error = register_filesystem(&anon_inode_fs_type);
-       if (error)
-               goto err_exit;
        anon_inode_mnt = kern_mount(&anon_inode_fs_type);
-       if (IS_ERR(anon_inode_mnt)) {
-               error = PTR_ERR(anon_inode_mnt);
-               goto err_unregister_filesystem;
-       }
-       return 0;
+       if (IS_ERR(anon_inode_mnt))
+               panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
 
-err_unregister_filesystem:
-       unregister_filesystem(&anon_inode_fs_type);
-err_exit:
-       panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
+       anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
+       if (IS_ERR(anon_inode_inode))
+               panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
+
+       return 0;
 }
 
 fs_initcall(anon_inode_init);
index 6af20de2c1a3c29d5cc7c251d8fb3fa2182de445..19252b97f0cc9662940c2782c76e6782c7fa787a 100644 (file)
@@ -72,8 +72,8 @@ int compat_printk(const char *fmt, ...)
  * Not all architectures have sys_utime, so implement this in terms
  * of sys_utimes.
  */
-asmlinkage long compat_sys_utime(const char __user *filename,
-                                struct compat_utimbuf __user *t)
+COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
+                      struct compat_utimbuf __user *, t)
 {
        struct timespec tv[2];
 
@@ -87,7 +87,7 @@ asmlinkage long compat_sys_utime(const char __user *filename,
        return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
 }
 
-asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filename, struct compat_timespec __user *t, int flags)
+COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct compat_timespec __user *, t, int, flags)
 {
        struct timespec tv[2];
 
@@ -102,7 +102,7 @@ asmlinkage long compat_sys_utimensat(unsigned int dfd, const char __user *filena
        return do_utimes(dfd, filename, t ? tv : NULL, flags);
 }
 
-asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filename, struct compat_timeval __user *t)
+COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd, const char __user *, filename, struct compat_timeval __user *, t)
 {
        struct timespec tv[2];
 
@@ -121,7 +121,7 @@ asmlinkage long compat_sys_futimesat(unsigned int dfd, const char __user *filena
        return do_utimes(dfd, filename, t ? tv : NULL, 0);
 }
 
-asmlinkage long compat_sys_utimes(const char __user *filename, struct compat_timeval __user *t)
+COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct compat_timeval __user *, t)
 {
        return compat_sys_futimesat(AT_FDCWD, filename, t);
 }
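
fs/compat.c switches its open-coded "asmlinkage long compat_sys_*()" definitions over to COMPAT_SYSCALL_DEFINEn(). Broadly, the generated wrapper gives every compat syscall a uniform entry point in which the 32-bit arguments are explicitly narrowed from the full 64-bit registers before the body runs, instead of each architecture relying on hand-written wrappers for that. A toy userspace illustration of that idea only, not the kernel macro itself:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_ulong_t;	/* 32-bit userspace's 'unsigned long' */

/* the actual implementation sees properly sized arguments */
static long do_fcntl_like(unsigned int fd, unsigned int cmd, compat_ulong_t arg)
{
	printf("fd=%u cmd=%u arg=%u\n", fd, cmd, arg);
	return 0;
}

/*
 * Toy wrapper: on a 64-bit kernel the arguments arrive in full registers
 * whose upper halves may hold junk; narrow them explicitly before use.
 */
static long compat_entry(uint64_t r2, uint64_t r3, uint64_t r4)
{
	return do_fcntl_like((unsigned int)r2, (unsigned int)r3,
			     (compat_ulong_t)r4);
}

int main(void)
{
	/* garbage in the upper 32 bits is harmless once the wrapper narrows it */
	return (int)compat_entry(0xdeadbeef00000003ULL,
				 0xcafebabe00000001ULL,
				 0x1122334400000042ULL);
}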
@@ -159,8 +159,8 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
        return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
-asmlinkage long compat_sys_newstat(const char __user * filename,
-               struct compat_stat __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
+                      struct compat_stat __user *, statbuf)
 {
        struct kstat stat;
        int error;
@@ -171,8 +171,8 @@ asmlinkage long compat_sys_newstat(const char __user * filename,
        return cp_compat_stat(&stat, statbuf);
 }
 
-asmlinkage long compat_sys_newlstat(const char __user * filename,
-               struct compat_stat __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
+                      struct compat_stat __user *, statbuf)
 {
        struct kstat stat;
        int error;
@@ -184,9 +184,9 @@ asmlinkage long compat_sys_newlstat(const char __user * filename,
 }
 
 #ifndef __ARCH_WANT_STAT64
-asmlinkage long compat_sys_newfstatat(unsigned int dfd,
-               const char __user *filename,
-               struct compat_stat __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
+                      const char __user *, filename,
+                      struct compat_stat __user *, statbuf, int, flag)
 {
        struct kstat stat;
        int error;
@@ -198,8 +198,8 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd,
 }
 #endif
 
-asmlinkage long compat_sys_newfstat(unsigned int fd,
-               struct compat_stat __user * statbuf)
+COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
+                      struct compat_stat __user *, statbuf)
 {
        struct kstat stat;
        int error = vfs_fstat(fd, &stat);
@@ -247,7 +247,7 @@ static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs *
  * The following statfs calls are copies of code from fs/statfs.c and
  * should be checked against those from time to time
  */
-asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf)
+COMPAT_SYSCALL_DEFINE2(statfs, const char __user *, pathname, struct compat_statfs __user *, buf)
 {
        struct kstatfs tmp;
        int error = user_statfs(pathname, &tmp);
@@ -256,7 +256,7 @@ asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_sta
        return error;
 }
 
-asmlinkage long compat_sys_fstatfs(unsigned int fd, struct compat_statfs __user *buf)
+COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *, buf)
 {
        struct kstatfs tmp;
        int error = fd_statfs(fd, &tmp);
@@ -298,7 +298,7 @@ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstat
        return 0;
 }
 
-asmlinkage long compat_sys_statfs64(const char __user *pathname, compat_size_t sz, struct compat_statfs64 __user *buf)
+COMPAT_SYSCALL_DEFINE3(statfs64, const char __user *, pathname, compat_size_t, sz, struct compat_statfs64 __user *, buf)
 {
        struct kstatfs tmp;
        int error;
@@ -312,7 +312,7 @@ asmlinkage long compat_sys_statfs64(const char __user *pathname, compat_size_t s
        return error;
 }
 
-asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user *buf)
+COMPAT_SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, compat_size_t, sz, struct compat_statfs64 __user *, buf)
 {
        struct kstatfs tmp;
        int error;
@@ -331,7 +331,7 @@ asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct c
  * Given how simple this syscall is that apporach is more maintainable
  * than the various conversion hacks.
  */
-asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u)
+COMPAT_SYSCALL_DEFINE2(ustat, unsigned, dev, struct compat_ustat __user *, u)
 {
        struct compat_ustat tmp;
        struct kstatfs sbuf;
@@ -399,8 +399,8 @@ static int put_compat_flock64(struct flock *kfl, struct compat_flock64 __user *u
 }
 #endif
 
-asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
-               unsigned long arg)
+COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
+                      compat_ulong_t, arg)
 {
        mm_segment_t old_fs;
        struct flock f;
@@ -468,16 +468,15 @@ asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
        return ret;
 }
 
-asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
-               unsigned long arg)
+COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
+                      compat_ulong_t, arg)
 {
        if ((cmd == F_GETLK64) || (cmd == F_SETLK64) || (cmd == F_SETLKW64))
                return -EINVAL;
        return compat_sys_fcntl64(fd, cmd, arg);
 }
 
-asmlinkage long
-compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
+COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_reqs, u32 __user *, ctx32p)
 {
        long ret;
        aio_context_t ctx64;
@@ -496,32 +495,24 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
        return ret;
 }
 
-asmlinkage long
-compat_sys_io_getevents(aio_context_t ctx_id,
-                                unsigned long min_nr,
-                                unsigned long nr,
-                                struct io_event __user *events,
-                                struct compat_timespec __user *timeout)
+COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
+                      compat_long_t, min_nr,
+                      compat_long_t, nr,
+                      struct io_event __user *, events,
+                      struct compat_timespec __user *, timeout)
 {
-       long ret;
        struct timespec t;
        struct timespec __user *ut = NULL;
 
-       ret = -EFAULT;
-       if (unlikely(!access_ok(VERIFY_WRITE, events, 
-                               nr * sizeof(struct io_event))))
-               goto out;
        if (timeout) {
                if (get_compat_timespec(&t, timeout))
-                       goto out;
+                       return -EFAULT;
 
                ut = compat_alloc_user_space(sizeof(*ut));
                if (copy_to_user(ut, &t, sizeof(t)) )
-                       goto out;
+                       return -EFAULT;
        } 
-       ret = sys_io_getevents(ctx_id, min_nr, nr, events, ut);
-out:
-       return ret;
+       return sys_io_getevents(ctx_id, min_nr, nr, events, ut);
 }
 
 /* A write operation does a read from user space and vice versa */
@@ -617,8 +608,8 @@ copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64)
 
 #define MAX_AIO_SUBMITS        (PAGE_SIZE/sizeof(struct iocb *))
 
-asmlinkage long
-compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
+COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
+                      int, nr, u32 __user *, iocb)
 {
        struct iocb __user * __user *iocb64; 
        long ret;
@@ -770,10 +761,10 @@ static int do_nfs4_super_data_conv(void *raw_data)
 #define NCPFS_NAME      "ncpfs"
 #define NFS4_NAME      "nfs4"
 
-asmlinkage long compat_sys_mount(const char __user * dev_name,
-                                const char __user * dir_name,
-                                const char __user * type, unsigned long flags,
-                                const void __user * data)
+COMPAT_SYSCALL_DEFINE5(mount, const char __user *, dev_name,
+                      const char __user *, dir_name,
+                      const char __user *, type, compat_ulong_t, flags,
+                      const void __user *, data)
 {
        char *kernel_type;
        unsigned long data_page;
@@ -869,8 +860,8 @@ efault:
        return -EFAULT;
 }
 
-asmlinkage long compat_sys_old_readdir(unsigned int fd,
-       struct compat_old_linux_dirent __user *dirent, unsigned int count)
+COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
+               struct compat_old_linux_dirent __user *, dirent, unsigned int, count)
 {
        int error;
        struct fd f = fdget(fd);
@@ -948,8 +939,8 @@ efault:
        return -EFAULT;
 }
 
-asmlinkage long compat_sys_getdents(unsigned int fd,
-               struct compat_linux_dirent __user *dirent, unsigned int count)
+COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd,
+               struct compat_linux_dirent __user *, dirent, unsigned int, count)
 {
        struct fd f;
        struct compat_linux_dirent __user * lastdirent;
@@ -981,7 +972,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
        return error;
 }
 
-#ifndef __ARCH_OMIT_COMPAT_SYS_GETDENTS64
+#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
 
 struct compat_getdents_callback64 {
        struct dir_context ctx;
@@ -1033,8 +1024,8 @@ efault:
        return -EFAULT;
 }
 
-asmlinkage long compat_sys_getdents64(unsigned int fd,
-               struct linux_dirent64 __user * dirent, unsigned int count)
+COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd,
+               struct linux_dirent64 __user *, dirent, unsigned int, count)
 {
        struct fd f;
        struct linux_dirent64 __user * lastdirent;
@@ -1066,7 +1057,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
        fdput(f);
        return error;
 }
-#endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
+#endif /* __ARCH_WANT_COMPAT_SYS_GETDENTS64 */
 
 /*
  * Exactly like fs/open.c:sys_open(), except that it doesn't set the
@@ -1287,9 +1278,9 @@ out_nofds:
        return ret;
 }
 
-asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
-       compat_ulong_t __user *outp, compat_ulong_t __user *exp,
-       struct compat_timeval __user *tvp)
+COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
+       compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
+       struct compat_timeval __user *tvp)
 {
        struct timespec end_time, *to = NULL;
        struct compat_timeval tv;
@@ -1320,7 +1311,7 @@ struct compat_sel_arg_struct {
        compat_uptr_t tvp;
 };
 
-asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg)
+COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
 {
        struct compat_sel_arg_struct a;
 
@@ -1381,9 +1372,9 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
        return ret;
 }
 
-asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
-       compat_ulong_t __user *outp, compat_ulong_t __user *exp,
-       struct compat_timespec __user *tsp, void __user *sig)
+COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
+       compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
+       struct compat_timespec __user *, tsp, void __user *, sig)
 {
        compat_size_t sigsetsize = 0;
        compat_uptr_t up = 0;
@@ -1400,9 +1391,9 @@ asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
                                 sigsetsize);
 }
 
-asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
-       unsigned int nfds, struct compat_timespec __user *tsp,
-       const compat_sigset_t __user *sigmask, compat_size_t sigsetsize)
+COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
+       unsigned int,  nfds, struct compat_timespec __user *, tsp,
+       const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
 {
        compat_sigset_t ss32;
        sigset_t ksigmask, sigsaved;
index a81147e2e4ef2eef2c2fb330a0f9ce464e2c67f6..4d24d17bcfc1dc3ecd917f12f2a889da1a506e24 100644 (file)
@@ -88,6 +88,11 @@ static void cputime_to_compat_timeval(const cputime_t cputime,
 #define        ELF_HWCAP               COMPAT_ELF_HWCAP
 #endif
 
+#ifdef COMPAT_ELF_HWCAP2
+#undef ELF_HWCAP2
+#define        ELF_HWCAP2              COMPAT_ELF_HWCAP2
+#endif
+
 #ifdef COMPAT_ARCH_DLINFO
 #undef ARCH_DLINFO
 #define        ARCH_DLINFO             COMPAT_ARCH_DLINFO
index 3881610b64384cee634367c3c35d237a4fa2f2aa..e82289047272d6e636583e20969f59450987da05 100644 (file)
@@ -1538,9 +1538,10 @@ static int compat_ioctl_check_table(unsigned int xcmd)
        return ioctl_pointer[i] == xcmd;
 }
 
-asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
-                               unsigned long arg)
+COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
+                      compat_ulong_t, arg32)
 {
+       unsigned long arg = arg32;
        struct fd f = fdget(fd);
        int error = -EBADF;
        if (!f.file)
index 265e0ce9769c70db65d5f9df11c4365f44c6dd29..ca02c13a84aa24d55b18bbced577ece923605665 100644 (file)
@@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
        u32 dlen = ACCESS_ONCE(name->len);
        char *p;
 
-       if (*buflen < dlen + 1)
-               return -ENAMETOOLONG;
        *buflen -= dlen + 1;
+       if (*buflen < 0)
+               return -ENAMETOOLONG;
        p = *buffer -= dlen + 1;
        *p++ = '/';
        while (dlen--) {
index 8dd524f322847b346e119c6d5c5b3693de6fb8bc..cdb2971192a53fbfe2e340feb9819b7454f8ffa2 100644 (file)
@@ -21,7 +21,7 @@ static ssize_t efivarfs_file_write(struct file *file,
        u32 attributes;
        struct inode *inode = file->f_mapping->host;
        unsigned long datasize = count - sizeof(attributes);
-       ssize_t bytes = 0;
+       ssize_t bytes;
        bool set = false;
 
        if (count < sizeof(attributes))
@@ -33,14 +33,9 @@ static ssize_t efivarfs_file_write(struct file *file,
        if (attributes & ~(EFI_VARIABLE_MASK))
                return -EINVAL;
 
-       data = kmalloc(datasize, GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-
-       if (copy_from_user(data, userbuf + sizeof(attributes), datasize)) {
-               bytes = -EFAULT;
-               goto out;
-       }
+       data = memdup_user(userbuf + sizeof(attributes), datasize);
+       if (IS_ERR(data))
+               return PTR_ERR(data);
 
        bytes = efivar_entry_set_get_size(var, attributes, &datasize,
                                          data, &set);
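The efivarfs hunk above replaces an open-coded kmalloc() + copy_from_user() pair with memdup_user(), which allocates the kernel buffer, copies the user data into it, and returns either the buffer or an ERR_PTR() value. A minimal sketch of the pattern, with names that are illustrative rather than taken from this commit:

static int copy_blob_from_user(const void __user *ubuf, size_t len)
{
	void *buf = memdup_user(ubuf, len);	/* kmalloc() + copy_from_user() in one step */

	if (IS_ERR(buf))
		return PTR_ERR(buf);		/* -ENOMEM or -EFAULT */

	/* ... consume buf ... */
	kfree(buf);
	return 0;
}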
index 3d78fccdd723e21119b6c93c70e2564a11d0e6a3..4f59402fdda541bd22349aad786a95d12cc6d089 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1619,9 +1619,9 @@ SYSCALL_DEFINE3(execve,
        return do_execve(getname(filename), argv, envp);
 }
 #ifdef CONFIG_COMPAT
-asmlinkage long compat_sys_execve(const char __user * filename,
-       const compat_uptr_t __user * argv,
-       const compat_uptr_t __user * envp)
+COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
+       const compat_uptr_t __user *, argv,
+       const compat_uptr_t __user *, envp)
 {
        return compat_do_execve(getname(filename), argv, envp);
 }
index 6e39895a91b80aaae3914cc3a51756b3fa48a4c8..24bfd7ff30491457283b22821eb5a9b93b2adec4 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
 #include <linux/aio.h>
+#include <linux/bitops.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -3921,18 +3922,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
 void ext4_set_inode_flags(struct inode *inode)
 {
        unsigned int flags = EXT4_I(inode)->i_flags;
+       unsigned int new_fl = 0;
 
-       inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
        if (flags & EXT4_SYNC_FL)
-               inode->i_flags |= S_SYNC;
+               new_fl |= S_SYNC;
        if (flags & EXT4_APPEND_FL)
-               inode->i_flags |= S_APPEND;
+               new_fl |= S_APPEND;
        if (flags & EXT4_IMMUTABLE_FL)
-               inode->i_flags |= S_IMMUTABLE;
+               new_fl |= S_IMMUTABLE;
        if (flags & EXT4_NOATIME_FL)
-               inode->i_flags |= S_NOATIME;
+               new_fl |= S_NOATIME;
        if (flags & EXT4_DIRSYNC_FL)
-               inode->i_flags |= S_DIRSYNC;
+               new_fl |= S_DIRSYNC;
+       set_mask_bits(&inode->i_flags,
+                     S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
 }
 
 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
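ext4_set_inode_flags() now computes the new flag set up front and applies it with set_mask_bits() (hence the new <linux/bitops.h> include) instead of clearing and re-setting bits on inode->i_flags non-atomically, which could race with concurrent updates of unrelated i_flags bits. Conceptually, set_mask_bits(ptr, mask, bits) behaves like the cmpxchg loop below; this is a simplified sketch, not the kernel's exact implementation:

static inline void set_mask_bits_sketch(unsigned long *p, unsigned long mask,
					unsigned long bits)
{
	unsigned long old, new;

	do {
		old = ACCESS_ONCE(*p);
		new = (old & ~mask) | bits;	/* clear the masked bits, set the new ones */
	} while (cmpxchg(p, old, new) != old);	/* retry if *p changed underneath us */
}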
index 60a45e9f53231379c87b9ee75cb64eb8d6d71979..b61293badfb1a9c98742a5bcc790751979251741 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -497,7 +497,7 @@ repeat:
        error = fd;
 #if 1
        /* Sanity check */
-       if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
+       if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
                printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
                rcu_assign_pointer(fdt->fd[fd], NULL);
        }
@@ -713,27 +713,16 @@ unsigned long __fdget_raw(unsigned int fd)
 
 unsigned long __fdget_pos(unsigned int fd)
 {
-       struct files_struct *files = current->files;
-       struct file *file;
-       unsigned long v;
-
-       if (atomic_read(&files->count) == 1) {
-               file = __fcheck_files(files, fd);
-               v = 0;
-       } else {
-               file = __fget(fd, 0);
-               v = FDPUT_FPUT;
-       }
-       if (!file)
-               return 0;
+       unsigned long v = __fdget(fd);
+       struct file *file = (struct file *)(v & ~3);
 
-       if (file->f_mode & FMODE_ATOMIC_POS) {
+       if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
                if (file_count(file) > 1) {
                        v |= FDPUT_POS_UNLOCK;
                        mutex_lock(&file->f_pos_lock);
                }
        }
-       return v | (unsigned long)file;
+       return v;
 }
 
 /*
index a17458ca6f29e4dbf1f1a1b8bb9e97949b279d29..b29e42f05f3442b887e58ca1972a9333ec474bd7 100644 (file)
@@ -19,13 +19,13 @@ struct mnt_pcp {
 };
 
 struct mountpoint {
-       struct list_head m_hash;
+       struct hlist_node m_hash;
        struct dentry *m_dentry;
        int m_count;
 };
 
 struct mount {
-       struct list_head mnt_hash;
+       struct hlist_node mnt_hash;
        struct mount *mnt_parent;
        struct dentry *mnt_mountpoint;
        struct vfsmount mnt;
index 2f730ef9b4b3da78afd6a830c13c7ba13c706ffe..4b491b4319905b2dd6698d3f58aae1d0d0141499 100644 (file)
@@ -1109,7 +1109,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                        return false;
 
                if (!d_mountpoint(path->dentry))
-                       break;
+                       return true;
 
                mounted = __lookup_mnt(path->mnt, path->dentry);
                if (!mounted)
@@ -1125,20 +1125,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                 */
                *inode = path->dentry->d_inode;
        }
-       return true;
-}
-
-static void follow_mount_rcu(struct nameidata *nd)
-{
-       while (d_mountpoint(nd->path.dentry)) {
-               struct mount *mounted;
-               mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
-               if (!mounted)
-                       break;
-               nd->path.mnt = &mounted->mnt;
-               nd->path.dentry = mounted->mnt.mnt_root;
-               nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-       }
+       return read_seqretry(&mount_lock, nd->m_seq);
 }
 
 static int follow_dotdot_rcu(struct nameidata *nd)
@@ -1166,7 +1153,17 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                        break;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
        }
-       follow_mount_rcu(nd);
+       while (d_mountpoint(nd->path.dentry)) {
+               struct mount *mounted;
+               mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
+               if (!mounted)
+                       break;
+               nd->path.mnt = &mounted->mnt;
+               nd->path.dentry = mounted->mnt.mnt_root;
+               nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
+               if (!read_seqretry(&mount_lock, nd->m_seq))
+                       goto failed;
+       }
        nd->inode = nd->path.dentry->d_inode;
        return 0;
 
index 22e536705c45d128ab8651759cea74d83f481ab1..2ffc5a2905d463e828c387fbfb92a31f219f98bc 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
+#include <linux/bootmem.h>
 #include "pnode.h"
 #include "internal.h"
 
-#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
-#define HASH_SIZE (1UL << HASH_SHIFT)
+static unsigned int m_hash_mask __read_mostly;
+static unsigned int m_hash_shift __read_mostly;
+static unsigned int mp_hash_mask __read_mostly;
+static unsigned int mp_hash_shift __read_mostly;
+
+static __initdata unsigned long mhash_entries;
+static int __init set_mhash_entries(char *str)
+{
+       if (!str)
+               return 0;
+       mhash_entries = simple_strtoul(str, &str, 0);
+       return 1;
+}
+__setup("mhash_entries=", set_mhash_entries);
+
+static __initdata unsigned long mphash_entries;
+static int __init set_mphash_entries(char *str)
+{
+       if (!str)
+               return 0;
+       mphash_entries = simple_strtoul(str, &str, 0);
+       return 1;
+}
+__setup("mphash_entries=", set_mphash_entries);
 
 static int event;
 static DEFINE_IDA(mnt_id_ida);
@@ -36,8 +59,8 @@ static DEFINE_SPINLOCK(mnt_id_lock);
 static int mnt_id_start = 0;
 static int mnt_group_start = 1;
 
-static struct list_head *mount_hashtable __read_mostly;
-static struct list_head *mountpoint_hashtable __read_mostly;
+static struct hlist_head *mount_hashtable __read_mostly;
+static struct hlist_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
 static DECLARE_RWSEM(namespace_sem);
 
@@ -55,12 +78,19 @@ EXPORT_SYMBOL_GPL(fs_kobj);
  */
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
 
-static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
+static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
 {
        unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
        tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
-       tmp = tmp + (tmp >> HASH_SHIFT);
-       return tmp & (HASH_SIZE - 1);
+       tmp = tmp + (tmp >> m_hash_shift);
+       return &mount_hashtable[tmp & m_hash_mask];
+}
+
+static inline struct hlist_head *mp_hash(struct dentry *dentry)
+{
+       unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
+       tmp = tmp + (tmp >> mp_hash_shift);
+       return &mountpoint_hashtable[tmp & mp_hash_mask];
 }
 
 /*
@@ -187,7 +217,7 @@ static struct mount *alloc_vfsmnt(const char *name)
                mnt->mnt_writers = 0;
 #endif
 
-               INIT_LIST_HEAD(&mnt->mnt_hash);
+               INIT_HLIST_NODE(&mnt->mnt_hash);
                INIT_LIST_HEAD(&mnt->mnt_child);
                INIT_LIST_HEAD(&mnt->mnt_mounts);
                INIT_LIST_HEAD(&mnt->mnt_list);
@@ -575,10 +605,10 @@ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
  */
 struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
 {
-       struct list_head *head = mount_hashtable + hash(mnt, dentry);
+       struct hlist_head *head = m_hash(mnt, dentry);
        struct mount *p;
 
-       list_for_each_entry_rcu(p, head, mnt_hash)
+       hlist_for_each_entry_rcu(p, head, mnt_hash)
                if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
                        return p;
        return NULL;
@@ -590,13 +620,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
  */
 struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
 {
-       struct list_head *head = mount_hashtable + hash(mnt, dentry);
-       struct mount *p;
-
-       list_for_each_entry_reverse(p, head, mnt_hash)
-               if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
-                       return p;
-       return NULL;
+       struct mount *p, *res;
+       res = p = __lookup_mnt(mnt, dentry);
+       if (!p)
+               goto out;
+       hlist_for_each_entry_continue(p, mnt_hash) {
+               if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
+                       break;
+               res = p;
+       }
+out:
+       return res;
 }
 
 /*
@@ -633,11 +667,11 @@ struct vfsmount *lookup_mnt(struct path *path)
 
 static struct mountpoint *new_mountpoint(struct dentry *dentry)
 {
-       struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry);
+       struct hlist_head *chain = mp_hash(dentry);
        struct mountpoint *mp;
        int ret;
 
-       list_for_each_entry(mp, chain, m_hash) {
+       hlist_for_each_entry(mp, chain, m_hash) {
                if (mp->m_dentry == dentry) {
                        /* might be worth a WARN_ON() */
                        if (d_unlinked(dentry))
@@ -659,7 +693,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry)
 
        mp->m_dentry = dentry;
        mp->m_count = 1;
-       list_add(&mp->m_hash, chain);
+       hlist_add_head(&mp->m_hash, chain);
        return mp;
 }
 
@@ -670,7 +704,7 @@ static void put_mountpoint(struct mountpoint *mp)
                spin_lock(&dentry->d_lock);
                dentry->d_flags &= ~DCACHE_MOUNTED;
                spin_unlock(&dentry->d_lock);
-               list_del(&mp->m_hash);
+               hlist_del(&mp->m_hash);
                kfree(mp);
        }
 }
@@ -712,7 +746,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
        mnt->mnt_parent = mnt;
        mnt->mnt_mountpoint = mnt->mnt.mnt_root;
        list_del_init(&mnt->mnt_child);
-       list_del_init(&mnt->mnt_hash);
+       hlist_del_init_rcu(&mnt->mnt_hash);
        put_mountpoint(mnt->mnt_mp);
        mnt->mnt_mp = NULL;
 }
@@ -739,15 +773,14 @@ static void attach_mnt(struct mount *mnt,
                        struct mountpoint *mp)
 {
        mnt_set_mountpoint(parent, mp, mnt);
-       list_add_tail(&mnt->mnt_hash, mount_hashtable +
-                       hash(&parent->mnt, mp->m_dentry));
+       hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
        list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
 }
 
 /*
  * vfsmount lock must be held for write
  */
-static void commit_tree(struct mount *mnt)
+static void commit_tree(struct mount *mnt, struct mount *shadows)
 {
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;
@@ -762,8 +795,11 @@ static void commit_tree(struct mount *mnt)
 
        list_splice(&head, n->list.prev);
 
-       list_add_tail(&mnt->mnt_hash, mount_hashtable +
-                               hash(&parent->mnt, mnt->mnt_mountpoint));
+       if (shadows)
+               hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
+       else
+               hlist_add_head_rcu(&mnt->mnt_hash,
+                               m_hash(&parent->mnt, mnt->mnt_mountpoint));
        list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
        touch_mnt_namespace(n);
 }
@@ -1153,26 +1189,28 @@ int may_umount(struct vfsmount *mnt)
 
 EXPORT_SYMBOL(may_umount);
 
-static LIST_HEAD(unmounted);   /* protected by namespace_sem */
+static HLIST_HEAD(unmounted);  /* protected by namespace_sem */
 
 static void namespace_unlock(void)
 {
        struct mount *mnt;
-       LIST_HEAD(head);
+       struct hlist_head head = unmounted;
 
-       if (likely(list_empty(&unmounted))) {
+       if (likely(hlist_empty(&head))) {
                up_write(&namespace_sem);
                return;
        }
 
-       list_splice_init(&unmounted, &head);
+       head.first->pprev = &head.first;
+       INIT_HLIST_HEAD(&unmounted);
+
        up_write(&namespace_sem);
 
        synchronize_rcu();
 
-       while (!list_empty(&head)) {
-               mnt = list_first_entry(&head, struct mount, mnt_hash);
-               list_del_init(&mnt->mnt_hash);
+       while (!hlist_empty(&head)) {
+               mnt = hlist_entry(head.first, struct mount, mnt_hash);
+               hlist_del_init(&mnt->mnt_hash);
                if (mnt->mnt_ex_mountpoint.mnt)
                        path_put(&mnt->mnt_ex_mountpoint);
                mntput(&mnt->mnt);
@@ -1193,16 +1231,19 @@ static inline void namespace_lock(void)
  */
 void umount_tree(struct mount *mnt, int how)
 {
-       LIST_HEAD(tmp_list);
+       HLIST_HEAD(tmp_list);
        struct mount *p;
+       struct mount *last = NULL;
 
-       for (p = mnt; p; p = next_mnt(p, mnt))
-               list_move(&p->mnt_hash, &tmp_list);
+       for (p = mnt; p; p = next_mnt(p, mnt)) {
+               hlist_del_init_rcu(&p->mnt_hash);
+               hlist_add_head(&p->mnt_hash, &tmp_list);
+       }
 
        if (how)
                propagate_umount(&tmp_list);
 
-       list_for_each_entry(p, &tmp_list, mnt_hash) {
+       hlist_for_each_entry(p, &tmp_list, mnt_hash) {
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
                __touch_mnt_namespace(p->mnt_ns);
@@ -1220,8 +1261,13 @@ void umount_tree(struct mount *mnt, int how)
                        p->mnt_mp = NULL;
                }
                change_mnt_propagation(p, MS_PRIVATE);
+               last = p;
+       }
+       if (last) {
+               last->mnt_hash.next = unmounted.first;
+               unmounted.first = tmp_list.first;
+               unmounted.first->pprev = &unmounted.first;
        }
-       list_splice(&tmp_list, &unmounted);
 }
 
 static void shrink_submounts(struct mount *mnt);
@@ -1605,24 +1651,23 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                        struct mountpoint *dest_mp,
                        struct path *parent_path)
 {
-       LIST_HEAD(tree_list);
+       HLIST_HEAD(tree_list);
        struct mount *child, *p;
+       struct hlist_node *n;
        int err;
 
        if (IS_MNT_SHARED(dest_mnt)) {
                err = invent_group_ids(source_mnt, true);
                if (err)
                        goto out;
-       }
-       err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
-       if (err)
-               goto out_cleanup_ids;
-
-       lock_mount_hash();
-
-       if (IS_MNT_SHARED(dest_mnt)) {
+               err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+               if (err)
+                       goto out_cleanup_ids;
+               lock_mount_hash();
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
                        set_mnt_shared(p);
+       } else {
+               lock_mount_hash();
        }
        if (parent_path) {
                detach_mnt(source_mnt, parent_path);
@@ -1630,20 +1675,22 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                touch_mnt_namespace(source_mnt->mnt_ns);
        } else {
                mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
-               commit_tree(source_mnt);
+               commit_tree(source_mnt, NULL);
        }
 
-       list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
-               list_del_init(&child->mnt_hash);
-               commit_tree(child);
+       hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
+               struct mount *q;
+               hlist_del_init(&child->mnt_hash);
+               q = __lookup_mnt_last(&child->mnt_parent->mnt,
+                                     child->mnt_mountpoint);
+               commit_tree(child, q);
        }
        unlock_mount_hash();
 
        return 0;
 
  out_cleanup_ids:
-       if (IS_MNT_SHARED(dest_mnt))
-               cleanup_group_ids(source_mnt, NULL);
+       cleanup_group_ids(source_mnt, NULL);
  out:
        return err;
 }
@@ -2777,18 +2824,24 @@ void __init mnt_init(void)
        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
-       mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
-       mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
+       mount_hashtable = alloc_large_system_hash("Mount-cache",
+                               sizeof(struct hlist_head),
+                               mhash_entries, 19,
+                               0,
+                               &m_hash_shift, &m_hash_mask, 0, 0);
+       mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
+                               sizeof(struct hlist_head),
+                               mphash_entries, 19,
+                               0,
+                               &mp_hash_shift, &mp_hash_mask, 0, 0);
 
        if (!mount_hashtable || !mountpoint_hashtable)
                panic("Failed to allocate mount hash table\n");
 
-       printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
-
-       for (u = 0; u < HASH_SIZE; u++)
-               INIT_LIST_HEAD(&mount_hashtable[u]);
-       for (u = 0; u < HASH_SIZE; u++)
-               INIT_LIST_HEAD(&mountpoint_hashtable[u]);
+       for (u = 0; u <= m_hash_mask; u++)
+               INIT_HLIST_HEAD(&mount_hashtable[u]);
+       for (u = 0; u <= mp_hash_mask; u++)
+               INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
 
        kernfs_init();
 
index 017d3cb5e99b4391027fe37b7c56c23966754e2d..6d7be3f8035631cb207a5733c71dbfc2f800fa4c 100644 (file)
@@ -449,6 +449,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
        fh_lock(fhp);
        host_err = notify_change(dentry, iap, NULL);
        fh_unlock(fhp);
+       err = nfserrno(host_err);
 
 out_put_write_access:
        if (size_change)
index 1324e6600e57378b8e4d1624dc1366af57fde7b2..ca5ce14cbddcef81498305f8ccbccc978750faed 100644 (file)
@@ -346,7 +346,9 @@ int ocfs2_cluster_connect(const char *stack_name,
 
        strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
        new_conn->cc_namelen = grouplen;
-       strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1);
+       if (cluster_name_len)
+               strlcpy(new_conn->cc_cluster_name, cluster_name,
+                       CLUSTER_NAME_MAX + 1);
        new_conn->cc_cluster_name_len = cluster_name_len;
        new_conn->cc_recovery_handler = recovery_handler;
        new_conn->cc_recovery_data = recovery_data;
index c7221bb19801e6b6251ad1ed3cea6f4f79dcf876..88396df725b4bbe84dc7d57eaf7a259877e5d87c 100644 (file)
@@ -220,14 +220,14 @@ static struct mount *get_source(struct mount *dest,
  * @tree_list : list of heads of trees to be attached.
  */
 int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
-                   struct mount *source_mnt, struct list_head *tree_list)
+                   struct mount *source_mnt, struct hlist_head *tree_list)
 {
        struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
        struct mount *m, *child;
        int ret = 0;
        struct mount *prev_dest_mnt = dest_mnt;
        struct mount *prev_src_mnt  = source_mnt;
-       LIST_HEAD(tmp_list);
+       HLIST_HEAD(tmp_list);
 
        for (m = propagation_next(dest_mnt, dest_mnt); m;
                        m = propagation_next(m, dest_mnt)) {
@@ -246,27 +246,29 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
                child = copy_tree(source, source->mnt.mnt_root, type);
                if (IS_ERR(child)) {
                        ret = PTR_ERR(child);
-                       list_splice(tree_list, tmp_list.prev);
+                       tmp_list = *tree_list;
+                       tmp_list.first->pprev = &tmp_list.first;
+                       INIT_HLIST_HEAD(tree_list);
                        goto out;
                }
 
                if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
                        mnt_set_mountpoint(m, dest_mp, child);
-                       list_add_tail(&child->mnt_hash, tree_list);
+                       hlist_add_head(&child->mnt_hash, tree_list);
                } else {
                        /*
                         * This can happen if the parent mount was bind mounted
                         * on some subdirectory of a shared/slave mount.
                         */
-                       list_add_tail(&child->mnt_hash, &tmp_list);
+                       hlist_add_head(&child->mnt_hash, &tmp_list);
                }
                prev_dest_mnt = m;
                prev_src_mnt  = child;
        }
 out:
        lock_mount_hash();
-       while (!list_empty(&tmp_list)) {
-               child = list_first_entry(&tmp_list, struct mount, mnt_hash);
+       while (!hlist_empty(&tmp_list)) {
+               child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
                umount_tree(child, 0);
        }
        unlock_mount_hash();
@@ -338,8 +340,10 @@ static void __propagate_umount(struct mount *mnt)
                 * umount the child only if the child has no
                 * other children
                 */
-               if (child && list_empty(&child->mnt_mounts))
-                       list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
+               if (child && list_empty(&child->mnt_mounts)) {
+                       hlist_del_init_rcu(&child->mnt_hash);
+                       hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
+               }
        }
 }
 
@@ -350,11 +354,11 @@ static void __propagate_umount(struct mount *mnt)
  *
  * vfsmount lock must be held for write
  */
-int propagate_umount(struct list_head *list)
+int propagate_umount(struct hlist_head *list)
 {
        struct mount *mnt;
 
-       list_for_each_entry(mnt, list, mnt_hash)
+       hlist_for_each_entry(mnt, list, mnt_hash)
                __propagate_umount(mnt);
        return 0;
 }
index 59e7eda1851ec447d705a02866c19a07d91d2c99..fc28a27fa89233d24b90bdd748477994b8713ecd 100644 (file)
@@ -36,8 +36,8 @@ static inline void set_mnt_shared(struct mount *mnt)
 
 void change_mnt_propagation(struct mount *, int);
 int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
-               struct list_head *);
-int propagate_umount(struct list_head *);
+               struct hlist_head *);
+int propagate_umount(struct hlist_head *);
 int propagate_mount_busy(struct mount *, int);
 void mnt_release_group_id(struct mount *);
 int get_dominating_id(struct mount *mnt, const struct path *root);
index 6f599c62f0cc939ce2b68a75dbcd38d366ee7fa9..9d231e9e5f0ef48cd7ef372437d85fbf9b8cbc2e 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/slab.h>
 #include <linux/time.h>
 #include <linux/irqnr.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 #include <linux/tick.h>
 
 #ifndef arch_irq_stat_cpu
index 7141b8d0ca9ed9800afb1ae87def5f8aa06b8f46..33de567c25af4b04a6ba283caaf5e4f6d9b4ecdb 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/seq_file.h>
 #include <linux/time.h>
 #include <linux/kernel_stat.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 
 static int uptime_proc_show(struct seq_file *m, void *v)
 {
index 54e19b9392dc74381fec154fe3f0ceab32ce9bd8..31c6efa431839e41f4b39b314cb640dd89b68ecc 100644 (file)
@@ -307,7 +307,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
                unsigned int, whence)
 {
        int retval;
-       struct fd f = fdget(fd);
+       struct fd f = fdget_pos(fd);
        loff_t offset;
 
        if (!f.file)
@@ -327,7 +327,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
                        retval = 0;
        }
 out_putf:
-       fdput(f);
+       fdput_pos(f);
        return retval;
 }
 #endif
@@ -994,9 +994,9 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
        return ret;
 }
 
-COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
-               const struct compat_iovec __user *,vec,
-               unsigned long, vlen, loff_t, pos)
+static long __compat_sys_preadv64(unsigned long fd,
+                                 const struct compat_iovec __user *vec,
+                                 unsigned long vlen, loff_t pos)
 {
        struct fd f;
        ssize_t ret;
@@ -1013,12 +1013,22 @@ COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
        return ret;
 }
 
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+COMPAT_SYSCALL_DEFINE4(preadv64, unsigned long, fd,
+               const struct compat_iovec __user *,vec,
+               unsigned long, vlen, loff_t, pos)
+{
+       return __compat_sys_preadv64(fd, vec, vlen, pos);
+}
+#endif
+
 COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
                const struct compat_iovec __user *,vec,
                compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
        loff_t pos = ((loff_t)pos_high << 32) | pos_low;
-       return compat_sys_preadv64(fd, vec, vlen, pos);
+
+       return __compat_sys_preadv64(fd, vec, vlen, pos);
 }
 
 static size_t compat_writev(struct file *file,
@@ -1061,9 +1071,9 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
        return ret;
 }
 
-COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
-               const struct compat_iovec __user *,vec,
-               unsigned long, vlen, loff_t, pos)
+static long __compat_sys_pwritev64(unsigned long fd,
+                                  const struct compat_iovec __user *vec,
+                                  unsigned long vlen, loff_t pos)
 {
        struct fd f;
        ssize_t ret;
@@ -1080,12 +1090,22 @@ COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
        return ret;
 }
 
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+COMPAT_SYSCALL_DEFINE4(pwritev64, unsigned long, fd,
+               const struct compat_iovec __user *,vec,
+               unsigned long, vlen, loff_t, pos)
+{
+       return __compat_sys_pwritev64(fd, vec, vlen, pos);
+}
+#endif
+
 COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
                const struct compat_iovec __user *,vec,
                compat_ulong_t, vlen, u32, pos_low, u32, pos_high)
 {
        loff_t pos = ((loff_t)pos_high << 32) | pos_low;
-       return compat_sys_pwritev64(fd, vec, vlen, pos);
+
+       return __compat_sys_pwritev64(fd, vec, vlen, pos);
 }
 #endif
 
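With the preadv64/pwritev64 bodies moved into static helpers, the COMPAT_SYSCALL_DEFINE4 entry points above are only emitted for architectures that explicitly ask for them, while the compat preadv/pwritev paths call the helpers directly. An architecture that still wants the 64-bit-offset compat variants opts in from its own headers; a hedged sketch, since the exact file placement varies by architecture:

/* e.g. in the architecture's asm/unistd.h (placement is arch-specific) */
#define __ARCH_WANT_COMPAT_SYS_PREADV64
#define __ARCH_WANT_COMPAT_SYS_PWRITEV64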
index 929312180dd00c39e48de0a4df37a796b326be35..0013142c04759b527485f2abde83c084e17a7f8f 100644 (file)
@@ -317,6 +317,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
            (clockid != CLOCK_MONOTONIC &&
             clockid != CLOCK_REALTIME &&
             clockid != CLOCK_REALTIME_ALARM &&
+            clockid != CLOCK_BOOTTIME &&
             clockid != CLOCK_BOOTTIME_ALARM))
                return -EINVAL;
 
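The timerfd hunk above adds CLOCK_BOOTTIME to the clock IDs accepted by timerfd_create(); previously only the _ALARM variant was permitted, so asking for a plain boottime timer failed with -EINVAL. A minimal userspace check, illustrative only:

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	/* CLOCK_BOOTTIME also counts time spent in suspend, unlike CLOCK_MONOTONIC */
	int fd = timerfd_create(CLOCK_BOOTTIME, TFD_CLOEXEC);

	if (fd < 0) {
		perror("timerfd_create");	/* EINVAL on kernels without this change */
		return 1;
	}
	close(fd);
	return 0;
}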
index c927a0b1de78569aa0e49ab8ad13b7b284f2b997..88cb477524a60913201cae59c42935e789906065 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3ea214cff349c87482d4d3a29b0370bb6c90cd42..932a60d6ed8283de2e5c893d9aad53626dabbbbe 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4e280bd226ddaa107a3d19f08b07389279b53922..8b06e4c1dd5d1873b08c3490b8d80a40283b4e87 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1f36777e26febf8fcbbd38650950d6a91ba9b1bd..3dd6e838dc300b22f111449d7314441a4adbf38f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4607b027a6577111d3179d07cc292e25ba8de493..1baae6edda8925d5205f3151c72c19cb31391e43 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 618787715d560c4162ec15f4351c80391436f203..ca0cb603b171cb63bfd957903bb01fa8545e4674 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -62,5 +62,8 @@
 #include <acpi/acrestyp.h>             /* Resource Descriptor structs */
 #include <acpi/acpiosxf.h>             /* OSL interfaces (ACPICA-to-OS) */
 #include <acpi/acpixf.h>               /* ACPI core subsystem external interfaces */
+#ifdef ACPI_NATIVE_INTERFACE_HEADER
+#include ACPI_NATIVE_INTERFACE_HEADER
+#endif
 
 #endif                         /* __ACPI_H__ */
index 8256eb4ad0579be822efd37e22193fa4fdffefdb..84a2e29a231409701de1a9d579a1a139cccbd40c 100644 (file)
@@ -49,8 +49,8 @@ acpi_evaluate_reference(acpi_handle handle,
                        struct acpi_object_list *arguments,
                        struct acpi_handle_list *list);
 acpi_status
-acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
-                       u32 status_code, struct acpi_buffer *status_buf);
+acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
+                 struct acpi_buffer *status_buf);
 
 acpi_status
 acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
@@ -133,9 +133,23 @@ struct acpi_scan_handler {
        struct list_head list_node;
        int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
        void (*detach)(struct acpi_device *dev);
+       void (*bind)(struct device *phys_dev);
+       void (*unbind)(struct device *phys_dev);
        struct acpi_hotplug_profile hotplug;
 };
 
+/*
+ * ACPI Hotplug Context
+ * --------------------
+ */
+
+struct acpi_hotplug_context {
+       struct acpi_device *self;
+       int (*notify)(struct acpi_device *, u32);
+       void (*uevent)(struct acpi_device *, u32);
+       void (*fixup)(struct acpi_device *);
+};
+
 /*
  * ACPI Driver
  * -----------
@@ -190,7 +204,9 @@ struct acpi_device_flags {
        u32 initialized:1;
        u32 visited:1;
        u32 no_hotplug:1;
-       u32 reserved:24;
+       u32 hotplug_notify:1;
+       u32 is_dock_station:1;
+       u32 reserved:22;
 };
 
 /* File System */
@@ -329,6 +345,7 @@ struct acpi_device {
        struct acpi_device_perf performance;
        struct acpi_device_dir dir;
        struct acpi_scan_handler *handler;
+       struct acpi_hotplug_context *hp;
        struct acpi_driver *driver;
        void *driver_data;
        struct device dev;
@@ -351,6 +368,24 @@ static inline void acpi_set_device_status(struct acpi_device *adev, u32 sta)
        *((u32 *)&adev->status) = sta;
 }
 
+static inline void acpi_set_hp_context(struct acpi_device *adev,
+                                      struct acpi_hotplug_context *hp,
+                                      int (*notify)(struct acpi_device *, u32),
+                                      void (*uevent)(struct acpi_device *, u32),
+                                      void (*fixup)(struct acpi_device *))
+{
+       hp->self = adev;
+       hp->notify = notify;
+       hp->uevent = uevent;
+       hp->fixup = fixup;
+       adev->hp = hp;
+}
+
+void acpi_initialize_hp_context(struct acpi_device *adev,
+                               struct acpi_hotplug_context *hp,
+                               int (*notify)(struct acpi_device *, u32),
+                               void (*uevent)(struct acpi_device *, u32));
+
 /* acpi_device.dev.bus == &acpi_bus_type */
 extern struct bus_type acpi_bus_type;
 
@@ -381,6 +416,8 @@ extern int unregister_acpi_notifier(struct notifier_block *);
  */
 
 int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
+struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle);
+void acpi_bus_put_acpi_device(struct acpi_device *adev);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);
@@ -402,6 +439,8 @@ static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
 
 void acpi_scan_lock_acquire(void);
 void acpi_scan_lock_release(void);
+void acpi_lock_hp_context(void);
+void acpi_unlock_hp_context(void);
 int acpi_scan_add_handler(struct acpi_scan_handler *handler);
 int acpi_bus_register_driver(struct acpi_driver *driver);
 void acpi_bus_unregister_driver(struct acpi_driver *driver);
@@ -418,10 +457,6 @@ static inline bool acpi_device_enumerated(struct acpi_device *adev)
        return adev && adev->flags.initialized && adev->flags.visited;
 }
 
-typedef void (*acpi_hp_callback)(void *data, u32 src);
-
-acpi_status acpi_hotplug_execute(acpi_hp_callback func, void *data, u32 src);
-
 /**
  * module_acpi_driver(acpi_driver) - Helper macro for registering an ACPI driver
  * @__acpi_driver: acpi_driver struct
index b124fdb260469bfa07dbc14fa511cb7dbd0240e2..d504613bbf8081c68c8faa603e9ac9b952d0e23b 100644 (file)
@@ -109,36 +109,14 @@ void pci_acpi_crs_quirks(void);
 /*--------------------------------------------------------------------------
                                   Dock Station
   -------------------------------------------------------------------------- */
-struct acpi_dock_ops {
-       acpi_notify_handler fixup;
-       acpi_notify_handler handler;
-       acpi_notify_handler uevent;
-};
 
 #ifdef CONFIG_ACPI_DOCK
-extern int is_dock_device(acpi_handle handle);
-extern int register_hotplug_dock_device(acpi_handle handle,
-                                       const struct acpi_dock_ops *ops,
-                                       void *context,
-                                       void (*init)(void *),
-                                       void (*release)(void *));
-extern void unregister_hotplug_dock_device(acpi_handle handle);
+extern int is_dock_device(struct acpi_device *adev);
 #else
-static inline int is_dock_device(acpi_handle handle)
+static inline int is_dock_device(struct acpi_device *adev)
 {
        return 0;
 }
-static inline int register_hotplug_dock_device(acpi_handle handle,
-                                              const struct acpi_dock_ops *ops,
-                                              void *context,
-                                              void (*init)(void *),
-                                              void (*release)(void *))
-{
-       return -ENODEV;
-}
-static inline void unregister_hotplug_dock_device(acpi_handle handle)
-{
-}
 #endif /* CONFIG_ACPI_DOCK */
 
 #endif /*__ACPI_DRIVERS_H__*/
index 01e6c6d8b7e142d58b6e45bc921bace23e2b00f1..f6f5f8af211245571f2c2bd38415afe7e988eb94 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fea6773f87fc7f5fe8e3141579ebd63b4b8efcc8..b0b01b13ea990655c490d7e2c81db4336e486bce 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20131218
+#define ACPI_CA_VERSION                 0x20140214
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -229,6 +229,10 @@ acpi_attach_data(acpi_handle object, acpi_object_handler handler, void *data);
 
 acpi_status acpi_detach_data(acpi_handle object, acpi_object_handler handler);
 
+acpi_status
+acpi_get_data_full(acpi_handle object, acpi_object_handler handler, void **data,
+                  void (*callback)(void *));
+
 acpi_status
 acpi_get_data(acpi_handle object, acpi_object_handler handler, void **data);
 
index cbf4bf977f75c0483ac9a8fb9211e7d59ff88a6b..eb760ca0b2e05884745e02e09aec222744829c25 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 325aeae1fa9964a8a6512e72082637eca2f75898..3b30e36b53b509d92fc5c00f88bb7c07c5a4f3cc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4ec8c194bfe55334fa5076401da04db5a0fa259b..212c65de75df9899a19e05c6c609be3d7d10a309 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 094a906a0e98320747042890dcc98eb4dbf470d0..f3372441e3a5269d8f9d593a5917df9e5ce04368 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 01c2a9013e402bbc8af785372b90165284d7f065..c2295cc4a5c07596fe14558647b3398e2346aca5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 68a3ada689c900b90b26af4ec61377cd8405472f..e76356574374d449fb4ea86305d3d030b2eb2484 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #error ACPI_MACHINE_WIDTH not defined
 #endif
 
-/*! [Begin] no source code translation */
-
 /*
  * Data type ranges
  * Note: These macros are designed to be compiler independent as well as
  * working around problems that some 32-bit compilers have with 64-bit
  * constants.
  */
-#define ACPI_UINT8_MAX                  (UINT8) (~((UINT8)  0))        /* 0xFF               */
-#define ACPI_UINT16_MAX                 (UINT16)(~((UINT16) 0))        /* 0xFFFF             */
-#define ACPI_UINT32_MAX                 (UINT32)(~((UINT32) 0))        /* 0xFFFFFFFF         */
-#define ACPI_UINT64_MAX                 (UINT64)(~((UINT64) 0))        /* 0xFFFFFFFFFFFFFFFF */
+#define ACPI_UINT8_MAX                  (u8) (~((u8)  0))      /* 0xFF               */
+#define ACPI_UINT16_MAX                 (u16)(~((u16) 0))      /* 0xFFFF             */
+#define ACPI_UINT32_MAX                 (u32)(~((u32) 0))      /* 0xFFFFFFFF         */
+#define ACPI_UINT64_MAX                 (u64)(~((u64) 0))      /* 0xFFFFFFFFFFFFFFFF */
 #define ACPI_ASCII_MAX                  0x7F
 
 /*
  *
  * 1) The following types are of fixed size for all targets (16/32/64):
  *
- * BOOLEAN      Logical boolean
+ * u8           Logical boolean
  *
- * UINT8        8-bit  (1 byte) unsigned value
- * UINT16       16-bit (2 byte) unsigned value
- * UINT32       32-bit (4 byte) unsigned value
- * UINT64       64-bit (8 byte) unsigned value
+ * u8           8-bit  (1 byte) unsigned value
+ * u16          16-bit (2 byte) unsigned value
+ * u32          32-bit (4 byte) unsigned value
+ * u64          64-bit (8 byte) unsigned value
  *
- * INT16        16-bit (2 byte) signed value
- * INT32        32-bit (4 byte) signed value
- * INT64        64-bit (8 byte) signed value
+ * s16          16-bit (2 byte) signed value
+ * s32          32-bit (4 byte) signed value
+ * s64          64-bit (8 byte) signed value
  *
- * COMPILER_DEPENDENT_UINT64/INT64 - These types are defined in the
+ * COMPILER_DEPENDENT_UINT64/s64 - These types are defined in the
  * compiler-dependent header(s) and were introduced because there is no common
  * 64-bit integer type across the various compilation models, as shown in
  * the table below.
  * usually used for memory allocation, efficient loop counters, and array
  * indexes. The types are similar to the size_t type in the C library and are
  * required because there is no C type that consistently represents the native
- * data width. ACPI_SIZE is needed because there is no guarantee that a
+ * data width. acpi_size is needed because there is no guarantee that a
  * kernel-level C library is present.
  *
- * ACPI_SIZE        16/32/64-bit unsigned value
- * ACPI_NATIVE_INT  16/32/64-bit signed value
+ * acpi_size        16/32/64-bit unsigned value
+ * acpi_native_int  16/32/64-bit signed value
  */
 
 /*******************************************************************************
  *
  ******************************************************************************/
 
-typedef unsigned char BOOLEAN;
-typedef unsigned char UINT8;
-typedef unsigned short UINT16;
-typedef COMPILER_DEPENDENT_UINT64 UINT64;
-typedef COMPILER_DEPENDENT_INT64 INT64;
+#ifndef ACPI_USE_SYSTEM_INTTYPES
+
+typedef unsigned char u8;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef COMPILER_DEPENDENT_UINT64 u64;
+typedef COMPILER_DEPENDENT_INT64 s64;
 
-/*! [End] no source code translation !*/
+#endif                         /* ACPI_USE_SYSTEM_INTTYPES */
 
 /*
  * Value returned by acpi_os_get_thread_id. There is no standard "thread_id"
@@ -149,12 +149,12 @@ typedef COMPILER_DEPENDENT_INT64 INT64;
 
 #if ACPI_MACHINE_WIDTH == 64
 
-/*! [Begin] no source code translation (keep the typedefs as-is) */
+#ifndef ACPI_USE_SYSTEM_INTTYPES
 
-typedef unsigned int UINT32;
-typedef int INT32;
+typedef unsigned int u32;
+typedef int s32;
 
-/*! [End] no source code translation !*/
+#endif                         /* ACPI_USE_SYSTEM_INTTYPES */
 
 typedef s64 acpi_native_int;
 
@@ -188,12 +188,12 @@ typedef u64 acpi_physical_address;
 
 #elif ACPI_MACHINE_WIDTH == 32
 
-/*! [Begin] no source code translation (keep the typedefs as-is) */
+#ifndef ACPI_USE_SYSTEM_INTTYPES
 
-typedef unsigned int UINT32;
-typedef int INT32;
+typedef unsigned int u32;
+typedef int s32;
 
-/*! [End] no source code translation !*/
+#endif                         /* ACPI_USE_SYSTEM_INTTYPES */
 
 typedef s32 acpi_native_int;
 
index b402eb67af83fd5496669eec6e8ed6fe2d168f58..e863dd5c4e0417411754910c334ee870d8bd8ad7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e077ce6c38ca658756be11c13589d6a2c76f240c..a476b9118b49bbe064def5eb5897ffb1e1b5c3d1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 008aa287c7a9f3184bcd554058471aaf07e882a2..93c55ed7c53d88ec500123d361987478d975d11e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2013, Intel Corp.
+ * Copyright (C) 2000 - 2014, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 #ifdef __KERNEL__
 
+#define ACPI_USE_SYSTEM_INTTYPES
+
+/* Compile for reduced hardware mode only with this kernel config */
+
+#ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY
+#define ACPI_REDUCED_HARDWARE 1
+#endif
+
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/ctype.h>
 #include <ctype.h>
 #include <unistd.h>
 
+/* Disable kernel specific declarators */
+
+#ifndef __init
+#define __init
+#endif
+
+#ifndef __iomem
+#define __iomem
+#endif
+
 /* Host-dependent types and defines for user-space ACPICA */
 
 #define ACPI_FLUSH_CPU_CACHE()
index fa2a50b7ee660c774313e20f0027302bfb0b495c..0a7e06623470586b0509ef99410dbc54aa572f9e 100644 (file)
@@ -5,14 +5,15 @@
  * Compile time versions of __arch_hweightN()
  */
 #define __const_hweight8(w)            \
-      (        (!!((w) & (1ULL << 0))) +       \
-       (!!((w) & (1ULL << 1))) +       \
-       (!!((w) & (1ULL << 2))) +       \
-       (!!((w) & (1ULL << 3))) +       \
-       (!!((w) & (1ULL << 4))) +       \
-       (!!((w) & (1ULL << 5))) +       \
-       (!!((w) & (1ULL << 6))) +       \
-       (!!((w) & (1ULL << 7))) )
+       ((unsigned int)                 \
+        ((!!((w) & (1ULL << 0))) +     \
+         (!!((w) & (1ULL << 1))) +     \
+         (!!((w) & (1ULL << 2))) +     \
+         (!!((w) & (1ULL << 3))) +     \
+         (!!((w) & (1ULL << 4))) +     \
+         (!!((w) & (1ULL << 5))) +     \
+         (!!((w) & (1ULL << 6))) +     \
+         (!!((w) & (1ULL << 7)))))
 
 #define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
 #define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
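Each (!!((w) & (1ULL << n))) term above evaluates to 0 or 1, so __const_hweight8() is a compile-time population count of the low byte; the added (unsigned int) cast keeps the result unsigned rather than int, presumably to avoid signedness warnings when the constant is compared against unsigned values. A small illustrative check, assuming kernel context and using BUILD_BUG_ON rather than any runtime test:

#include <linux/bitops.h>
#include <linux/bug.h>

static inline void const_hweight_sanity_check(void)
{
	BUILD_BUG_ON(__const_hweight8(0xA5) != 4);	/* 0xA5 = 10100101b: four bits set */
	BUILD_BUG_ON(__const_hweight16(0xFF00) != 8);	/* high byte fully set */
}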
index 272ecba9f5889672b4b4e555c3208946a8c5e002..d5cb78f539861afdc82eae345ee4c8148fb2448e 100644 (file)
@@ -15,8 +15,10 @@ typedef u64 __nocast cputime64_t;
 
 
 /*
- * Convert nanoseconds to cputime
+ * Convert nanoseconds <-> cputime
  */
+#define cputime_to_nsecs(__ct)         \
+       jiffies_to_nsecs(cputime_to_jiffies(__ct))
 #define nsecs_to_cputime64(__nsec)     \
        jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
 #define nsecs_to_cputime(__nsec)       \
index 2c9e62c2bfd03cce685243832d5db4c8e71ec04c..4e817606c5494126a31ae50e58a6e76242d89f07 100644 (file)
@@ -44,7 +44,10 @@ typedef u64 __nocast cputime64_t;
 /*
  * Convert cputime <-> nanoseconds
  */
-#define nsecs_to_cputime(__nsecs)      ((__force u64)(__nsecs))
+#define cputime_to_nsecs(__ct)         \
+       (__force u64)(__ct)
+#define nsecs_to_cputime(__nsecs)      \
+       (__force cputime_t)(__nsecs)
 
 
 /*
diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h
new file mode 100644 (file)
index 0000000..10cd4ff
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __ASM_MCS_SPINLOCK_H
+#define __ASM_MCS_SPINLOCK_H
+
+/*
+ * Architectures can define their own:
+ *
+ *   arch_mcs_spin_lock_contended(l)
+ *   arch_mcs_spin_unlock_contended(l)
+ *
+ * See kernel/locking/mcs_spinlock.c.
+ */
+
+#endif /* __ASM_MCS_SPINLOCK_H */
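The new header is intentionally empty apart from the comment; an architecture only defines the two hooks when it can spin more efficiently than the generic code. Purely as an illustration of the MCS queueing idea the comment points at (a user-space C11 sketch, not the kernel implementation), each waiter enqueues a node and spins on its own flag instead of on a shared word:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool locked;
};

static _Atomic(struct mcs_node *) tail;

static void mcs_lock(struct mcs_node *node)
{
        struct mcs_node *prev;

        atomic_store(&node->next, NULL);
        atomic_store(&node->locked, false);
        prev = atomic_exchange(&tail, node);            /* join the queue */
        if (prev) {
                atomic_store(&prev->next, node);
                while (!atomic_load(&node->locked))     /* spin on our own node */
                        ;
        }
}

static void mcs_unlock(struct mcs_node *node)
{
        struct mcs_node *next = atomic_load(&node->next);
        struct mcs_node *self = node;

        if (!next) {
                if (atomic_compare_exchange_strong(&tail, &self, NULL))
                        return;                         /* nobody was waiting */
                while (!(next = atomic_load(&node->next)))
                        ;                               /* successor still enqueueing */
        }
        atomic_store(&next->locked, true);              /* hand the lock over */
}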
index 34c7bdc06014c0b5c787ec2faa85ab4d2b388096..1ec08c198b66a8ed7203b63d4cec3af7dc6424df 100644 (file)
@@ -193,6 +193,19 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
 }
 #endif
 
+#ifndef __HAVE_ARCH_PTE_UNUSED
+/*
+ * Some architectures provide facilities to virtualization guests
+ * so that they can flag allocated pages as unused. This allows the
+ * host to transparently reclaim unused pages. This function returns
+ * whether the pte's page is unused.
+ */
+static inline int pte_unused(pte_t pte)
+{
+       return 0;
+}
+#endif
+
 #ifndef __HAVE_ARCH_PMD_SAME
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
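A minimal sketch of the intended call pattern for pte_unused() (kernel context assumed; the helper name is invented): host-side reclaim can test the hook and discard guest-flagged pages without writeback, while the generic stub keeps every other architecture unaffected.

/* Illustrative only, not an existing kernel helper. */
static inline bool example_can_discard(pte_t pte)
{
        /*
         * True only on architectures that override pte_unused(), e.g. to
         * honour a guest marking the page as unused; with the generic stub
         * above this always evaluates to false.
         */
        return pte_unused(pte) != 0;
}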
index bb1e2cdeb9bff0ec6322680196c11e5e676b2ca6..d48bf5a95cc1bd6e2a3f93ebbe6f3022202c1b98 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_RWSEM_H
-#define _ASM_POWERPC_RWSEM_H
+#ifndef _ASM_GENERIC_RWSEM_H
+#define _ASM_GENERIC_RWSEM_H
 
 #ifndef _LINUX_RWSEM_H
 #error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
@@ -8,7 +8,7 @@
 #ifdef __KERNEL__
 
 /*
- * R/W semaphores for PPC using the stuff in lib/rwsem.c.
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
  * Adapted largely from include/asm-i386/rwsem.h
  * by Paul Mackerras <paulus@samba.org>.
  */
@@ -16,7 +16,7 @@
 /*
  * the semaphore definition
  */
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_64BIT
 # define RWSEM_ACTIVE_MASK             0xffffffffL
 #else
 # define RWSEM_ACTIVE_MASK             0x0000ffffL
@@ -129,4 +129,4 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 }
 
 #endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_RWSEM_H */
+#endif /* _ASM_GENERIC_RWSEM_H */
index 1151a1dcfe41d950339bef27d8f9b12fa40d5c68..6a15dddbaa09a1e55eaa09e56b4582fee94f4813 100644 (file)
@@ -108,6 +108,10 @@ static inline void acpi_initrd_override(void *data, size_t size)
 }
 #endif
 
+#define BAD_MADT_ENTRY(entry, end) (                                       \
+               (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
+               ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
+
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
 void __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
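BAD_MADT_ENTRY() bundles the NULL, bounds and minimum-length checks a MADT subtable parser needs before trusting an entry; a representative callback shape (names illustrative) looks roughly like this:

static int __init example_parse_lapic(struct acpi_subtable_header *header,
                                      const unsigned long end)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)header;

        if (BAD_MADT_ENTRY(lapic, end))         /* NULL, truncated or undersized */
                return -EINVAL;

        /* register the processor described by *lapic here */
        return 0;
}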
index 73a25005d88aa7cc76f7e8049b9697be18955c39..1f16d502600c5d887dbb54fea12440bd7bf9692a 100644 (file)
 
 struct device;
 struct ata_port_info;
+struct ahci_host_priv;
+struct platform_device;
 
+/*
+ * Note ahci_platform_data is deprecated, it is only kept around for use
+ * by the old da850 and spear13xx ahci code.
+ * New drivers should instead declare their own platform_driver struct, and
+ * use ahci_platform* functions in their own probe, suspend and resume methods.
+ */
 struct ahci_platform_data {
        int (*init)(struct device *dev, void __iomem *addr);
        void (*exit)(struct device *dev);
        int (*suspend)(struct device *dev);
        int (*resume)(struct device *dev);
-       const struct ata_port_info *ata_port_info;
-       unsigned int force_port_map;
-       unsigned int mask_port_map;
 };
 
+int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
+struct ahci_host_priv *ahci_platform_get_resources(
+       struct platform_device *pdev);
+int ahci_platform_init_host(struct platform_device *pdev,
+                           struct ahci_host_priv *hpriv,
+                           const struct ata_port_info *pi_template,
+                           unsigned int force_port_map,
+                           unsigned int mask_port_map);
+
+int ahci_platform_suspend_host(struct device *dev);
+int ahci_platform_resume_host(struct device *dev);
+int ahci_platform_suspend(struct device *dev);
+int ahci_platform_resume(struct device *dev);
+
 #endif /* _AHCI_PLATFORM_H */
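Following the deprecation note, a new-style driver keeps its own platform_driver and builds on the exported helpers. A minimal probe sketch using the signatures declared above (the port_info contents and the ahci_platform_ops reference are illustrative):

static const struct ata_port_info example_port_info = {
        .flags          = ATA_FLAG_SATA | ATA_FLAG_NCQ,
        .pio_mask       = ATA_PIO4,
        .udma_mask      = ATA_UDMA6,
        .port_ops       = &ahci_platform_ops,
};

static int example_ahci_probe(struct platform_device *pdev)
{
        struct ahci_host_priv *hpriv;
        int rc;

        hpriv = ahci_platform_get_resources(pdev);      /* mmio, clocks, regulator */
        if (IS_ERR(hpriv))
                return PTR_ERR(hpriv);

        rc = ahci_platform_enable_resources(hpriv);
        if (rc)
                return rc;

        rc = ahci_platform_init_host(pdev, hpriv, &example_port_info, 0, 0);
        if (rc)
                ahci_platform_disable_resources(hpriv);
        return rc;
}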
index abc9ca7784568ba5737add4fce84577348428eac..be5fd38bd5a05d83eaac250055defcccacafe054 100644 (file)
@@ -196,6 +196,21 @@ static inline unsigned long __ffs64(u64 word)
 
 #ifdef __KERNEL__
 
+#ifndef set_mask_bits
+#define set_mask_bits(ptr, _mask, _bits)       \
+({                                                             \
+       const typeof(*ptr) mask = (_mask), bits = (_bits);      \
+       typeof(*ptr) old, new;                                  \
+                                                               \
+       do {                                                    \
+               old = ACCESS_ONCE(*ptr);                        \
+               new = (old & ~mask) | bits;                     \
+       } while (cmpxchg(ptr, old, new) != old);                \
+                                                               \
+       new;                                                    \
+})
+#endif
+
 #ifndef find_last_bit
 /**
  * find_last_bit - find the last set bit in a memory region
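set_mask_bits(), added above, is a lock-free read-modify-write: clear _mask, OR in _bits, retry until the cmpxchg succeeds, and evaluate to the new value. A hedged usage sketch (kernel context, names invented):

static unsigned long example_flags;

static void example_set_mode(unsigned long mode)
{
        /* atomically replace the low byte of example_flags with the new mode */
        set_mask_bits(&example_flags, 0xffUL, mode & 0xffUL);
}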
index 493aa021c7a9429a892c8ccb6f19110d1b4c9065..2e4cb67f6e560094aa719fe75f595dfbb562cf8e 100644 (file)
@@ -62,6 +62,11 @@ enum clock_event_mode {
 #define CLOCK_EVT_FEAT_DYNIRQ          0x000020
 #define CLOCK_EVT_FEAT_PERCPU          0x000040
 
+/*
+ * Clockevent device is based on a hrtimer for broadcast
+ */
+#define CLOCK_EVT_FEAT_HRTIMER         0x000080
+
 /**
  * struct clock_event_device - clock event device descriptor
  * @event_handler:     Assigned by the framework to be called by the low
@@ -83,6 +88,7 @@ enum clock_event_mode {
  * @name:              ptr to clock event name
  * @rating:            variable to rate clock event devices
  * @irq:               IRQ number (only for non CPU local devices)
+ * @bound_on:          Bound on CPU
  * @cpumask:           cpumask to indicate for which CPUs this device works
  * @list:              list head for the management code
  * @owner:             module reference
@@ -113,6 +119,7 @@ struct clock_event_device {
        const char              *name;
        int                     rating;
        int                     irq;
+       int                     bound_on;
        const struct cpumask    *cpumask;
        struct list_head        list;
        struct module           *owner;
@@ -180,15 +187,17 @@ extern int tick_receive_broadcast(void);
 #endif
 
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void tick_setup_hrtimer_broadcast(void);
 extern int tick_check_broadcast_expired(void);
 #else
 static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) {};
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
-extern void clockevents_notify(unsigned long reason, void *arg);
+extern int clockevents_notify(unsigned long reason, void *arg);
 #else
-static inline void clockevents_notify(unsigned long reason, void *arg) {}
+static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 #endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
@@ -196,8 +205,9 @@ static inline void clockevents_notify(unsigned long reason, void *arg) {}
 static inline void clockevents_suspend(void) {}
 static inline void clockevents_resume(void) {}
 
-static inline void clockevents_notify(unsigned long reason, void *arg) {}
+static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) {};
 
 #endif
 
index 3f448c65511b525ae9d92c38d7201f087b568334..01c0aa57ccecf39420b3fd30f830614e613fbe58 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/if.h>
 #include <linux/fs.h>
 #include <linux/aio_abi.h>     /* for aio_context_t */
+#include <linux/unistd.h>
 
 #include <asm/compat.h>
 #include <asm/siginfo.h>
@@ -27,6 +28,9 @@
 #define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
 #endif
 
+#define COMPAT_SYSCALL_DEFINE0(name) \
+       asmlinkage long compat_sys_##name(void)
+
 #define COMPAT_SYSCALL_DEFINE1(name, ...) \
         COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
 #define COMPAT_SYSCALL_DEFINE2(name, ...) \
@@ -68,6 +72,8 @@ typedef struct compat_sigaltstack {
 typedef __compat_uid32_t       compat_uid_t;
 typedef __compat_gid32_t       compat_gid_t;
 
+typedef        compat_ulong_t          compat_aio_context_t;
+
 struct compat_sel_arg_struct;
 struct rusage;
 
@@ -318,7 +324,7 @@ asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
                compat_ssize_t msgsz, int msgflg);
 asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
-               compat_ssize_t msgsz, long msgtyp, int msgflg);
+               compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
 long compat_sys_msgctl(int first, int second, void __user *uptr);
 long compat_sys_shmctl(int first, int second, void __user *uptr);
 long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
@@ -337,6 +343,19 @@ asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
 asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
                const struct compat_iovec __user *vec,
                compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+asmlinkage long compat_sys_preadv64(unsigned long fd,
+               const struct compat_iovec __user *vec,
+               unsigned long vlen, loff_t pos);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+asmlinkage long compat_sys_pwritev64(unsigned long fd,
+               const struct compat_iovec __user *vec,
+               unsigned long vlen, loff_t pos);
+#endif
+
 asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
 
 asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
@@ -451,7 +470,7 @@ asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
 asmlinkage long compat_sys_timerfd_gettime(int ufd,
                                   struct compat_itimerspec __user *otmr);
 
-asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_page,
+asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
                                      __u32 __user *pages,
                                      const int __user *nodes,
                                      int __user *status,
@@ -481,20 +500,20 @@ asmlinkage long compat_sys_statfs64(const char __user *pathname,
 asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
                                     struct compat_statfs64 __user *buf);
 asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
-                                  unsigned long arg);
+                                  compat_ulong_t arg);
 asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
-                                unsigned long arg);
+                                compat_ulong_t arg);
 asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
-asmlinkage long compat_sys_io_getevents(aio_context_t ctx_id,
-                                       unsigned long min_nr,
-                                       unsigned long nr,
+asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
+                                       compat_long_t min_nr,
+                                       compat_long_t nr,
                                        struct io_event __user *events,
                                        struct compat_timespec __user *timeout);
-asmlinkage long compat_sys_io_submit(aio_context_t ctx_id, int nr,
+asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
                                     u32 __user *iocb);
 asmlinkage long compat_sys_mount(const char __user *dev_name,
                                 const char __user *dir_name,
-                                const char __user *type, unsigned long flags,
+                                const char __user *type, compat_ulong_t flags,
                                 const void __user *data);
 asmlinkage long compat_sys_old_readdir(unsigned int fd,
                                       struct compat_old_linux_dirent __user *,
@@ -502,9 +521,11 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
 asmlinkage long compat_sys_getdents(unsigned int fd,
                                    struct compat_linux_dirent __user *dirent,
                                    unsigned int count);
+#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
 asmlinkage long compat_sys_getdents64(unsigned int fd,
                                      struct linux_dirent64 __user *dirent,
                                      unsigned int count);
+#endif
 asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
                                    unsigned int nr_segs, unsigned int flags);
 asmlinkage long compat_sys_open(const char __user *filename, int flags,
@@ -549,9 +570,9 @@ asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
                                    unsigned vlen, unsigned int flags);
 asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
                                   unsigned int flags);
-asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len,
+asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
                                unsigned flags);
-asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
+asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
                            unsigned flags, struct sockaddr __user *addr,
                            int __user *addrlen);
 asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
@@ -615,16 +636,16 @@ asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
                                struct compat_siginfo __user *uinfo);
 asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
 asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
-                                unsigned long arg);
+                                compat_ulong_t arg);
 asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
                struct compat_timespec __user *utime, u32 __user *uaddr2,
                u32 val3);
 asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
                                      char __user *optval, int __user *optlen);
-asmlinkage long compat_sys_kexec_load(unsigned long entry,
-                                     unsigned long nr_segments,
+asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
+                                     compat_ulong_t nr_segments,
                                      struct compat_kexec_segment __user *,
-                                     unsigned long flags);
+                                     compat_ulong_t flags);
 asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
                        const struct compat_mq_attr __user *u_mqstat,
                        struct compat_mq_attr __user *u_omqstat);
@@ -635,11 +656,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
                        struct compat_mq_attr __user *u_attr);
 asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
                        const char __user *u_msg_ptr,
-                       size_t msg_len, unsigned int msg_prio,
+                       compat_size_t msg_len, unsigned int msg_prio,
                        const struct compat_timespec __user *u_abs_timeout);
 asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
                        char __user *u_msg_ptr,
-                       size_t msg_len, unsigned int __user *u_msg_prio,
+                       compat_size_t msg_len, unsigned int __user *u_msg_prio,
                        const struct compat_timespec __user *u_abs_timeout);
 asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
 asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
@@ -654,12 +675,12 @@ extern void __user *compat_alloc_user_space(unsigned long len);
 
 asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
                const struct compat_iovec __user *lvec,
-               unsigned long liovcnt, const struct compat_iovec __user *rvec,
-               unsigned long riovcnt, unsigned long flags);
+               compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
+               compat_ulong_t riovcnt, compat_ulong_t flags);
 asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
                const struct compat_iovec __user *lvec,
-               unsigned long liovcnt, const struct compat_iovec __user *rvec,
-               unsigned long riovcnt, unsigned long flags);
+               compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
+               compat_ulong_t riovcnt, compat_ulong_t flags);
 
 asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
                                    compat_off_t __user *offset, compat_size_t count);
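Most of the changes in this header narrow argument types to their compat_* equivalents so that 32-bit user-space values are truncated and extended consistently on 64-bit kernels. COMPAT_SYSCALL_DEFINE0(), added near the top, parallels SYSCALL_DEFINE0() for argument-less entry points; a hypothetical use, per the macro's own expansion:

COMPAT_SYSCALL_DEFINE0(example_nop)     /* expands to: asmlinkage long compat_sys_example_nop(void) */
{
        return 0;
}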
index 4d89e0e6f9ccaa641dd2a6b2c35c8c7b7bbf2916..2d2e62c8666ad71962315ac4f7041029474a5fec 100644 (file)
@@ -74,6 +74,8 @@ struct cpufreq_policy {
        unsigned int            max;    /* in kHz */
        unsigned int            cur;    /* in kHz, only needed if cpufreq
                                         * governors are used */
+       unsigned int            suspend_freq; /* freq to set during suspend */
+
        unsigned int            policy; /* see above */
        struct cpufreq_governor *governor; /* see below */
        void                    *governor_data;
@@ -83,6 +85,7 @@ struct cpufreq_policy {
                                         * called, but you're in IRQ context */
 
        struct cpufreq_real_policy      user_policy;
+       struct cpufreq_frequency_table  *freq_table;
 
        struct list_head        policy_list;
        struct kobject          kobj;
@@ -224,6 +227,7 @@ struct cpufreq_driver {
        int     (*bios_limit)   (int cpu, unsigned int *limit);
 
        int     (*exit)         (struct cpufreq_policy *policy);
+       void    (*stop_cpu)     (struct cpufreq_policy *policy);
        int     (*suspend)      (struct cpufreq_policy *policy);
        int     (*resume)       (struct cpufreq_policy *policy);
        struct freq_attr        **attr;
@@ -296,6 +300,15 @@ cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
                        policy->cpuinfo.max_freq);
 }
 
+#ifdef CONFIG_CPU_FREQ
+void cpufreq_suspend(void);
+void cpufreq_resume(void);
+int cpufreq_generic_suspend(struct cpufreq_policy *policy);
+#else
+static inline void cpufreq_suspend(void) {}
+static inline void cpufreq_resume(void) {}
+#endif
+
 /*********************************************************************
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/
@@ -306,8 +319,6 @@ cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
 /* Transition notifiers */
 #define CPUFREQ_PRECHANGE              (0)
 #define CPUFREQ_POSTCHANGE             (1)
-#define CPUFREQ_RESUMECHANGE           (8)
-#define CPUFREQ_SUSPENDCHANGE          (9)
 
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST                 (0)
@@ -463,7 +474,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
                unsigned int freq);
 
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
 
 #ifdef CONFIG_CPU_FREQ
@@ -490,9 +500,6 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
 extern struct freq_attr *cpufreq_generic_attr[];
-void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
-                                     unsigned int cpu);
-void cpufreq_frequency_table_put_attr(unsigned int cpu);
 int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
                                      struct cpufreq_frequency_table *table);
 
@@ -500,10 +507,4 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
 int cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency);
-static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
 #endif /* _LINUX_CPUFREQ_H */
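The new suspend_freq field together with cpufreq_generic_suspend() lets a driver delegate its suspend handling to the core. A sketch of how a driver might wire this up (frequencies, table and callback bodies are placeholders):

static struct cpufreq_frequency_table example_freq_table[] = {
        { .frequency = 400000 },
        { .frequency = 800000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy, unsigned int index)
{
        /* program the clock for example_freq_table[index].frequency here */
        return 0;
}

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        policy->suspend_freq = 800000;  /* kHz the core sets while suspending */
        return cpufreq_generic_init(policy, example_freq_table, 100000);
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name           = "example",
        .init           = example_cpufreq_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = example_target_index,
        .get            = cpufreq_generic_get,
        .suspend        = cpufreq_generic_suspend,
        .attr           = cpufreq_generic_attr,
};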
diff --git a/include/linux/cputime.h b/include/linux/cputime.h
new file mode 100644 (file)
index 0000000..f2eb2ee
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef __LINUX_CPUTIME_H
+#define __LINUX_CPUTIME_H
+
+#include <asm/cputime.h>
+
+#ifndef cputime_to_nsecs
+# define cputime_to_nsecs(__ct)        \
+       (cputime_to_usecs(__ct) * NSEC_PER_USEC)
+#endif
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs)     \
+       usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
+#endif
+
+#endif /* __LINUX_CPUTIME_H */
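With these fallbacks every configuration now provides cputime_to_nsecs(); for orientation, the conversion resolves differently depending on the cputime backend (HZ = 100 assumed in the jiffies line):

        jiffies-based:   cputime_to_nsecs(1) = jiffies_to_nsecs(1) = NSEC_PER_SEC / HZ = 10,000,000 ns
        nsecs-based:     cputime_to_nsecs(ct) = (u64)ct, i.e. already in nanoseconds
        generic default: cputime_to_nsecs(ct) = cputime_to_usecs(ct) * NSEC_PER_USEC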
index 952b01033c32dedcf83349a988b920c1c5ed8aa8..ec1b6e21f0efb88cdb6c60a83d27c98957278757 100644 (file)
@@ -626,6 +626,7 @@ static inline void *devm_kcalloc(struct device *dev,
        return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
 }
 extern void devm_kfree(struct device *dev, void *p);
+extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
 
 void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
 void __iomem *devm_request_and_ioremap(struct device *dev,
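devm_kstrdup() is the managed counterpart of kstrdup(): the copy lives until the device is unbound, so no explicit kfree() is needed in the remove path. A hedged sketch (names invented, error handling trimmed to the essentials):

static int example_probe(struct platform_device *pdev)
{
        const char *label = "default";
        char *copy;

        of_property_read_string(pdev->dev.of_node, "label", &label);
        copy = devm_kstrdup(&pdev->dev, label, GFP_KERNEL);
        if (!copy)
                return -ENOMEM;

        platform_set_drvdata(pdev, copy);       /* freed automatically on unbind */
        return 0;
}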
index 0a819e7a60c961246c529570fedf7717dd6c3c7c..6c100ff0cae4b6eb886e48530c9f48c858b0f257 100644 (file)
@@ -153,6 +153,102 @@ typedef struct {
        u8 sets_to_zero;
 } efi_time_cap_t;
 
+typedef struct {
+       efi_table_hdr_t hdr;
+       u32 raise_tpl;
+       u32 restore_tpl;
+       u32 allocate_pages;
+       u32 free_pages;
+       u32 get_memory_map;
+       u32 allocate_pool;
+       u32 free_pool;
+       u32 create_event;
+       u32 set_timer;
+       u32 wait_for_event;
+       u32 signal_event;
+       u32 close_event;
+       u32 check_event;
+       u32 install_protocol_interface;
+       u32 reinstall_protocol_interface;
+       u32 uninstall_protocol_interface;
+       u32 handle_protocol;
+       u32 __reserved;
+       u32 register_protocol_notify;
+       u32 locate_handle;
+       u32 locate_device_path;
+       u32 install_configuration_table;
+       u32 load_image;
+       u32 start_image;
+       u32 exit;
+       u32 unload_image;
+       u32 exit_boot_services;
+       u32 get_next_monotonic_count;
+       u32 stall;
+       u32 set_watchdog_timer;
+       u32 connect_controller;
+       u32 disconnect_controller;
+       u32 open_protocol;
+       u32 close_protocol;
+       u32 open_protocol_information;
+       u32 protocols_per_handle;
+       u32 locate_handle_buffer;
+       u32 locate_protocol;
+       u32 install_multiple_protocol_interfaces;
+       u32 uninstall_multiple_protocol_interfaces;
+       u32 calculate_crc32;
+       u32 copy_mem;
+       u32 set_mem;
+       u32 create_event_ex;
+} __packed efi_boot_services_32_t;
+
+typedef struct {
+       efi_table_hdr_t hdr;
+       u64 raise_tpl;
+       u64 restore_tpl;
+       u64 allocate_pages;
+       u64 free_pages;
+       u64 get_memory_map;
+       u64 allocate_pool;
+       u64 free_pool;
+       u64 create_event;
+       u64 set_timer;
+       u64 wait_for_event;
+       u64 signal_event;
+       u64 close_event;
+       u64 check_event;
+       u64 install_protocol_interface;
+       u64 reinstall_protocol_interface;
+       u64 uninstall_protocol_interface;
+       u64 handle_protocol;
+       u64 __reserved;
+       u64 register_protocol_notify;
+       u64 locate_handle;
+       u64 locate_device_path;
+       u64 install_configuration_table;
+       u64 load_image;
+       u64 start_image;
+       u64 exit;
+       u64 unload_image;
+       u64 exit_boot_services;
+       u64 get_next_monotonic_count;
+       u64 stall;
+       u64 set_watchdog_timer;
+       u64 connect_controller;
+       u64 disconnect_controller;
+       u64 open_protocol;
+       u64 close_protocol;
+       u64 open_protocol_information;
+       u64 protocols_per_handle;
+       u64 locate_handle_buffer;
+       u64 locate_protocol;
+       u64 install_multiple_protocol_interfaces;
+       u64 uninstall_multiple_protocol_interfaces;
+       u64 calculate_crc32;
+       u64 copy_mem;
+       u64 set_mem;
+       u64 create_event_ex;
+} __packed efi_boot_services_64_t;
+
 /*
  * EFI Boot Services table
  */
@@ -231,12 +327,61 @@ typedef enum {
     EfiPciIoAttributeOperationMaximum
 } EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
 
+typedef struct {
+       u32 read;
+       u32 write;
+} efi_pci_io_protocol_access_32_t;
+
+typedef struct {
+       u64 read;
+       u64 write;
+} efi_pci_io_protocol_access_64_t;
 
 typedef struct {
        void *read;
        void *write;
 } efi_pci_io_protocol_access_t;
 
+typedef struct {
+       u32 poll_mem;
+       u32 poll_io;
+       efi_pci_io_protocol_access_32_t mem;
+       efi_pci_io_protocol_access_32_t io;
+       efi_pci_io_protocol_access_32_t pci;
+       u32 copy_mem;
+       u32 map;
+       u32 unmap;
+       u32 allocate_buffer;
+       u32 free_buffer;
+       u32 flush;
+       u32 get_location;
+       u32 attributes;
+       u32 get_bar_attributes;
+       u32 set_bar_attributes;
+       uint64_t romsize;
+       void *romimage;
+} efi_pci_io_protocol_32;
+
+typedef struct {
+       u64 poll_mem;
+       u64 poll_io;
+       efi_pci_io_protocol_access_64_t mem;
+       efi_pci_io_protocol_access_64_t io;
+       efi_pci_io_protocol_access_64_t pci;
+       u64 copy_mem;
+       u64 map;
+       u64 unmap;
+       u64 allocate_buffer;
+       u64 free_buffer;
+       u64 flush;
+       u64 get_location;
+       u64 attributes;
+       u64 get_bar_attributes;
+       u64 set_bar_attributes;
+       uint64_t romsize;
+       void *romimage;
+} efi_pci_io_protocol_64;
+
 typedef struct {
        void *poll_mem;
        void *poll_io;
@@ -290,6 +435,42 @@ typedef struct {
 #define EFI_RUNTIME_SERVICES_SIGNATURE ((u64)0x5652453544e5552ULL)
 #define EFI_RUNTIME_SERVICES_REVISION  0x00010000
 
+typedef struct {
+       efi_table_hdr_t hdr;
+       u32 get_time;
+       u32 set_time;
+       u32 get_wakeup_time;
+       u32 set_wakeup_time;
+       u32 set_virtual_address_map;
+       u32 convert_pointer;
+       u32 get_variable;
+       u32 get_next_variable;
+       u32 set_variable;
+       u32 get_next_high_mono_count;
+       u32 reset_system;
+       u32 update_capsule;
+       u32 query_capsule_caps;
+       u32 query_variable_info;
+} efi_runtime_services_32_t;
+
+typedef struct {
+       efi_table_hdr_t hdr;
+       u64 get_time;
+       u64 set_time;
+       u64 get_wakeup_time;
+       u64 set_wakeup_time;
+       u64 set_virtual_address_map;
+       u64 convert_pointer;
+       u64 get_variable;
+       u64 get_next_variable;
+       u64 set_variable;
+       u64 get_next_high_mono_count;
+       u64 reset_system;
+       u64 update_capsule;
+       u64 query_capsule_caps;
+       u64 query_variable_info;
+} efi_runtime_services_64_t;
+
 typedef struct {
        efi_table_hdr_t hdr;
        void *get_time;
@@ -483,6 +664,38 @@ struct efi_memory_map {
        unsigned long desc_size;
 };
 
+typedef struct {
+       u32 revision;
+       u32 parent_handle;
+       u32 system_table;
+       u32 device_handle;
+       u32 file_path;
+       u32 reserved;
+       u32 load_options_size;
+       u32 load_options;
+       u32 image_base;
+       __aligned_u64 image_size;
+       unsigned int image_code_type;
+       unsigned int image_data_type;
+       unsigned long unload;
+} efi_loaded_image_32_t;
+
+typedef struct {
+       u32 revision;
+       u64 parent_handle;
+       u64 system_table;
+       u64 device_handle;
+       u64 file_path;
+       u64 reserved;
+       u32 load_options_size;
+       u64 load_options;
+       u64 image_base;
+       __aligned_u64 image_size;
+       unsigned int image_code_type;
+       unsigned int image_data_type;
+       unsigned long unload;
+} efi_loaded_image_64_t;
+
 typedef struct {
        u32 revision;
        void *parent_handle;
@@ -511,6 +724,34 @@ typedef struct {
        efi_char16_t filename[1];
 } efi_file_info_t;
 
+typedef struct {
+       u64 revision;
+       u32 open;
+       u32 close;
+       u32 delete;
+       u32 read;
+       u32 write;
+       u32 get_position;
+       u32 set_position;
+       u32 get_info;
+       u32 set_info;
+       u32 flush;
+} efi_file_handle_32_t;
+
+typedef struct {
+       u64 revision;
+       u64 open;
+       u64 close;
+       u64 delete;
+       u64 read;
+       u64 write;
+       u64 get_position;
+       u64 set_position;
+       u64 get_info;
+       u64 set_info;
+       u64 flush;
+} efi_file_handle_64_t;
+
 typedef struct _efi_file_handle {
        u64 revision;
        efi_status_t (*open)(struct _efi_file_handle *,
@@ -573,6 +814,7 @@ extern struct efi {
        efi_reset_system_t *reset_system;
        efi_set_virtual_address_map_t *set_virtual_address_map;
        struct efi_memory_map *memmap;
+       unsigned long flags;
 } efi;
 
 static inline int
@@ -659,18 +901,17 @@ extern int __init efi_setup_pcdp_console(char *);
 #define EFI_ARCH_1             6       /* First arch-specific bit */
 
 #ifdef CONFIG_EFI
-# ifdef CONFIG_X86
-extern int efi_enabled(int facility);
-# else
-static inline int efi_enabled(int facility)
+/*
+ * Test whether the above EFI_* bits are enabled.
+ */
+static inline bool efi_enabled(int feature)
 {
-       return 1;
+       return test_bit(feature, &efi.flags) != 0;
 }
-# endif
 #else
-static inline int efi_enabled(int facility)
+static inline bool efi_enabled(int feature)
 {
-       return 0;
+       return false;
 }
 #endif
 
@@ -809,6 +1050,17 @@ struct efivar_entry {
        bool deleting;
 };
 
+struct efi_simple_text_output_protocol_32 {
+       u32 reset;
+       u32 output_string;
+       u32 test_string;
+};
+
+struct efi_simple_text_output_protocol_64 {
+       u64 reset;
+       u64 output_string;
+       u64 test_string;
+};
 
 struct efi_simple_text_output_protocol {
        void *reset;
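The fixed-width *_32_t/*_64_t mirrors let code such as the boot stub deal with firmware tables of either word size, and efi_enabled() now simply tests feature bits in efi.flags on every architecture rather than being x86-specific. Typical caller shape (illustrative):

static void example_read_efi_time(void)
{
        efi_time_t tm;

        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                return;                 /* bit not set in efi.flags */

        efi.get_time(&tm, NULL);
}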
index b0d95cac826e8f310aee6dfa999c69ee56727bfb..6435f46d6e1319b71f52050144f3e871986196df 100644 (file)
@@ -55,7 +55,11 @@ union futex_key {
 #ifdef CONFIG_FUTEX
 extern void exit_robust_list(struct task_struct *curr);
 extern void exit_pi_state_list(struct task_struct *curr);
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
 extern int futex_cmpxchg_enabled;
+#endif
 #else
 static inline void exit_robust_list(struct task_struct *curr)
 {
index 12d5f972f23f46cf1dbf83c0e7fc93333cf49015..cba442ec3c66583a7aceefbbc9ff9eca5fee1563 100644 (file)
@@ -9,6 +9,7 @@
 
 
 extern void synchronize_irq(unsigned int irq);
+extern void synchronize_hardirq(unsigned int irq);
 
 #if defined(CONFIG_TINY_RCU)
 
index d19a5c2d2270ebb9bbe01a167a97875e255da357..e7a8d3fa91d574e322a01a4cc1fff3a30a513a15 100644 (file)
@@ -96,12 +96,12 @@ enum hrtimer_restart {
  * @function:  timer expiry callback function
  * @base:      pointer to the timer base (per cpu and per clock)
  * @state:     state information (See bit values above)
+ * @start_pid: timer statistics field to store the pid of the task which
+ *             started the timer
  * @start_site:        timer statistics field to store the site where the timer
  *             was started
  * @start_comm: timer statistics field to store the name of the process which
  *             started the timer
- * @start_pid: timer statistics field to store the pid of the task which
- *             started the timer
  *
  * The hrtimer structure must be initialized by hrtimer_init()
  */
index e1688802964f3ccb6aebc0738a98f9b838a6d02d..a3ba270763426bd7dfd9f0633e0abb4476d2dc50 100644 (file)
@@ -163,6 +163,23 @@ extern bool initcall_debug;
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_LTO
+/* Work around a LTO gcc problem: when there is no reference to a variable
+ * in a module it will be moved to the end of the program. This causes
+ * reordering of initcalls which the kernel does not like.
+ * Add a dummy reference function to avoid this. The function is
+ * deleted by the linker.
+ */
+#define LTO_REFERENCE_INITCALL(x) \
+       ; /* yes this is needed */                      \
+       static __used __exit void *reference_##x(void)  \
+       {                                               \
+               return &x;                              \
+       }
+#else
+#define LTO_REFERENCE_INITCALL(x)
+#endif
+
 /* initcalls are now grouped by functionality into separate 
  * subsections. Ordering inside the subsections is determined
  * by link order. 
@@ -175,7 +192,8 @@ extern bool initcall_debug;
 
 #define __define_initcall(fn, id) \
        static initcall_t __initcall_##fn##id __used \
-       __attribute__((__section__(".initcall" #id ".init"))) = fn
+       __attribute__((__section__(".initcall" #id ".init"))) = fn; \
+       LTO_REFERENCE_INITCALL(__initcall_##fn##id)
 
 /*
  * Early initcalls run before initializing SMP.
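Concretely, the workaround means a plain device_initcall(example_init) now expands (with CONFIG_LTO, following the macros above) to the initcall pointer plus a throwaway reference function that the linker later discards:

static initcall_t __initcall_example_init6 __used
        __attribute__((__section__(".initcall6.init"))) = example_init;
static __used __exit void *reference___initcall_example_init6(void)
{
        return &__initcall_example_init6;
}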
index a2678d35b5a2e8fc8f840d91f1ac5c7ec0c97bec..c7bfac1c4a7b8f6c82742b4d9f97c058131ae4fc 100644 (file)
@@ -188,6 +188,7 @@ extern void disable_irq(unsigned int irq);
 extern void disable_percpu_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
+extern void irq_wake_thread(unsigned int irq, void *dev_id);
 
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
index f4f42faec68677a6fb032e7ccdf07b8b0bb130cb..8a18e75600ccb25b47f17decfacdef673de89713 100644 (file)
@@ -24,7 +24,7 @@
 
 struct device;
 
-void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
 void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 
 #ifdef CONFIG_MMU
index 7dc10036eff552229cdd964e88669a7759a8152f..d278838908cbc3f1cdac08ae3f3714934f01f188 100644 (file)
@@ -303,6 +303,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @irq_pm_shutdown:   function called from core code on shutdown once per chip
  * @irq_calc_mask:     Optional function to set irq_data.mask for special cases
  * @irq_print_chip:    optional to print special chip info in show_interrupts
+ * @irq_request_resources:     optional to request resources before calling
+ *                             any other callback related to this irq
+ * @irq_release_resources:     optional to release resources acquired with
+ *                             irq_request_resources
  * @flags:             chip specific flags
  */
 struct irq_chip {
@@ -336,6 +340,8 @@ struct irq_chip {
        void            (*irq_calc_mask)(struct irq_data *data);
 
        void            (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
+       int             (*irq_request_resources)(struct irq_data *data);
+       void            (*irq_release_resources)(struct irq_data *data);
 
        unsigned long   flags;
 };
@@ -349,6 +355,8 @@ struct irq_chip {
  * IRQCHIP_ONOFFLINE_ENABLED:  Only call irq_on/off_line callbacks
  *                             when irq enabled
  * IRQCHIP_SKIP_SET_WAKE:      Skip chip.irq_set_wake(), for this irq chip
+ * IRQCHIP_ONESHOT_SAFE:       One shot does not require mask/unmask
+ * IRQCHIP_EOI_THREADED:       Chip requires eoi() on unmask in threaded mode
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED         = (1 <<  0),
@@ -357,6 +365,7 @@ enum {
        IRQCHIP_ONOFFLINE_ENABLED       = (1 <<  3),
        IRQCHIP_SKIP_SET_WAKE           = (1 <<  4),
        IRQCHIP_ONESHOT_SAFE            = (1 <<  5),
+       IRQCHIP_EOI_THREADED            = (1 <<  6),
 };
 
 /* This include will go away once we isolated irq_desc usage to core code */
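The two new irq_chip callbacks give a chip a place to grab whatever it needs before any other callback for that interrupt runs, and to drop it again when the interrupt is freed. A hedged sketch (the structure and the clock are invented for illustration):

struct example_irqchip {
        struct clk *clk;
};

static int example_irq_request_resources(struct irq_data *d)
{
        struct example_irqchip *chip = irq_data_get_irq_chip_data(d);

        /* make sure the block is clocked before the first unmask */
        return clk_prepare_enable(chip->clk);
}

static void example_irq_release_resources(struct irq_data *d)
{
        struct example_irqchip *chip = irq_data_get_irq_chip_data(d);

        clk_disable_unprepare(chip->clk);
}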
index 66017028dcb3d0aebf70aa7281e7758a42bcb3bf..19ae05d4b8ec26b6dc6fa5f568d2b25b698e3417 100644 (file)
@@ -30,7 +30,9 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
        work->func = func;
 }
 
-void irq_work_queue(struct irq_work *work);
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+
+bool irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
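DEFINE_IRQ_WORK() plus the bool return make the already-pending case explicit; a small usage sketch (kernel context, names invented):

static void example_irq_work_fn(struct irq_work *work)
{
        pr_info("ran from the irq_work interrupt\n");   /* hard-irq context */
}

static DEFINE_IRQ_WORK(example_work, example_irq_work_fn);

static void example_poke(void)
{
        if (!irq_work_queue(&example_work))
                pr_debug("already pending, callback will run once\n");
}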
 
index 196d1ea86df081a5224928b4d75117aec85cd822..08fb0247764177a8b96f83f5181a17c9d33b099c 100644 (file)
@@ -458,7 +458,7 @@ extern enum system_states {
 
 #define TAINT_PROPRIETARY_MODULE       0
 #define TAINT_FORCED_MODULE            1
-#define TAINT_UNSAFE_SMP               2
+#define TAINT_CPU_OUT_OF_SPEC          2
 #define TAINT_FORCED_RMMOD             3
 #define TAINT_MACHINE_CHECK            4
 #define TAINT_BAD_PAGE                 5
index 51c72be4a7c3f1782a5d5f21801b06bea1720f57..ecbc52f9ff77e7921acb4c069bc88056718f6e6a 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/sched.h>
 #include <linux/vtime.h>
 #include <asm/irq.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 
 /*
  * 'kernel_stat.h' contains the definitions needed for doing
@@ -51,14 +51,8 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 
 extern unsigned long long nr_context_switches(void);
 
-#include <linux/irq.h>
 extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
-
-#define kstat_incr_irqs_this_cpu(irqno, DESC)          \
-do {                                                   \
-       __this_cpu_inc(*(DESC)->kstat_irqs);            \
-       __this_cpu_inc(kstat.irqs_sum);                 \
-} while (0)
+extern void kstat_incr_irq_this_cpu(unsigned int irq);
 
 static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
 {
index 6d4066cdb5b5b8508be5347e50f1c16c1fddf493..a7564193004959ba12bc28361a4c32d76b8297c2 100644 (file)
@@ -127,12 +127,6 @@ extern asmlinkage long sys_kexec_load(unsigned long entry,
                                        struct kexec_segment __user *segments,
                                        unsigned long flags);
 extern int kernel_kexec(void);
-#ifdef CONFIG_COMPAT
-extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
-                               unsigned long nr_segments,
-                               struct compat_kexec_segment __user *segments,
-                               unsigned long flags);
-#endif
 extern struct page *kimage_alloc_control_pages(struct kimage *image,
                                                unsigned int order);
 extern void crash_kexec(struct pt_regs *);
index bec6dbe939a0267def1f73864d07c5025cb687f1..1de36be64df4d1516a2a451901733d36fd32ec4e 100644 (file)
@@ -848,7 +848,6 @@ struct ata_port {
        struct completion       park_req_pending;
 
        pm_message_t            pm_mesg;
-       int                     *pm_result;
        enum ata_lpm_policy     target_lpm_policy;
 
        struct timer_list       fastdrain_timer;
@@ -1140,16 +1139,14 @@ extern bool ata_link_offline(struct ata_link *link);
 #ifdef CONFIG_PM
 extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
 extern void ata_host_resume(struct ata_host *host);
-extern int ata_sas_port_async_suspend(struct ata_port *ap, int *async);
-extern int ata_sas_port_async_resume(struct ata_port *ap, int *async);
+extern void ata_sas_port_suspend(struct ata_port *ap);
+extern void ata_sas_port_resume(struct ata_port *ap);
 #else
-static inline int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
+static inline void ata_sas_port_suspend(struct ata_port *ap)
 {
-       return 0;
 }
-static inline int ata_sas_port_async_resume(struct ata_port *ap, int *async)
+static inline void ata_sas_port_resume(struct ata_port *ap)
 {
-       return 0;
 }
 #endif
 extern int ata_ratelimit(void);
index a6a42dd024661324dbeed5b9cfaa028744bae154..34a513a2727bbe83adff047613a1ad3458684ac2 100644 (file)
@@ -12,9 +12,9 @@
 #endif
 
 #ifdef __cplusplus
-#define CPP_ASMLINKAGE extern "C"
+#define CPP_ASMLINKAGE extern "C" __visible
 #else
-#define CPP_ASMLINKAGE
+#define CPP_ASMLINKAGE __visible
 #endif
 
 #ifndef asmlinkage
index 92b1bfc5da6087850e43015ebdaf7d3de455f522..008388f920d7e93e32ba388dd4b497f33d7bd49d 100644 (file)
@@ -252,9 +252,9 @@ struct held_lock {
        unsigned int trylock:1;                                         /* 16 bits */
 
        unsigned int read:2;        /* see lock_acquire() comment */
-       unsigned int check:2;       /* see lock_acquire() comment */
+       unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
-       unsigned int references:11;                                     /* 32 bits */
+       unsigned int references:12;                                     /* 32 bits */
 };
 
 /*
@@ -265,7 +265,7 @@ extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
 extern void lockdep_free_key_range(void *start, unsigned long size);
-extern void lockdep_sys_exit(void);
+extern asmlinkage void lockdep_sys_exit(void);
 
 extern void lockdep_off(void);
 extern void lockdep_on(void);
@@ -303,7 +303,7 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                                 (lock)->dep_map.key, sub)
 
 #define lockdep_set_novalidate_class(lock) \
-       lockdep_set_class(lock, &__lockdep_no_validate__)
+       lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
 /*
  * Compare locking classes
  */
@@ -326,9 +326,8 @@ static inline int lockdep_match_key(struct lockdep_map *lock,
  *
  * Values for check:
  *
- *   0: disabled
- *   1: simple checks (freeing, held-at-exit-time, etc.)
- *   2: full validation
+ *   0: simple checks (freeing, held-at-exit-time, etc.)
+ *   1: full validation
  */
 extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
@@ -479,15 +478,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)
  * on the per lock-class debug mode:
  */
 
-#ifdef CONFIG_PROVE_LOCKING
- #define lock_acquire_exclusive(l, s, t, n, i)         lock_acquire(l, s, t, 0, 2, n, i)
- #define lock_acquire_shared(l, s, t, n, i)            lock_acquire(l, s, t, 1, 2, n, i)
- #define lock_acquire_shared_recursive(l, s, t, n, i)  lock_acquire(l, s, t, 2, 2, n, i)
-#else
- #define lock_acquire_exclusive(l, s, t, n, i)         lock_acquire(l, s, t, 0, 1, n, i)
- #define lock_acquire_shared(l, s, t, n, i)            lock_acquire(l, s, t, 1, 1, n, i)
- #define lock_acquire_shared_recursive(l, s, t, n, i)  lock_acquire(l, s, t, 2, 1, n, i)
-#endif
+#define lock_acquire_exclusive(l, s, t, n, i)          lock_acquire(l, s, t, 0, 1, n, i)
+#define lock_acquire_shared(l, s, t, n, i)             lock_acquire(l, s, t, 1, 1, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i)   lock_acquire(l, s, t, 2, 1, n, i)
 
 #define spin_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
 #define spin_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
@@ -518,13 +511,13 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 # define might_lock(lock)                                              \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
-       lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);    \
+       lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
 } while (0)
 # define might_lock_read(lock)                                                 \
 do {                                                                   \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
-       lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);    \
+       lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
 } while (0)
 #else
index 41c9bde410c5b31f0bcdfb61de557ca0fe5353ce..157e32b6ca28790fccbc2a3d47d30c9b210d6ec4 100644 (file)
@@ -18,7 +18,9 @@ enum sec_device_type {
        S5M8751X,
        S5M8763X,
        S5M8767X,
+       S2MPA01,
        S2MPS11X,
+       S2MPS14X,
 };
 
 /**
@@ -50,7 +52,7 @@ struct sec_pmic_dev {
        struct regmap_irq_chip_data *irq_data;
 
        int ono;
-       int type;
+       unsigned long type;
        bool wakeup;
        bool wtsr_smpl;
 };
@@ -92,7 +94,7 @@ struct sec_platform_data {
        int                             buck3_default_idx;
        int                             buck4_default_idx;
 
-       int                             buck_ramp_delay;
+       int                             buck_ramp_delay;
 
        int                             buck2_ramp_delay;
        int                             buck34_ramp_delay;
@@ -100,10 +102,15 @@ struct sec_platform_data {
        int                             buck16_ramp_delay;
        int                             buck7810_ramp_delay;
        int                             buck9_ramp_delay;
-
-       bool                            buck2_ramp_enable;
-       bool                            buck3_ramp_enable;
-       bool                            buck4_ramp_enable;
+       int                             buck24_ramp_delay;
+       int                             buck3_ramp_delay;
+       int                             buck7_ramp_delay;
+       int                             buck8910_ramp_delay;
+
+       bool                            buck1_ramp_enable;
+       bool                            buck2_ramp_enable;
+       bool                            buck3_ramp_enable;
+       bool                            buck4_ramp_enable;
        bool                            buck6_ramp_enable;
 
        int                             buck2_init;
@@ -119,7 +126,8 @@ struct sec_platform_data {
 struct sec_regulator_data {
        int                             id;
        struct regulator_init_data      *initdata;
-       struct device_node *reg_node;
+       struct device_node              *reg_node;
+       int                             ext_control_gpio;
 };
 
 /*
index d43b4f9e7fb27ab4b5014592fe363d3a97ba153e..1224f447356b90a5601fbc1826eb0a67606aeefe 100644 (file)
 #ifndef __LINUX_MFD_SEC_IRQ_H
 #define __LINUX_MFD_SEC_IRQ_H
 
+enum s2mpa01_irq {
+       S2MPA01_IRQ_PWRONF,
+       S2MPA01_IRQ_PWRONR,
+       S2MPA01_IRQ_JIGONBF,
+       S2MPA01_IRQ_JIGONBR,
+       S2MPA01_IRQ_ACOKBF,
+       S2MPA01_IRQ_ACOKBR,
+       S2MPA01_IRQ_PWRON1S,
+       S2MPA01_IRQ_MRB,
+
+       S2MPA01_IRQ_RTC60S,
+       S2MPA01_IRQ_RTCA1,
+       S2MPA01_IRQ_RTCA0,
+       S2MPA01_IRQ_SMPL,
+       S2MPA01_IRQ_RTC1S,
+       S2MPA01_IRQ_WTSR,
+
+       S2MPA01_IRQ_INT120C,
+       S2MPA01_IRQ_INT140C,
+       S2MPA01_IRQ_LDO3_TSD,
+       S2MPA01_IRQ_B16_TSD,
+       S2MPA01_IRQ_B24_TSD,
+       S2MPA01_IRQ_B35_TSD,
+
+       S2MPA01_IRQ_NR,
+};
+
+#define S2MPA01_IRQ_PWRONF_MASK                (1 << 0)
+#define S2MPA01_IRQ_PWRONR_MASK                (1 << 1)
+#define S2MPA01_IRQ_JIGONBF_MASK       (1 << 2)
+#define S2MPA01_IRQ_JIGONBR_MASK       (1 << 3)
+#define S2MPA01_IRQ_ACOKBF_MASK                (1 << 4)
+#define S2MPA01_IRQ_ACOKBR_MASK                (1 << 5)
+#define S2MPA01_IRQ_PWRON1S_MASK       (1 << 6)
+#define S2MPA01_IRQ_MRB_MASK           (1 << 7)
+
+#define S2MPA01_IRQ_RTC60S_MASK                (1 << 0)
+#define S2MPA01_IRQ_RTCA1_MASK         (1 << 1)
+#define S2MPA01_IRQ_RTCA0_MASK         (1 << 2)
+#define S2MPA01_IRQ_SMPL_MASK          (1 << 3)
+#define S2MPA01_IRQ_RTC1S_MASK         (1 << 4)
+#define S2MPA01_IRQ_WTSR_MASK          (1 << 5)
+
+#define S2MPA01_IRQ_INT120C_MASK       (1 << 0)
+#define S2MPA01_IRQ_INT140C_MASK       (1 << 1)
+#define S2MPA01_IRQ_LDO3_TSD_MASK      (1 << 2)
+#define S2MPA01_IRQ_B16_TSD_MASK       (1 << 3)
+#define S2MPA01_IRQ_B24_TSD_MASK       (1 << 4)
+#define S2MPA01_IRQ_B35_TSD_MASK       (1 << 5)
+
 enum s2mps11_irq {
        S2MPS11_IRQ_PWRONF,
        S2MPS11_IRQ_PWRONR,
@@ -24,8 +74,8 @@ enum s2mps11_irq {
        S2MPS11_IRQ_MRB,
 
        S2MPS11_IRQ_RTC60S,
+       S2MPS11_IRQ_RTCA0,
        S2MPS11_IRQ_RTCA1,
-       S2MPS11_IRQ_RTCA2,
        S2MPS11_IRQ_SMPL,
        S2MPS11_IRQ_RTC1S,
        S2MPS11_IRQ_WTSR,
@@ -47,7 +97,7 @@ enum s2mps11_irq {
 
 #define S2MPS11_IRQ_RTC60S_MASK                (1 << 0)
 #define S2MPS11_IRQ_RTCA1_MASK         (1 << 1)
-#define S2MPS11_IRQ_RTCA2_MASK         (1 << 2)
+#define S2MPS11_IRQ_RTCA0_MASK         (1 << 2)
 #define S2MPS11_IRQ_SMPL_MASK          (1 << 3)
 #define S2MPS11_IRQ_RTC1S_MASK         (1 << 4)
 #define S2MPS11_IRQ_WTSR_MASK          (1 << 5)
@@ -55,6 +105,33 @@ enum s2mps11_irq {
 #define S2MPS11_IRQ_INT120C_MASK       (1 << 0)
 #define S2MPS11_IRQ_INT140C_MASK       (1 << 1)
 
+enum s2mps14_irq {
+       S2MPS14_IRQ_PWRONF,
+       S2MPS14_IRQ_PWRONR,
+       S2MPS14_IRQ_JIGONBF,
+       S2MPS14_IRQ_JIGONBR,
+       S2MPS14_IRQ_ACOKBF,
+       S2MPS14_IRQ_ACOKBR,
+       S2MPS14_IRQ_PWRON1S,
+       S2MPS14_IRQ_MRB,
+
+       S2MPS14_IRQ_RTC60S,
+       S2MPS14_IRQ_RTCA1,
+       S2MPS14_IRQ_RTCA0,
+       S2MPS14_IRQ_SMPL,
+       S2MPS14_IRQ_RTC1S,
+       S2MPS14_IRQ_WTSR,
+
+       S2MPS14_IRQ_INT120C,
+       S2MPS14_IRQ_INT140C,
+       S2MPS14_IRQ_TSD,
+
+       S2MPS14_IRQ_NR,
+};
+
+/* Masks for interrupts are the same as in s2mps11 */
+#define S2MPS14_IRQ_TSD_MASK           (1 << 2)
+
 enum s5m8767_irq {
        S5M8767_IRQ_PWRR,
        S5M8767_IRQ_PWRF,
index 94b7cd6d889185f9df10b611099bf06038365b83..3e02b768d53704968a76e27af9cece0f455de6bc 100644 (file)
@@ -1,12 +1,17 @@
-/*  rtc.h
+/* rtc.h
  *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
  *              http://www.samsung.com
  *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  *
  */
 
@@ -43,6 +48,39 @@ enum sec_rtc_reg {
        SEC_RTC_STATUS,
        SEC_WTSR_SMPL_CNTL,
        SEC_RTC_UDR_CON,
+
+       SEC_RTC_REG_MAX,
+};
+
+enum s2mps_rtc_reg {
+       S2MPS_RTC_CTRL,
+       S2MPS_WTSR_SMPL_CNTL,
+       S2MPS_RTC_UDR_CON,
+       S2MPS_RSVD,
+       S2MPS_RTC_SEC,
+       S2MPS_RTC_MIN,
+       S2MPS_RTC_HOUR,
+       S2MPS_RTC_WEEKDAY,
+       S2MPS_RTC_DATE,
+       S2MPS_RTC_MONTH,
+       S2MPS_RTC_YEAR,
+       S2MPS_ALARM0_SEC,
+       S2MPS_ALARM0_MIN,
+       S2MPS_ALARM0_HOUR,
+       S2MPS_ALARM0_WEEKDAY,
+       S2MPS_ALARM0_DATE,
+       S2MPS_ALARM0_MONTH,
+       S2MPS_ALARM0_YEAR,
+       S2MPS_ALARM1_SEC,
+       S2MPS_ALARM1_MIN,
+       S2MPS_ALARM1_HOUR,
+       S2MPS_ALARM1_WEEKDAY,
+       S2MPS_ALARM1_DATE,
+       S2MPS_ALARM1_MONTH,
+       S2MPS_ALARM1_YEAR,
+       S2MPS_OFFSRC,
+
+       S2MPS_RTC_REG_MAX,
 };
 
 #define RTC_I2C_ADDR           (0x0C >> 1)
@@ -54,6 +92,9 @@ enum sec_rtc_reg {
 #define ALARM1_STATUS          (1 << 2)
 #define UPDATE_AD              (1 << 0)
 
+#define S2MPS_ALARM0_STATUS    (1 << 2)
+#define S2MPS_ALARM1_STATUS    (1 << 1)
+
 /* RTC Control Register */
 #define BCD_EN_SHIFT           0
 #define BCD_EN_MASK            (1 << BCD_EN_SHIFT)
@@ -62,6 +103,10 @@ enum sec_rtc_reg {
 /* RTC Update Register1 */
 #define RTC_UDR_SHIFT          0
 #define RTC_UDR_MASK           (1 << RTC_UDR_SHIFT)
+#define S2MPS_RTC_WUDR_SHIFT   4
+#define S2MPS_RTC_WUDR_MASK    (1 << S2MPS_RTC_WUDR_SHIFT)
+#define S2MPS_RTC_RUDR_SHIFT   0
+#define S2MPS_RTC_RUDR_MASK    (1 << S2MPS_RTC_RUDR_SHIFT)
 #define RTC_TCON_SHIFT         1
 #define RTC_TCON_MASK          (1 << RTC_TCON_SHIFT)
 #define RTC_TIME_EN_SHIFT      3
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
new file mode 100644 (file)
index 0000000..fbc63bc
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ *             http://www.samsung.com
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPA01_H
+#define __LINUX_MFD_S2MPA01_H
+
+/* S2MPA01 registers */
+enum s2mpa01_reg {
+       S2MPA01_REG_ID,
+       S2MPA01_REG_INT1,
+       S2MPA01_REG_INT2,
+       S2MPA01_REG_INT3,
+       S2MPA01_REG_INT1M,
+       S2MPA01_REG_INT2M,
+       S2MPA01_REG_INT3M,
+       S2MPA01_REG_ST1,
+       S2MPA01_REG_ST2,
+       S2MPA01_REG_PWRONSRC,
+       S2MPA01_REG_OFFSRC,
+       S2MPA01_REG_RTC_BUF,
+       S2MPA01_REG_CTRL1,
+       S2MPA01_REG_ETC_TEST,
+       S2MPA01_REG_RSVD1,
+       S2MPA01_REG_BU_CHG,
+       S2MPA01_REG_RAMP1,
+       S2MPA01_REG_RAMP2,
+       S2MPA01_REG_LDO_DSCH1,
+       S2MPA01_REG_LDO_DSCH2,
+       S2MPA01_REG_LDO_DSCH3,
+       S2MPA01_REG_LDO_DSCH4,
+       S2MPA01_REG_OTP_ADRL,
+       S2MPA01_REG_OTP_ADRH,
+       S2MPA01_REG_OTP_DATA,
+       S2MPA01_REG_MON1SEL,
+       S2MPA01_REG_MON2SEL,
+       S2MPA01_REG_LEE,
+       S2MPA01_REG_RSVD2,
+       S2MPA01_REG_RSVD3,
+       S2MPA01_REG_RSVD4,
+       S2MPA01_REG_RSVD5,
+       S2MPA01_REG_RSVD6,
+       S2MPA01_REG_TOP_RSVD,
+       S2MPA01_REG_DVS_SEL,
+       S2MPA01_REG_DVS_PTR,
+       S2MPA01_REG_DVS_DATA,
+       S2MPA01_REG_RSVD_NO,
+       S2MPA01_REG_UVLO,
+       S2MPA01_REG_LEE_NO,
+       S2MPA01_REG_B1CTRL1,
+       S2MPA01_REG_B1CTRL2,
+       S2MPA01_REG_B2CTRL1,
+       S2MPA01_REG_B2CTRL2,
+       S2MPA01_REG_B3CTRL1,
+       S2MPA01_REG_B3CTRL2,
+       S2MPA01_REG_B4CTRL1,
+       S2MPA01_REG_B4CTRL2,
+       S2MPA01_REG_B5CTRL1,
+       S2MPA01_REG_B5CTRL2,
+       S2MPA01_REG_B5CTRL3,
+       S2MPA01_REG_B5CTRL4,
+       S2MPA01_REG_B5CTRL5,
+       S2MPA01_REG_B5CTRL6,
+       S2MPA01_REG_B6CTRL1,
+       S2MPA01_REG_B6CTRL2,
+       S2MPA01_REG_B7CTRL1,
+       S2MPA01_REG_B7CTRL2,
+       S2MPA01_REG_B8CTRL1,
+       S2MPA01_REG_B8CTRL2,
+       S2MPA01_REG_B9CTRL1,
+       S2MPA01_REG_B9CTRL2,
+       S2MPA01_REG_B10CTRL1,
+       S2MPA01_REG_B10CTRL2,
+       S2MPA01_REG_L1CTRL,
+       S2MPA01_REG_L2CTRL,
+       S2MPA01_REG_L3CTRL,
+       S2MPA01_REG_L4CTRL,
+       S2MPA01_REG_L5CTRL,
+       S2MPA01_REG_L6CTRL,
+       S2MPA01_REG_L7CTRL,
+       S2MPA01_REG_L8CTRL,
+       S2MPA01_REG_L9CTRL,
+       S2MPA01_REG_L10CTRL,
+       S2MPA01_REG_L11CTRL,
+       S2MPA01_REG_L12CTRL,
+       S2MPA01_REG_L13CTRL,
+       S2MPA01_REG_L14CTRL,
+       S2MPA01_REG_L15CTRL,
+       S2MPA01_REG_L16CTRL,
+       S2MPA01_REG_L17CTRL,
+       S2MPA01_REG_L18CTRL,
+       S2MPA01_REG_L19CTRL,
+       S2MPA01_REG_L20CTRL,
+       S2MPA01_REG_L21CTRL,
+       S2MPA01_REG_L22CTRL,
+       S2MPA01_REG_L23CTRL,
+       S2MPA01_REG_L24CTRL,
+       S2MPA01_REG_L25CTRL,
+       S2MPA01_REG_L26CTRL,
+
+       S2MPA01_REG_LDO_OVCB1,
+       S2MPA01_REG_LDO_OVCB2,
+       S2MPA01_REG_LDO_OVCB3,
+       S2MPA01_REG_LDO_OVCB4,
+
+};
+
+/* S2MPA01 regulator ids */
+enum s2mpa01_regulators {
+       S2MPA01_LDO1,
+       S2MPA01_LDO2,
+       S2MPA01_LDO3,
+       S2MPA01_LDO4,
+       S2MPA01_LDO5,
+       S2MPA01_LDO6,
+       S2MPA01_LDO7,
+       S2MPA01_LDO8,
+       S2MPA01_LDO9,
+       S2MPA01_LDO10,
+       S2MPA01_LDO11,
+       S2MPA01_LDO12,
+       S2MPA01_LDO13,
+       S2MPA01_LDO14,
+       S2MPA01_LDO15,
+       S2MPA01_LDO16,
+       S2MPA01_LDO17,
+       S2MPA01_LDO18,
+       S2MPA01_LDO19,
+       S2MPA01_LDO20,
+       S2MPA01_LDO21,
+       S2MPA01_LDO22,
+       S2MPA01_LDO23,
+       S2MPA01_LDO24,
+       S2MPA01_LDO25,
+       S2MPA01_LDO26,
+
+       S2MPA01_BUCK1,
+       S2MPA01_BUCK2,
+       S2MPA01_BUCK3,
+       S2MPA01_BUCK4,
+       S2MPA01_BUCK5,
+       S2MPA01_BUCK6,
+       S2MPA01_BUCK7,
+       S2MPA01_BUCK8,
+       S2MPA01_BUCK9,
+       S2MPA01_BUCK10,
+
+       S2MPA01_REGULATOR_MAX,
+};
+
+#define S2MPA01_BUCK_MIN1      600000
+#define S2MPA01_BUCK_MIN2      800000
+#define S2MPA01_BUCK_MIN3      1000000
+#define S2MPA01_BUCK_MIN4      1500000
+#define S2MPA01_LDO_MIN                800000
+
+#define S2MPA01_BUCK_STEP1     6250
+#define S2MPA01_BUCK_STEP2     12500
+
+#define S2MPA01_LDO_STEP1      50000
+#define S2MPA01_LDO_STEP2      25000
+
+#define S2MPA01_LDO_VSEL_MASK  0x3F
+#define S2MPA01_BUCK_VSEL_MASK 0xFF
+#define S2MPA01_ENABLE_MASK    (0x03 << S2MPA01_ENABLE_SHIFT)
+#define S2MPA01_ENABLE_SHIFT   0x06
+#define S2MPA01_LDO_N_VOLTAGES (S2MPA01_LDO_VSEL_MASK + 1)
+#define S2MPA01_BUCK_N_VOLTAGES (S2MPA01_BUCK_VSEL_MASK + 1)
+
+#define S2MPA01_RAMP_DELAY     12500   /* uV/us */
+
+#define S2MPA01_BUCK16_RAMP_SHIFT      4
+#define S2MPA01_BUCK24_RAMP_SHIFT      6
+#define S2MPA01_BUCK3_RAMP_SHIFT       4
+#define S2MPA01_BUCK5_RAMP_SHIFT       6
+#define S2MPA01_BUCK7_RAMP_SHIFT       2
+#define S2MPA01_BUCK8910_RAMP_SHIFT    0
+
+#define S2MPA01_BUCK1_RAMP_EN_SHIFT    3
+#define S2MPA01_BUCK2_RAMP_EN_SHIFT    2
+#define S2MPA01_BUCK3_RAMP_EN_SHIFT    1
+#define S2MPA01_BUCK4_RAMP_EN_SHIFT    0
+#define S2MPA01_PMIC_EN_SHIFT  6
+
+#endif /*__LINUX_MFD_S2MPA01_H */
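
The *_MIN/*_STEP/*_VSEL_MASK constants above describe simple linear voltage maps. A minimal sketch of the selector-to-microvolt arithmetic they imply, assuming BUCK1 uses the MIN1/STEP1 pair (the helper name is hypothetical):

    /* Illustrative only: selector 0x00 -> 600000 uV, each step adds 6250 uV. */
    static int s2mpa01_buck1_sel_to_uV(unsigned int sel)
    {
            sel &= S2MPA01_BUCK_VSEL_MASK;
            return S2MPA01_BUCK_MIN1 + sel * S2MPA01_BUCK_STEP1;
    }
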
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
new file mode 100644 (file)
index 0000000..4b449b8
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * s2mps14.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ *              http://www.samsung.com
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPS14_H
+#define __LINUX_MFD_S2MPS14_H
+
+/* S2MPS14 registers */
+enum s2mps14_reg {
+       S2MPS14_REG_ID,
+       S2MPS14_REG_INT1,
+       S2MPS14_REG_INT2,
+       S2MPS14_REG_INT3,
+       S2MPS14_REG_INT1M,
+       S2MPS14_REG_INT2M,
+       S2MPS14_REG_INT3M,
+       S2MPS14_REG_ST1,
+       S2MPS14_REG_ST2,
+       S2MPS14_REG_PWRONSRC,
+       S2MPS14_REG_OFFSRC,
+       S2MPS14_REG_BU_CHG,
+       S2MPS14_REG_RTCCTRL,
+       S2MPS14_REG_CTRL1,
+       S2MPS14_REG_CTRL2,
+       S2MPS14_REG_RSVD1,
+       S2MPS14_REG_RSVD2,
+       S2MPS14_REG_RSVD3,
+       S2MPS14_REG_RSVD4,
+       S2MPS14_REG_RSVD5,
+       S2MPS14_REG_RSVD6,
+       S2MPS14_REG_CTRL3,
+       S2MPS14_REG_RSVD7,
+       S2MPS14_REG_RSVD8,
+       S2MPS14_REG_WRSTBI,
+       S2MPS14_REG_B1CTRL1,
+       S2MPS14_REG_B1CTRL2,
+       S2MPS14_REG_B2CTRL1,
+       S2MPS14_REG_B2CTRL2,
+       S2MPS14_REG_B3CTRL1,
+       S2MPS14_REG_B3CTRL2,
+       S2MPS14_REG_B4CTRL1,
+       S2MPS14_REG_B4CTRL2,
+       S2MPS14_REG_B5CTRL1,
+       S2MPS14_REG_B5CTRL2,
+       S2MPS14_REG_L1CTRL,
+       S2MPS14_REG_L2CTRL,
+       S2MPS14_REG_L3CTRL,
+       S2MPS14_REG_L4CTRL,
+       S2MPS14_REG_L5CTRL,
+       S2MPS14_REG_L6CTRL,
+       S2MPS14_REG_L7CTRL,
+       S2MPS14_REG_L8CTRL,
+       S2MPS14_REG_L9CTRL,
+       S2MPS14_REG_L10CTRL,
+       S2MPS14_REG_L11CTRL,
+       S2MPS14_REG_L12CTRL,
+       S2MPS14_REG_L13CTRL,
+       S2MPS14_REG_L14CTRL,
+       S2MPS14_REG_L15CTRL,
+       S2MPS14_REG_L16CTRL,
+       S2MPS14_REG_L17CTRL,
+       S2MPS14_REG_L18CTRL,
+       S2MPS14_REG_L19CTRL,
+       S2MPS14_REG_L20CTRL,
+       S2MPS14_REG_L21CTRL,
+       S2MPS14_REG_L22CTRL,
+       S2MPS14_REG_L23CTRL,
+       S2MPS14_REG_L24CTRL,
+       S2MPS14_REG_L25CTRL,
+       S2MPS14_REG_LDODSCH1,
+       S2MPS14_REG_LDODSCH2,
+       S2MPS14_REG_LDODSCH3,
+};
+
+/* S2MPS14 regulator ids */
+enum s2mps14_regulators {
+       S2MPS14_LDO1,
+       S2MPS14_LDO2,
+       S2MPS14_LDO3,
+       S2MPS14_LDO4,
+       S2MPS14_LDO5,
+       S2MPS14_LDO6,
+       S2MPS14_LDO7,
+       S2MPS14_LDO8,
+       S2MPS14_LDO9,
+       S2MPS14_LDO10,
+       S2MPS14_LDO11,
+       S2MPS14_LDO12,
+       S2MPS14_LDO13,
+       S2MPS14_LDO14,
+       S2MPS14_LDO15,
+       S2MPS14_LDO16,
+       S2MPS14_LDO17,
+       S2MPS14_LDO18,
+       S2MPS14_LDO19,
+       S2MPS14_LDO20,
+       S2MPS14_LDO21,
+       S2MPS14_LDO22,
+       S2MPS14_LDO23,
+       S2MPS14_LDO24,
+       S2MPS14_LDO25,
+       S2MPS14_BUCK1,
+       S2MPS14_BUCK2,
+       S2MPS14_BUCK3,
+       S2MPS14_BUCK4,
+       S2MPS14_BUCK5,
+
+       S2MPS14_REGULATOR_MAX,
+};
+
+/* Regulator constraints for BUCKx */
+#define S2MPS14_BUCK1235_MIN_600MV     600000
+#define S2MPS14_BUCK4_MIN_1400MV       1400000
+#define S2MPS14_BUCK1235_STEP_6_25MV   6250
+#define S2MPS14_BUCK4_STEP_12_5MV      12500
+#define S2MPS14_BUCK1235_START_SEL     0x20
+#define S2MPS14_BUCK4_START_SEL                0x40
+/*
+ * Default ramp delay in uV/us. The datasheet says that the ramp delay can be
+ * controlled, but it does not specify which register is used for that.
+ * Assume that the default value is used.
+ */
+#define S2MPS14_BUCK_RAMP_DELAY                12500
+
+/* Regulator constraints for different types of LDOx */
+#define S2MPS14_LDO_MIN_800MV          800000
+#define S2MPS14_LDO_MIN_1800MV         1800000
+#define S2MPS14_LDO_STEP_12_5MV                12500
+#define S2MPS14_LDO_STEP_25MV          25000
+
+#define S2MPS14_LDO_VSEL_MASK          0x3F
+#define S2MPS14_BUCK_VSEL_MASK         0xFF
+#define S2MPS14_ENABLE_MASK            (0x03 << S2MPS14_ENABLE_SHIFT)
+#define S2MPS14_ENABLE_SHIFT           6
+/* On/Off controlled by PWREN */
+#define S2MPS14_ENABLE_SUSPEND         (0x01 << S2MPS14_ENABLE_SHIFT)
+#define S2MPS14_LDO_N_VOLTAGES         (S2MPS14_LDO_VSEL_MASK + 1)
+#define S2MPS14_BUCK_N_VOLTAGES                (S2MPS14_BUCK_VSEL_MASK + 1)
+
+#endif /*  __LINUX_MFD_S2MPS14_H */
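
Here the START_SEL constants add an offset: the minimum voltage corresponds to a non-zero selector. A minimal sketch of the implied mapping for BUCK1/2/3/5, with a hypothetical helper name, assuming the usual linear translation done by the regulator core:

    /* Illustrative only: selector 0x20 -> 600000 uV, then 6250 uV per step. */
    static int s2mps14_buck1235_sel_to_uV(unsigned int sel)
    {
            if (sel < S2MPS14_BUCK1235_START_SEL)
                    return -EINVAL;
            return S2MPS14_BUCK1235_MIN_600MV +
                   (sel - S2MPS14_BUCK1235_START_SEL) * S2MPS14_BUCK1235_STEP_6_25MV;
    }
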
index 2ab0b0f03641334077cc16c4bbd8ec8c9159619c..243b58fec33dacd3fa053e10bf7ca2e2a9c027a3 100644 (file)
@@ -183,9 +183,16 @@ enum s5m8767_regulators {
        S5M8767_REG_MAX,
 };
 
+/* LDO_EN/BUCK_EN field in registers */
 #define S5M8767_ENCTRL_SHIFT           6
 #define S5M8767_ENCTRL_MASK            (0x3 << S5M8767_ENCTRL_SHIFT)
 
+/*
+ * LDO_EN/BUCK_EN register value for controlling this Buck or LDO
+ * by GPIO (PWREN, BUCKEN).
+ */
+#define S5M8767_ENCTRL_USE_GPIO                0x1
+
 /*
  * Values for BUCK_RAMP field in DVS_RAMP register, matching raw values
  * in mV/us.
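
A minimal sketch of how the new S5M8767_ENCTRL_USE_GPIO value might be programmed into the 2-bit enable field; the regmap handle and register variable are placeholders:

    /* Illustrative only: hand on/off control of this rail to the
     * PWREN/BUCKEN GPIO instead of leaving it always enabled. */
    regmap_update_bits(pmic_regmap, ldo_ctrl_reg, S5M8767_ENCTRL_MASK,
                       S5M8767_ENCTRL_USE_GPIO << S5M8767_ENCTRL_SHIFT);
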
index c1b7414c7bef7c0a2128edd30438e48974af40f3..a0df4295e1717a23463d96d4f973a4330f2349ad 100644 (file)
@@ -1487,9 +1487,15 @@ static inline void pgtable_page_dtor(struct page *page)
 
 #if USE_SPLIT_PMD_PTLOCKS
 
+static struct page *pmd_to_page(pmd_t *pmd)
+{
+       unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+       return virt_to_page((void *)((unsigned long) pmd & mask));
+}
+
 static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
-       return ptlock_ptr(virt_to_page(pmd));
+       return ptlock_ptr(pmd_to_page(pmd));
 }
 
 static inline bool pgtable_pmd_page_ctor(struct page *page)
@@ -1508,7 +1514,7 @@ static inline void pgtable_pmd_page_dtor(struct page *page)
        ptlock_free(page);
 }
 
-#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
+#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
 
 #else
 
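pmd_to_page() exists because, with split PMD ptlocks, the lock lives in the struct page backing the PMD table, not in the page backing whatever a pmd_t pointer happens to alias; the mask rounds the entry address down to the start of that table. A stand-alone sketch of the arithmetic, with made-up values:

    /* Illustrative only, user-space arithmetic: 512 entries * 8 bytes
     * gives a 4 KiB table, so the mask clears the low 12 bits. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long entry = 0xffff880012345e78UL;
            unsigned long mask = ~(512UL * 8UL - 1);

            printf("%#lx\n", entry & mask);   /* prints 0xffff880012345000 */
            return 0;
    }
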
index d3181936c138ba2583960815a722aa8f90938a9d..11692dea18aa25a9410ce842cdd851a4d15560c7 100644 (file)
@@ -46,6 +46,7 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
+struct optimistic_spin_queue;
 struct mutex {
        /* 1: unlocked, 0: locked, negative: locked, possible waiters */
        atomic_t                count;
@@ -55,7 +56,7 @@ struct mutex {
        struct task_struct      *owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       void                    *spin_mlock;    /* Spinner MCS lock */
+       struct optimistic_spin_queue    *osq;   /* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
        const char              *name;
@@ -179,4 +180,4 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 # define arch_mutex_cpu_relax() cpu_relax()
 #endif
 
-#endif
+#endif /* __LINUX_MUTEX_H */
index 1005ebf175752774ada359369a313379a82dc85a..5a09a48f2658a64f7149ecddd30abd56c65cf69f 100644 (file)
@@ -163,4 +163,11 @@ enum {
 /* changeable features with no special hardware requirements */
 #define NETIF_F_SOFT_FEATURES  (NETIF_F_GSO | NETIF_F_GRO)
 
+#define NETIF_F_VLAN_FEATURES  (NETIF_F_HW_VLAN_CTAG_FILTER | \
+                                NETIF_F_HW_VLAN_CTAG_RX | \
+                                NETIF_F_HW_VLAN_CTAG_TX | \
+                                NETIF_F_HW_VLAN_STAG_FILTER | \
+                                NETIF_F_HW_VLAN_STAG_RX | \
+                                NETIF_F_HW_VLAN_STAG_TX)
+
 #endif /* _LINUX_NETDEV_FEATURES_H */
index e8eeebd49a98279837bb6e30afa82f3645465452..daafd9561cbca2335c1f0a3f20cd593e30fa5206 100644 (file)
@@ -3014,7 +3014,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
        return __skb_gso_segment(skb, features, true);
 }
-__be16 skb_network_protocol(struct sk_buff *skb);
+__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
 
 static inline bool can_checksum_protocol(netdev_features_t features,
                                         __be16 protocol)
index 69ae03f6eb159a39b2bd589add2c56b2c1c8f6cc..6b9aafed225fcd9a48228ac9f6044c7a956cc325 100644 (file)
@@ -87,6 +87,7 @@ struct nvme_dev {
        struct list_head namespaces;
        struct kref kref;
        struct miscdevice miscdev;
+       work_func_t reset_workfn;
        struct work_struct reset_work;
        char name[12];
        char serial[20];
index 435cb995904dedc6329916f29cd2378b5e0e7b2e..83d1ac80c91e3745a96d5fdd4e929d5e1542a525 100644 (file)
@@ -198,6 +198,8 @@ extern struct device_node *of_find_node_with_property(
 extern struct property *of_find_property(const struct device_node *np,
                                         const char *name,
                                         int *lenp);
+extern int of_property_count_elems_of_size(const struct device_node *np,
+                               const char *propname, int elem_size);
 extern int of_property_read_u32_index(const struct device_node *np,
                                       const char *propname,
                                       u32 index, u32 *out_value);
@@ -390,6 +392,12 @@ static inline struct device_node *of_find_compatible_node(
        return NULL;
 }
 
+static inline int of_property_count_elems_of_size(const struct device_node *np,
+                       const char *propname, int elem_size)
+{
+       return -ENOSYS;
+}
+
 static inline int of_property_read_u32_index(const struct device_node *np,
                        const char *propname, u32 index, u32 *out_value)
 {
@@ -535,6 +543,74 @@ static inline struct device_node *of_find_matching_node(
        return of_find_matching_node_and_match(from, matches, NULL);
 }
 
+/**
+ * of_property_count_u8_elems - Count the number of u8 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u8 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u8 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u8_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u8));
+}
+
+/**
+ * of_property_count_u16_elems - Count the number of u16 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u16 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u16 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u16_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u16));
+}
+
+/**
+ * of_property_count_u32_elems - Count the number of u32 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u32 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u32 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u32_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u32));
+}
+
+/**
+ * of_property_count_u64_elems - Count the number of u64 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u64 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u64 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u64_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u64));
+}
+
 /**
  * of_property_read_bool - Find a property
  * @np:                device node from which the property value is to be read.
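
The four wrappers above only differ in the element size they pass to of_property_count_elems_of_size(). A minimal sketch of typical use in a driver probe path; the property name and function are hypothetical:

    /* Illustrative only: size a buffer from the cell count, then read it. */
    static int foo_read_rates(struct device_node *np, u32 **rates)
    {
            int n = of_property_count_u32_elems(np, "foo,rate-table");

            if (n <= 0)
                    return n ? n : -ENODATA;

            *rates = kcalloc(n, sizeof(**rates), GFP_KERNEL);
            if (!*rates)
                    return -ENOMEM;

            return of_property_read_u32_array(np, "foo,rate-table", *rates, n);
    }
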
index 5a462c4e5009d68960525d2167728d59ee75e33b..637a608ded0b0091503db1ac4aaa04cf7155823c 100644 (file)
@@ -59,12 +59,12 @@ static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
 void acpiphp_init(void);
 void acpiphp_enumerate_slots(struct pci_bus *bus);
 void acpiphp_remove_slots(struct pci_bus *bus);
-void acpiphp_check_host_bridge(acpi_handle handle);
+void acpiphp_check_host_bridge(struct acpi_device *adev);
 #else
 static inline void acpiphp_init(void) { }
 static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
 static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
-static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
+static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
 #endif
 
 #else  /* CONFIG_ACPI */
index 97fbecdd7a401157cd290d6f4e981a8557ee4745..7399e6a3e9a00602722284d1d806a930898a450d 100644 (file)
 
 #define PCI_VENDOR_ID_INTEL            0x8086
 #define PCI_DEVICE_ID_INTEL_EESSC      0x0008
+#define PCI_DEVICE_ID_INTEL_SNB_IMC    0x0100
+#define PCI_DEVICE_ID_INTEL_IVB_IMC    0x0154
+#define PCI_DEVICE_ID_INTEL_HSW_IMC    0x0c00
 #define PCI_DEVICE_ID_INTEL_PXHD_0     0x0320
 #define PCI_DEVICE_ID_INTEL_PXHD_1     0x0321
 #define PCI_DEVICE_ID_INTEL_PXH_0      0x0329
index 8c6583a53a0608a796c543d196d52270cbe8ac4d..d915d0345fa1beb770a742e82247916c440e9543 100644 (file)
@@ -264,9 +264,9 @@ typedef struct pm_message {
  *     registers, so that it is fully operational.
  *
  * @runtime_idle: Device appears to be inactive and it might be put into a
- *     low-power state if all of the necessary conditions are satisfied.  Check
- *     these conditions and handle the device as appropriate, possibly queueing
- *     a suspend request for it.  The return value is ignored by the PM core.
+ *     low-power state if all of the necessary conditions are satisfied.
+ *     Check these conditions, and return 0 if it's appropriate to let the PM
+ *     core queue a suspend request for the device.
  *
  * Refer to Documentation/power/runtime_pm.txt for more information about the
  * role of the above callbacks in device runtime power management.
@@ -352,7 +352,7 @@ const struct dev_pm_ops name = { \
 
 /*
  * Use this for defining a set of PM operations to be used in all situations
- * (sustem suspend, hibernation or runtime PM).
+ * (system suspend, hibernation or runtime PM).
  * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
  * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
  * and .runtime_resume(), because .runtime_suspend() always works on an already
@@ -379,7 +379,7 @@ const struct dev_pm_ops name = { \
  *
  * ON          No transition.
  *
- * FREEZE      System is going to hibernate, call ->prepare() and ->freeze()
+ * FREEZE      System is going to hibernate, call ->prepare() and ->freeze()
  *             for all devices.
  *
  * SUSPEND     System is going to suspend, call ->prepare() and ->suspend()
@@ -423,7 +423,7 @@ const struct dev_pm_ops name = { \
 
 #define PM_EVENT_INVALID       (-1)
 #define PM_EVENT_ON            0x0000
-#define PM_EVENT_FREEZE        0x0001
+#define PM_EVENT_FREEZE                0x0001
 #define PM_EVENT_SUSPEND       0x0002
 #define PM_EVENT_HIBERNATE     0x0004
 #define PM_EVENT_QUIESCE       0x0008
@@ -542,6 +542,8 @@ struct dev_pm_info {
        unsigned int            async_suspend:1;
        bool                    is_prepared:1;  /* Owned by the PM core */
        bool                    is_suspended:1; /* Ditto */
+       bool                    is_noirq_suspended:1;
+       bool                    is_late_suspended:1;
        bool                    ignore_children:1;
        bool                    early_init:1;   /* Owned by the PM core */
        spinlock_t              lock;
@@ -582,6 +584,7 @@ struct dev_pm_info {
        unsigned long           accounting_timestamp;
 #endif
        struct pm_subsys_data   *subsys_data;  /* Owned by the subsystem. */
+       void (*set_latency_tolerance)(struct device *, s32);
        struct dev_pm_qos       *qos;
 };
 
@@ -612,11 +615,11 @@ struct dev_pm_domain {
  * message is implicit:
  *
  * ON          Driver starts working again, responding to hardware events
- *             and software requests.  The hardware may have gone through
- *             a power-off reset, or it may have maintained state from the
- *             previous suspend() which the driver will rely on while
- *             resuming.  On most platforms, there are no restrictions on
- *             availability of resources like clocks during resume().
+ *             and software requests.  The hardware may have gone through
+ *             a power-off reset, or it may have maintained state from the
+ *             previous suspend() which the driver will rely on while
+ *             resuming.  On most platforms, there are no restrictions on
+ *             availability of resources like clocks during resume().
  *
  * Other transitions are triggered by messages sent using suspend().  All
  * these transitions quiesce the driver, so that I/O queues are inactive.
@@ -626,21 +629,21 @@ struct dev_pm_domain {
  * differ according to the message:
  *
  * SUSPEND     Quiesce, enter a low power device state appropriate for
- *             the upcoming system state (such as PCI_D3hot), and enable
- *             wakeup events as appropriate.
+ *             the upcoming system state (such as PCI_D3hot), and enable
+ *             wakeup events as appropriate.
  *
  * HIBERNATE   Enter a low power device state appropriate for the hibernation
- *             state (eg. ACPI S4) and enable wakeup events as appropriate.
+ *             state (eg. ACPI S4) and enable wakeup events as appropriate.
  *
  * FREEZE      Quiesce operations so that a consistent image can be saved;
- *             but do NOT otherwise enter a low power device state, and do
- *             NOT emit system wakeup events.
+ *             but do NOT otherwise enter a low power device state, and do
+ *             NOT emit system wakeup events.
  *
  * PRETHAW     Quiesce as if for FREEZE; additionally, prepare for restoring
- *             the system from a snapshot taken after an earlier FREEZE.
- *             Some drivers will need to reset their hardware state instead
- *             of preserving it, to ensure that it's never mistaken for the
- *             state which that earlier snapshot had set up.
+ *             the system from a snapshot taken after an earlier FREEZE.
+ *             Some drivers will need to reset their hardware state instead
+ *             of preserving it, to ensure that it's never mistaken for the
+ *             state which that earlier snapshot had set up.
  *
  * A minimally power-aware driver treats all messages as SUSPEND, fully
  * reinitializes its device during resume() -- whether or not it was reset
@@ -717,14 +720,26 @@ static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void
 {
 }
 
-#define pm_generic_prepare     NULL
-#define pm_generic_suspend     NULL
-#define pm_generic_resume      NULL
-#define pm_generic_freeze      NULL
-#define pm_generic_thaw                NULL
-#define pm_generic_restore     NULL
-#define pm_generic_poweroff    NULL
-#define pm_generic_complete    NULL
+#define pm_generic_prepare             NULL
+#define pm_generic_suspend_late                NULL
+#define pm_generic_suspend_noirq       NULL
+#define pm_generic_suspend             NULL
+#define pm_generic_resume_early                NULL
+#define pm_generic_resume_noirq                NULL
+#define pm_generic_resume              NULL
+#define pm_generic_freeze_noirq                NULL
+#define pm_generic_freeze_late         NULL
+#define pm_generic_freeze              NULL
+#define pm_generic_thaw_noirq          NULL
+#define pm_generic_thaw_early          NULL
+#define pm_generic_thaw                        NULL
+#define pm_generic_restore_noirq       NULL
+#define pm_generic_restore_early       NULL
+#define pm_generic_restore             NULL
+#define pm_generic_poweroff_noirq      NULL
+#define pm_generic_poweroff_late       NULL
+#define pm_generic_poweroff            NULL
+#define pm_generic_complete            NULL
 #endif /* !CONFIG_PM_SLEEP */
 
 /* How to reorder dpm_list after device_move() */
index 5a95013905c8c9ab96fde04c3dc8a914b78119da..9ab4bf7c464660821b976afad421d4b86d5811ac 100644 (file)
@@ -32,7 +32,10 @@ enum pm_qos_flags_status {
 #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
 #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE        0
-#define PM_QOS_DEV_LAT_DEFAULT_VALUE           0
+#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE    0
+#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
+#define PM_QOS_LATENCY_ANY                     ((s32)(~(__u32)0 >> 1))
 
 #define PM_QOS_FLAG_NO_POWER_OFF       (1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP      (1 << 1)
@@ -49,7 +52,8 @@ struct pm_qos_flags_request {
 };
 
 enum dev_pm_qos_req_type {
-       DEV_PM_QOS_LATENCY = 1,
+       DEV_PM_QOS_RESUME_LATENCY = 1,
+       DEV_PM_QOS_LATENCY_TOLERANCE,
        DEV_PM_QOS_FLAGS,
 };
 
@@ -77,6 +81,7 @@ struct pm_qos_constraints {
        struct plist_head list;
        s32 target_value;       /* Do not change to 64 bit */
        s32 default_value;
+       s32 no_constraint_value;
        enum pm_qos_type type;
        struct blocking_notifier_head *notifiers;
 };
@@ -87,9 +92,11 @@ struct pm_qos_flags {
 };
 
 struct dev_pm_qos {
-       struct pm_qos_constraints latency;
+       struct pm_qos_constraints resume_latency;
+       struct pm_qos_constraints latency_tolerance;
        struct pm_qos_flags flags;
-       struct dev_pm_qos_request *latency_req;
+       struct dev_pm_qos_request *resume_latency_req;
+       struct dev_pm_qos_request *latency_tolerance_req;
        struct dev_pm_qos_request *flags_req;
 };
 
@@ -142,7 +149,8 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
 int dev_pm_qos_add_ancestor_request(struct device *dev,
-                                   struct dev_pm_qos_request *req, s32 value);
+                                   struct dev_pm_qos_request *req,
+                                   enum dev_pm_qos_req_type type, s32 value);
 #else
 static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
                                                          s32 mask)
@@ -185,7 +193,9 @@ static inline void dev_pm_qos_constraints_destroy(struct device *dev)
        dev->power.power_state = PMSG_INVALID;
 }
 static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
-                                   struct dev_pm_qos_request *req, s32 value)
+                                                 struct dev_pm_qos_request *req,
+                                                 enum dev_pm_qos_req_type type,
+                                                 s32 value)
                        { return 0; }
 #endif
 
@@ -195,10 +205,12 @@ void dev_pm_qos_hide_latency_limit(struct device *dev);
 int dev_pm_qos_expose_flags(struct device *dev, s32 value);
 void dev_pm_qos_hide_flags(struct device *dev);
 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
 
-static inline s32 dev_pm_qos_requested_latency(struct device *dev)
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
 {
-       return dev->power.qos->latency_req->data.pnode.prio;
+       return dev->power.qos->resume_latency_req->data.pnode.prio;
 }
 
 static inline s32 dev_pm_qos_requested_flags(struct device *dev)
@@ -214,8 +226,12 @@ static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
 static inline void dev_pm_qos_hide_flags(struct device *dev) {}
 static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
                        { return 0; }
+static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+                       { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
+static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+                       { return 0; }
 
-static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
 #endif
 
index dbaf9908411217c57a257309c5949e8d4b76ff5b..8183b46fbaa2d6da9817ead257735396a6ec7b0c 100644 (file)
@@ -247,9 +247,10 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_entry_rcu(ptr, type, member) \
-       ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
-        container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
-       })
+({ \
+       typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
+       container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
+})
 
 /**
  * Where are list_empty_rcu() and list_first_entry_rcu()?
@@ -285,11 +286,11 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
  */
 #define list_first_or_null_rcu(ptr, type, member) \
-       ({struct list_head *__ptr = (ptr); \
-         struct list_head *__next = ACCESS_ONCE(__ptr->next); \
-         likely(__ptr != __next) ? \
-               list_entry_rcu(__next, type, member) : NULL; \
-       })
+({ \
+       struct list_head *__ptr = (ptr); \
+       struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+       likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
+})
 
 /**
  * list_for_each_entry_rcu     -       iterate over rcu list of given type
index 72bf3a01a4ee67ac8908212c3de3897d383a2161..00a7fd61b3c6540521109c4c79e412a0833bef02 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2001
  *
@@ -44,7 +44,9 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <asm/barrier.h>
 
+extern int rcu_expedited; /* for sysctl */
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable; /* for sysctl */
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
@@ -314,7 +316,7 @@ static inline bool rcu_lockdep_current_cpu_online(void)
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-       lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
+       lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
@@ -479,11 +481,9 @@ static inline void rcu_preempt_sleep_check(void)
        do {                                                            \
                rcu_preempt_sleep_check();                              \
                rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),     \
-                                  "Illegal context switch in RCU-bh"   \
-                                  " read-side critical section");      \
+                                  "Illegal context switch in RCU-bh read-side critical section"); \
                rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),  \
-                                  "Illegal context switch in RCU-sched"\
-                                  " read-side critical section");      \
+                                  "Illegal context switch in RCU-sched read-side critical section"); \
        } while (0)
 
 #else /* #ifdef CONFIG_PROVE_RCU */
@@ -510,43 +510,40 @@ static inline void rcu_preempt_sleep_check(void)
 #endif /* #else #ifdef __CHECKER__ */
 
 #define __rcu_access_pointer(p, space) \
-       ({ \
-               typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
-               rcu_dereference_sparse(p, space); \
-               ((typeof(*p) __force __kernel *)(_________p1)); \
-       })
+({ \
+       typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+       rcu_dereference_sparse(p, space); \
+       ((typeof(*p) __force __kernel *)(_________p1)); \
+})
 #define __rcu_dereference_check(p, c, space) \
-       ({ \
-               typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
-               rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
-                                     " usage"); \
-               rcu_dereference_sparse(p, space); \
-               smp_read_barrier_depends(); \
-               ((typeof(*p) __force __kernel *)(_________p1)); \
-       })
+({ \
+       typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+       rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+       rcu_dereference_sparse(p, space); \
+       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+       ((typeof(*p) __force __kernel *)(_________p1)); \
+})
 #define __rcu_dereference_protected(p, c, space) \
-       ({ \
-               rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
-                                     " usage"); \
-               rcu_dereference_sparse(p, space); \
-               ((typeof(*p) __force __kernel *)(p)); \
-       })
+({ \
+       rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+       rcu_dereference_sparse(p, space); \
+       ((typeof(*p) __force __kernel *)(p)); \
+})
 
 #define __rcu_access_index(p, space) \
-       ({ \
-               typeof(p) _________p1 = ACCESS_ONCE(p); \
-               rcu_dereference_sparse(p, space); \
-               (_________p1); \
-       })
+({ \
+       typeof(p) _________p1 = ACCESS_ONCE(p); \
+       rcu_dereference_sparse(p, space); \
+       (_________p1); \
+})
 #define __rcu_dereference_index_check(p, c) \
-       ({ \
-               typeof(p) _________p1 = ACCESS_ONCE(p); \
-               rcu_lockdep_assert(c, \
-                                  "suspicious rcu_dereference_index_check()" \
-                                  " usage"); \
-               smp_read_barrier_depends(); \
-               (_________p1); \
-       })
+({ \
+       typeof(p) _________p1 = ACCESS_ONCE(p); \
+       rcu_lockdep_assert(c, \
+                          "suspicious rcu_dereference_index_check() usage"); \
+       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+       (_________p1); \
+})
 
 /**
  * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
@@ -585,12 +582,7 @@ static inline void rcu_preempt_sleep_check(void)
  * please be careful when making changes to rcu_assign_pointer() and the
  * other macros that it invokes.
  */
-#define rcu_assign_pointer(p, v) \
-       do { \
-               smp_wmb(); \
-               ACCESS_ONCE(p) = RCU_INITIALIZER(v); \
-       } while (0)
-
+#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
 
 /**
  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
@@ -1015,11 +1007,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define kfree_rcu(ptr, rcu_head)                                       \
        __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
-#ifdef CONFIG_RCU_NOCB_CPU
+#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
+static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
+{
+       *delta_jiffies = ULONG_MAX;
+       return 0;
+}
+#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+
+#if defined(CONFIG_RCU_NOCB_CPU_ALL)
+static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
+#elif defined(CONFIG_RCU_NOCB_CPU)
 bool rcu_is_nocb_cpu(int cpu);
 #else
 static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
-#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+#endif
 
 
 /* Only for use by adaptive-ticks code. */
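
rcu_assign_pointer() is now a plain smp_store_release(), but its pairing with rcu_dereference() on the read side is unchanged. A minimal sketch of that publish/subscribe pattern (the structure and global are hypothetical):

    struct foo { int a; };
    static struct foo __rcu *gp;                    /* hypothetical global */

    static void foo_publish(struct foo *newp)
    {
            newp->a = 1;                            /* init before publication */
            rcu_assign_pointer(gp, newp);           /* release-orders the init */
    }

    static int foo_read(void)
    {
            struct foo *p;
            int val = 0;

            rcu_read_lock();
            p = rcu_dereference(gp);
            if (p)
                    val = p->a;
            rcu_read_unlock();
            return val;
    }
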
index 6f01771b571c00eede5aaae145fdcf7382e61cd9..425c659d54e576a1991aa9aa2d777ee001d66546 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
 
 #include <linux/cache.h>
 
+static inline unsigned long get_state_synchronize_rcu(void)
+{
+       return 0;
+}
+
+static inline void cond_synchronize_rcu(unsigned long oldstate)
+{
+       might_sleep();
+}
+
 static inline void rcu_barrier_bh(void)
 {
        wait_rcu_gp(call_rcu_bh);
@@ -68,12 +78,6 @@ static inline void kfree_call_rcu(struct rcu_head *head,
        call_rcu(head, func);
 }
 
-static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
-{
-       *delta_jiffies = ULONG_MAX;
-       return 0;
-}
-
 static inline void rcu_note_context_switch(int cpu)
 {
        rcu_sched_qs(cpu);
index 72137ee8c603b2c0c45ea7f729211e12cfad3941..a59ca05fd4e36eca1de98e849f6e8b9855b37c82 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
@@ -31,7 +31,9 @@
 #define __LINUX_RCUTREE_H
 
 void rcu_note_context_switch(int cpu);
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 void rcu_cpu_stall_reset(void);
 
 /*
@@ -74,6 +76,8 @@ static inline void synchronize_rcu_bh_expedited(void)
 void rcu_barrier(void);
 void rcu_barrier_bh(void);
 void rcu_barrier_sched(void);
+unsigned long get_state_synchronize_rcu(void);
+void cond_synchronize_rcu(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
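
get_state_synchronize_rcu() and cond_synchronize_rcu() let a caller snapshot the grace-period state, do other work, and block only if a full grace period has not already elapsed. A minimal sketch, with a hypothetical deferred-free helper:

    static void foo_deferred_free(void *old)
    {
            unsigned long gp_state = get_state_synchronize_rcu();

            /* ... unrelated, possibly lengthy, setup work ... */

            cond_synchronize_rcu(gp_state); /* no-op if a GP already passed */
            kfree(old);
    }
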
index 4149f1a9b00320dfeea8768817b2adbd2d1a4da2..5ad86eacef0d4c17bae725f9a0eb1ebf09cd48a6 100644 (file)
@@ -164,6 +164,9 @@ typedef void (*regmap_unlock)(void *);
  * @use_single_rw: If set, converts the bulk read and write operations into
  *                 a series of single read and write operations. This is useful
 *                 for devices that do not support bulk read and write.
+ * @can_multi_write: If set, the device supports the multi write mode of bulk
+ *                   write operations; if clear, multi write requests will be
+ *                   split into individual write operations.
  *
  * @cache_type: The actual cache type.
  * @reg_defaults_raw: Power on reset values for registers (for use with
@@ -215,6 +218,7 @@ struct regmap_config {
        u8 write_flag_mask;
 
        bool use_single_rw;
+       bool can_multi_write;
 
        enum regmap_endian reg_format_endian;
        enum regmap_endian val_format_endian;
@@ -317,6 +321,8 @@ struct regmap *regmap_init(struct device *dev,
                           const struct regmap_bus *bus,
                           void *bus_context,
                           const struct regmap_config *config);
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+                                const struct regmap_config *config);
 struct regmap *regmap_init_i2c(struct i2c_client *i2c,
                               const struct regmap_config *config);
 struct regmap *regmap_init_spi(struct spi_device *dev,
@@ -386,8 +392,11 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
                     const void *val, size_t val_len);
 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                        size_t val_count);
-int regmap_multi_reg_write(struct regmap *map, struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
                        int num_regs);
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+                                   const struct reg_default *regs,
+                                   int num_regs);
 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
                           const void *val, size_t val_len);
 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
@@ -423,6 +432,8 @@ bool regmap_check_range_table(struct regmap *map, unsigned int reg,
 
 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
                          int num_regs);
+int regmap_parse_val(struct regmap *map, const void *buf,
+                               unsigned int *val);
 
 static inline bool regmap_reg_in_range(unsigned int reg,
                                       const struct regmap_range *range)
@@ -695,6 +706,13 @@ static inline int regmap_register_patch(struct regmap *map,
        return -EINVAL;
 }
 
+static inline int regmap_parse_val(struct regmap *map, const void *buf,
+                               unsigned int *val)
+{
+       WARN_ONCE(1, "regmap API is disabled");
+       return -EINVAL;
+}
+
 static inline struct regmap *dev_get_regmap(struct device *dev,
                                            const char *name)
 {
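
regmap_multi_reg_write() now takes a const table, and regmap_multi_reg_write_bypassed() adds a cache-bypassing variant. A minimal sketch of feeding an init sequence through it; registers and values are made up:

    static const struct reg_default foo_init_seq[] = {
            { .reg = 0x10, .def = 0x01 },
            { .reg = 0x11, .def = 0x80 },
    };

    static int foo_hw_init(struct regmap *map)
    {
            return regmap_multi_reg_write(map, foo_init_seq,
                                          ARRAY_SIZE(foo_init_seq));
    }
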
index 9370e65348a40ba5a92620fbfe4cfed4305a3963..bbe03a1924c04182be79d128854557ebb262b6e3 100644 (file)
@@ -228,10 +228,14 @@ enum regulator_type {
  *                output when using regulator_set_voltage_sel_regmap
  * @enable_reg: Register for control when using regmap enable/disable ops
  * @enable_mask: Mask for control when using regmap enable/disable ops
+ * @enable_val: Enabling value for control when using regmap enable/disable ops
+ * @disable_val: Disabling value for control when using regmap enable/disable ops
  * @enable_is_inverted: A flag to indicate set enable_mask bits to disable
  *                      when using regulator_enable_regmap and friends APIs.
  * @bypass_reg: Register for control when using regmap set_bypass
  * @bypass_mask: Mask for control when using regmap set_bypass
+ * @bypass_val_on: Enabling value for control when using regmap set_bypass
+ * @bypass_val_off: Disabling value for control when using regmap set_bypass
  *
  * @enable_time: Time taken for initial enable of regulator (in uS).
  */
@@ -263,9 +267,13 @@ struct regulator_desc {
        unsigned int apply_bit;
        unsigned int enable_reg;
        unsigned int enable_mask;
+       unsigned int enable_val;
+       unsigned int disable_val;
        bool enable_is_inverted;
        unsigned int bypass_reg;
        unsigned int bypass_mask;
+       unsigned int bypass_val_on;
+       unsigned int bypass_val_off;
 
        unsigned int enable_time;
 };
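
enable_val/disable_val (and the bypass equivalents) cover enable fields where "on" is a specific value rather than just the mask bits. A minimal sketch of a regulator_desc using them; every field value here is hypothetical:

    static const struct regulator_desc foo_ldo_desc = {
            .name        = "foo-ldo",
            .ops         = &foo_ldo_ops,    /* hypothetical ops table */
            .type        = REGULATOR_VOLTAGE,
            .owner       = THIS_MODULE,
            .enable_reg  = 0x12,
            .enable_mask = 0xc0,
            .enable_val  = 0xc0,            /* value meaning "enabled" */
            .disable_val = 0x00,            /* value meaning "disabled" */
    };
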
index 65d550bf395474f8427f8d67a2eaa1bd2db20945..364f7a7c43db3db23e67805983e90b3dd0187837 100644 (file)
 #define PFUZE100_VGEN6         14
 #define PFUZE100_MAX_REGULATOR 15
 
+#define PFUZE200_SW1AB         0
+#define PFUZE200_SW2           1
+#define PFUZE200_SW3A          2
+#define PFUZE200_SW3B          3
+#define PFUZE200_SWBST         4
+#define PFUZE200_VSNVS         5
+#define PFUZE200_VREFDDR       6
+#define PFUZE200_VGEN1         7
+#define PFUZE200_VGEN2         8
+#define PFUZE200_VGEN3         9
+#define PFUZE200_VGEN4         10
+#define PFUZE200_VGEN5         11
+#define PFUZE200_VGEN6         12
+
 struct regulator_init_data;
 
 struct pfuze_regulator_platform_data {
index a781dec1cd0b58d219427fdc71b37574ac972ec8..7cb07fd266808835ca8d4309891f19567cbe7503 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <uapi/linux/sched.h>
 
+#include <linux/sched/prio.h>
+
 
 struct sched_param {
        int sched_priority;
@@ -27,7 +29,7 @@ struct sched_param {
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 
 #include <linux/smp.h>
 #include <linux/sem.h>
@@ -292,10 +294,14 @@ extern int runqueue_is_locked(int cpu);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(void);
+extern int get_nohz_timer_target(int pinned);
 #else
 static inline void nohz_balance_enter_idle(int cpu) { }
 static inline void set_cpu_sd_state_idle(void) { }
+static inline int get_nohz_timer_target(int pinned)
+{
+       return smp_processor_id();
+}
 #endif
 
 /*
@@ -1077,6 +1083,7 @@ struct sched_entity {
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+       int                     depth;
        struct sched_entity     *parent;
        /* rq on which this entity is (to be) queued: */
        struct cfs_rq           *cfs_rq;
@@ -1460,6 +1467,9 @@ struct task_struct {
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
 #endif
+#ifdef CONFIG_DEBUG_PREEMPT
+       unsigned long preempt_disable_ip;
+#endif
 #ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;    /* Protected by alloc_lock */
        short il_next;
@@ -1470,9 +1480,10 @@ struct task_struct {
        unsigned int numa_scan_period;
        unsigned int numa_scan_period_max;
        int numa_preferred_nid;
-       int numa_migrate_deferred;
        unsigned long numa_migrate_retry;
        u64 node_stamp;                 /* migration stamp  */
+       u64 last_task_numa_placement;
+       u64 last_sum_exec_runtime;
        struct callback_head numa_work;
 
        struct list_head numa_entry;
@@ -1483,15 +1494,22 @@ struct task_struct {
         * Scheduling placement decisions are made based on these counts.
         * The values remain static for the duration of a PTE scan
         */
-       unsigned long *numa_faults;
+       unsigned long *numa_faults_memory;
        unsigned long total_numa_faults;
 
        /*
         * numa_faults_buffer records faults per node during the current
-        * scan window. When the scan completes, the counts in numa_faults
-        * decay and these values are copied.
+        * scan window. When the scan completes, the counts in
+        * numa_faults_memory decay and these values are copied.
         */
-       unsigned long *numa_faults_buffer;
+       unsigned long *numa_faults_buffer_memory;
+
+       /*
+        * Track the nodes the process was running on when a NUMA hinting
+        * fault was incurred.
+        */
+       unsigned long *numa_faults_cpu;
+       unsigned long *numa_faults_buffer_cpu;
 
        /*
         * numa_faults_locality tracks if faults recorded during the last
@@ -1596,8 +1614,8 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags);
 extern pid_t task_numa_group_id(struct task_struct *p);
 extern void set_numabalancing_state(bool enabled);
 extern void task_numa_free(struct task_struct *p);
-
-extern unsigned int sysctl_numa_balancing_migrate_deferred;
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+                                       int src_nid, int dst_cpu);
 #else
 static inline void task_numa_fault(int last_node, int node, int pages,
                                   int flags)
@@ -1613,6 +1631,11 @@ static inline void set_numabalancing_state(bool enabled)
 static inline void task_numa_free(struct task_struct *p)
 {
 }
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+                               struct page *page, int src_nid, int dst_cpu)
+{
+       return true;
+}
 #endif
 
 static inline struct pid *task_pid(struct task_struct *task)
@@ -2080,7 +2103,16 @@ static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
-extern int task_nice(const struct task_struct *p);
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
+ */
+static inline int task_nice(const struct task_struct *p)
+{
+       return PRIO_TO_NICE((p)->static_prio);
+}
 extern int can_nice(const struct task_struct *p, const int nice);
 extern int task_curr(const struct task_struct *p);
 extern int idle_cpu(int cpu);
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
new file mode 100644 (file)
index 0000000..ac32258
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef _SCHED_PRIO_H
+#define _SCHED_PRIO_H
+
+#define MAX_NICE       19
+#define MIN_NICE       -20
+#define NICE_WIDTH     (MAX_NICE - MIN_NICE + 1)
+
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space.  This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO       100
+#define MAX_RT_PRIO            MAX_USER_RT_PRIO
+
+#define MAX_PRIO               (MAX_RT_PRIO + NICE_WIDTH)
+#define DEFAULT_PRIO           (MAX_RT_PRIO + NICE_WIDTH / 2)
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice)     ((nice) + DEFAULT_PRIO)
+#define PRIO_TO_NICE(prio)     ((prio) - DEFAULT_PRIO)
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters;
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p)           ((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p)      USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO          (USER_PRIO(MAX_PRIO))
+
+#endif /* _SCHED_PRIO_H */
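
A quick check of the arithmetic these macros encode, under the values defined above:

    /* MAX_RT_PRIO == 100, NICE_WIDTH == 40, so DEFAULT_PRIO == 120 and:
     *   NICE_TO_PRIO(-20) == 100  (MAX_RT_PRIO, highest non-RT priority)
     *   NICE_TO_PRIO(0)   == 120  (DEFAULT_PRIO)
     *   NICE_TO_PRIO(19)  == 139  (MAX_PRIO - 1, lowest priority)
     * PRIO_TO_NICE() inverts the mapping, e.g. PRIO_TO_NICE(139) == 19. */
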
index 34e4ebea8fce79efa8f1528747db18b7a11f75b4..6341f5be6e2474c0a7e30fdd75e3eae5b4286bf3 100644 (file)
@@ -1,24 +1,7 @@
 #ifndef _SCHED_RT_H
 #define _SCHED_RT_H
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space.  This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO       100
-#define MAX_RT_PRIO            MAX_USER_RT_PRIO
-
-#define MAX_PRIO               (MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO           (MAX_RT_PRIO + 20)
+#include <linux/sched/prio.h>
 
 static inline int rt_prio(int prio)
 {
@@ -35,6 +18,7 @@ static inline int rt_task(struct task_struct *p)
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
 extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
@@ -46,6 +30,12 @@ static inline int rt_mutex_getprio(struct task_struct *p)
 {
        return p->normal_prio;
 }
+
+static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
+{
+       return 0;
+}
+
 static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
 {
        return NULL;
index 5e1e6f2d98c2ae5b45b8a3ec5565eb793e840a68..15ede6a823a6e2daa366551cd7bad13ac3dd5c03 100644 (file)
@@ -2451,8 +2451,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                    unsigned int flags);
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
-void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
-                 int len, int hlen);
+int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
+                int len, int hlen);
 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
 void skb_scrub_packet(struct sk_buff *skb, bool xnet);
index 9b058eecd40390b914d705809a1aabac3eff132b..a2783cb5d2753f6c6eb04e48086f461abedb8b6f 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright (C) IBM Corporation, 2006
  * Copyright (C) Fujitsu, 2012
index a747a77ea584aafb124ae230862360c8dd0b6073..1e67b7a5968c77c2f9290e48801971adc9c0f3fc 100644 (file)
@@ -98,6 +98,8 @@ struct sigaltstack;
 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
 
 #define __SC_DECL(t, a)        t a
+#define __TYPE_IS_L(t) (__same_type((t)0, 0L))
+#define __TYPE_IS_UL(t)        (__same_type((t)0, 0UL))
 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
 #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
 #define __SC_CAST(t, a)        (t) a
diff --git a/include/linux/torture.h b/include/linux/torture.h
new file mode 100644 (file)
index 0000000..b2e2b46
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Common functions for in-kernel torture tests.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2014
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_TORTURE_H
+#define __LINUX_TORTURE_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+#include <linux/lockdep.h>
+#include <linux/completion.h>
+#include <linux/debugobjects.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+
+/* Definitions for a non-string torture-test module parameter. */
+#define torture_param(type, name, init, msg) \
+       static type name = init; \
+       module_param(name, type, 0444); \
+       MODULE_PARM_DESC(name, msg);
+
+#define TORTURE_FLAG "-torture:"
+#define TOROUT_STRING(s) \
+       pr_alert("%s" TORTURE_FLAG s "\n", torture_type)
+#define VERBOSE_TOROUT_STRING(s) \
+       do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
+#define VERBOSE_TOROUT_ERRSTRING(s) \
+       do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
+
+/* Definitions for a non-string torture-test module parameter. */
+#define torture_parm(type, name, init, msg) \
+       static type name = init; \
+       module_param(name, type, 0444); \
+       MODULE_PARM_DESC(name, msg);
+
+/* Definitions for online/offline exerciser. */
+int torture_onoff_init(long ooholdoff, long oointerval);
+char *torture_onoff_stats(char *page);
+bool torture_onoff_failures(void);
+
+/* Low-rider random number generator. */
+struct torture_random_state {
+       unsigned long trs_state;
+       long trs_count;
+};
+#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+unsigned long torture_random(struct torture_random_state *trsp);
+
+/* Task shuffler, which causes CPUs to occasionally go idle. */
+void torture_shuffle_task_register(struct task_struct *tp);
+int torture_shuffle_init(long shuffint);
+
+/* Test auto-shutdown handling. */
+void torture_shutdown_absorb(const char *title);
+int torture_shutdown_init(int ssecs, void (*cleanup)(void));
+
+/* Task stuttering, which forces load/no-load transitions. */
+void stutter_wait(const char *title);
+int torture_stutter_init(int s);
+
+/* Initialization and cleanup. */
+void torture_init_begin(char *ttype, bool v, int *runnable);
+void torture_init_end(void);
+bool torture_cleanup(void);
+bool torture_must_stop(void);
+bool torture_must_stop_irq(void);
+void torture_kthread_stopping(char *title);
+int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+                            char *f, struct task_struct **tp);
+void _torture_stop_kthread(char *m, struct task_struct **tp);
+
+#define torture_create_kthread(n, arg, tp) \
+       _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+                               "Failed to create " #n, &(tp))
+#define torture_stop_kthread(n, tp) \
+       _torture_stop_kthread("Stopping " #n " task", &(tp))
+
+#endif /* __LINUX_TORTURE_H */
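
For readers unfamiliar with the new header, here is a minimal sketch of how a client module might consume it. This is not part of the commit; the module name, parameter, and loop body are hypothetical, and only the APIs declared in torture.h above are used.

/* Hypothetical torture-test client built on include/linux/torture.h. */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/torture.h>

torture_param(int, example_holdoff, 0, "Extra jiffies to sleep per iteration");

static char *torture_type = "example";	/* consumed by the TOROUT macros */
static bool verbose = true;
static int example_runnable = 1;
static struct task_struct *example_task;

static int example_kthread(void *arg)
{
	VERBOSE_TOROUT_STRING("example_kthread started");
	do {
		schedule_timeout_interruptible(1 + example_holdoff);
		stutter_wait("example_kthread");	/* honor load/no-load phases */
	} while (!torture_must_stop());
	torture_kthread_stopping("example_kthread");
	return 0;
}

static int __init example_init(void)
{
	int ret;

	torture_init_begin(torture_type, verbose, &example_runnable);
	ret = torture_create_kthread(example_kthread, NULL, example_task);
	torture_init_end();
	return ret;
}

static void __exit example_exit(void)
{
	torture_stop_kthread(example_kthread, example_task);
	torture_cleanup();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
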
index e303eef94dd5cea92d16d7ae63aa35f5c3f1d4bb..0662e98fef72b21fdabb7d14ac1fd71f2066a140 100644 (file)
@@ -30,7 +30,7 @@ struct usbnet {
        struct driver_info      *driver_info;
        const char              *driver_name;
        void                    *driver_priv;
-       wait_queue_head_t       *wait;
+       wait_queue_head_t       wait;
        struct mutex            phy_mutex;
        unsigned char           suspend_count;
        unsigned char           pkt_cnt, pkt_err;
diff --git a/include/linux/video_output.h b/include/linux/video_output.h
deleted file mode 100644 (file)
index ed5cdeb..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- *
- *  Copyright (C) 2006 Luming Yu <luming.yu@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#ifndef _LINUX_VIDEO_OUTPUT_H
-#define _LINUX_VIDEO_OUTPUT_H
-#include <linux/device.h>
-#include <linux/err.h>
-struct output_device;
-struct output_properties {
-       int (*set_state)(struct output_device *);
-       int (*get_status)(struct output_device *);
-};
-struct output_device {
-       int request_state;
-       struct output_properties *props;
-       struct device dev;
-};
-#define to_output_device(obj) container_of(obj, struct output_device, dev)
-#if    defined(CONFIG_VIDEO_OUTPUT_CONTROL) || defined(CONFIG_VIDEO_OUTPUT_CONTROL_MODULE)
-struct output_device *video_output_register(const char *name,
-       struct device *dev,
-       void *devdata,
-       struct output_properties *op);
-void video_output_unregister(struct output_device *dev);
-#else
-static struct output_device *video_output_register(const char *name,
-        struct device *dev,
-        void *devdata,
-        struct output_properties *op)
-{
-       return ERR_PTR(-ENODEV);
-}
-static void video_output_unregister(struct output_device *dev)
-{
-       return;
-}
-#endif
-#endif
index 704f4f652d0af8b28406154678ee38b5f98298bf..1b22c42e9c2d4f03bb2e16b32fbeb0c397b3e034 100644 (file)
@@ -177,20 +177,10 @@ struct execute_work {
 #define DECLARE_DEFERRABLE_WORK(n, f)                                  \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
-/*
- * initialize a work item's function pointer
- */
-#define PREPARE_WORK(_work, _func)                                     \
-       do {                                                            \
-               (_work)->func = (_func);                                \
-       } while (0)
-
-#define PREPARE_DELAYED_WORK(_work, _func)                             \
-       PREPARE_WORK(&(_work)->work, (_func))
-
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 extern void __init_work(struct work_struct *work, int onstack);
 extern void destroy_work_on_stack(struct work_struct *work);
+extern void destroy_delayed_work_on_stack(struct delayed_work *work);
 static inline unsigned int work_static(struct work_struct *work)
 {
        return *work_data_bits(work) & WORK_STRUCT_STATIC;
@@ -198,6 +188,7 @@ static inline unsigned int work_static(struct work_struct *work)
 #else
 static inline void __init_work(struct work_struct *work, int onstack) { }
 static inline void destroy_work_on_stack(struct work_struct *work) { }
+static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
 static inline unsigned int work_static(struct work_struct *work) { return 0; }
 #endif
 
@@ -217,7 +208,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                        \
-               PREPARE_WORK((_work), (_func));                         \
+               (_work)->func = (_func);                                \
        } while (0)
 #else
 #define __INIT_WORK(_work, _func, _onstack)                            \
@@ -225,7 +216,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
                INIT_LIST_HEAD(&(_work)->entry);                        \
-               PREPARE_WORK((_work), (_func));                         \
+               (_work)->func = (_func);                                \
        } while (0)
 #endif
 
@@ -295,17 +286,11 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
  * Documentation/workqueue.txt.
  */
 enum {
-       /*
-        * All wqs are now non-reentrant making the following flag
-        * meaningless.  Will be removed.
-        */
-       WQ_NON_REENTRANT        = 1 << 0, /* DEPRECATED */
-
        WQ_UNBOUND              = 1 << 1, /* not bound to any cpu */
        WQ_FREEZABLE            = 1 << 2, /* freeze during suspend */
        WQ_MEM_RECLAIM          = 1 << 3, /* may be used for memory reclaim */
        WQ_HIGHPRI              = 1 << 4, /* high priority */
-       WQ_CPU_INTENSIVE        = 1 << 5, /* cpu instensive workqueue */
+       WQ_CPU_INTENSIVE        = 1 << 5, /* cpu intensive workqueue */
        WQ_SYSFS                = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
 
        /*
@@ -602,21 +587,6 @@ static inline bool keventd_up(void)
        return system_wq != NULL;
 }
 
-/*
- * Like above, but uses del_timer() instead of del_timer_sync(). This means,
- * if it returns 0 the timer function may be running and the queueing is in
- * progress.
- */
-static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
-{
-       bool ret;
-
-       ret = del_timer(&work->timer);
-       if (ret)
-               work_clear_pending(&work->work);
-       return ret;
-}
-
 /* used to be different but now identical to flush_work(), deprecated */
 static inline bool __deprecated flush_work_sync(struct work_struct *work)
 {
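 
With PREPARE_WORK() and PREPARE_DELAYED_WORK() gone, a work item's callback is fixed at initialization time rather than re-pointed on a live work item. A small sketch of the resulting pattern (names hypothetical, not taken from the commit):

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct frob_ctx {
	struct work_struct work;
	int value;
};

static void frob_fn(struct work_struct *work)
{
	struct frob_ctx *ctx = container_of(work, struct frob_ctx, work);

	pr_info("frobbing %d\n", ctx->value);
}

static void frob_setup(struct frob_ctx *ctx)
{
	INIT_WORK(&ctx->work, frob_fn);	/* callback bound once, here */
}

static void frob_kick(struct frob_ctx *ctx)
{
	schedule_work(&ctx->work);	/* no PREPARE_WORK() re-pointing later */
}
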
index 9650a3ffd2d2328f11859adc4a3169ae41b25442..b4956a5fcc3f117e9e4199a5ad7af94894fea859 100644 (file)
 #define IF_PREFIX_AUTOCONF     0x02
 
 enum {
+       INET6_IFADDR_STATE_PREDAD,
        INET6_IFADDR_STATE_DAD,
        INET6_IFADDR_STATE_POSTDAD,
+       INET6_IFADDR_STATE_ERRDAD,
        INET6_IFADDR_STATE_UP,
        INET6_IFADDR_STATE_DEAD,
 };
@@ -58,7 +60,7 @@ struct inet6_ifaddr {
        unsigned long           cstamp; /* created timestamp */
        unsigned long           tstamp; /* updated timestamp */
 
-       struct timer_list       dad_timer;
+       struct delayed_work     dad_work;
 
        struct inet6_dev        *idev;
        struct rt6_info         *rt;
index f843dd8722a97eb9ca7106c6e09f8dd43a6e903c..ef7872c20da9e5f2d16c5a950b0abd0d9e9e53b2 100644 (file)
@@ -172,7 +172,6 @@ struct sata_device {
         enum   ata_command_set command_set;
         struct smp_resp        rps_resp; /* report_phy_sata_resp */
         u8     port_no;        /* port number, if this is a PM (Port) */
-       int    pm_result;
 
        struct ata_port *ap;
        struct ata_host ata_host;
index e5bf9a76f169681c356a1d14236d5e71d14bdb96..9a7e08d6125814406a7e2f5198e8abdaa8395877 100644 (file)
@@ -407,8 +407,8 @@ DECLARE_EVENT_CLASS(dev_pm_qos_request,
        TP_printk("device=%s type=%s new_value=%d",
                  __get_str(name),
                  __print_symbolic(__entry->type,
-                       { DEV_PM_QOS_LATENCY,   "DEV_PM_QOS_LATENCY" },
-                       { DEV_PM_QOS_FLAGS,     "DEV_PM_QOS_FLAGS" }),
+                       { DEV_PM_QOS_RESUME_LATENCY, "DEV_PM_QOS_RESUME_LATENCY" },
+                       { DEV_PM_QOS_FLAGS, "DEV_PM_QOS_FLAGS" }),
                  __entry->new_value)
 );
 
index dde8041f40d2e8a29a25a6205dd38577f2b46bce..6db66783d268d9a286a86b836c9277bd89f5ac13 100644 (file)
@@ -191,6 +191,7 @@ __SYSCALL(__NR_quotactl, sys_quotactl)
 
 /* fs/readdir.c */
 #define __NR_getdents64 61
+#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
 __SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
 
 /* fs/read_write.c */
index 009a797dd24272afbe761c33f237287c9420be33..d56cb03c1b491036c981582568d780303d3ca250 100644 (file)
@@ -1387,6 +1387,13 @@ config FUTEX
          support for "fast userspace mutexes".  The resulting kernel may not
          run glibc-based applications correctly.
 
+config HAVE_FUTEX_CMPXCHG
+       bool
+       help
+         Architectures should select this if futex_atomic_cmpxchg_inatomic()
+         is implemented and always working. This removes a couple of runtime
+         checks.
+
 config EPOLL
        bool "Enable eventpoll support" if EXPERT
        default y
index f486b0096a67f32fe453cca13b40ad6268a76d45..98b9016cab6c84e4e86151458a5e9b8063ffc6f2 100644 (file)
@@ -430,9 +430,9 @@ COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp,
 }
 
 COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp,
-                      compat_ssize_t, msgsz, long, msgtyp, int, msgflg)
+                      compat_ssize_t, msgsz, compat_long_t, msgtyp, int, msgflg)
 {
-       return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, msgtyp,
+       return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp,
                         msgflg, compat_do_msg_fill);
 }
 
@@ -498,7 +498,7 @@ static inline int put_compat_msqid_ds(struct msqid64_ds *m,
        return err;
 }
 
-long compat_sys_msgctl(int first, int second, void __user *uptr)
+COMPAT_SYSCALL_DEFINE3(msgctl, int, first, int, second, void __user *, uptr)
 {
        int err, err2;
        struct msqid64_ds m64;
@@ -668,7 +668,7 @@ static inline int put_compat_shm_info(struct shm_info __user *ip,
        return err;
 }
 
-long compat_sys_shmctl(int first, int second, void __user *uptr)
+COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
 {
        void __user *p;
        struct shmid64_ds s64;
@@ -749,8 +749,9 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
        return err;
 }
 
-long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
-               unsigned nsops, const struct compat_timespec __user *timeout)
+COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
+                      unsigned, nsops,
+                      const struct compat_timespec __user *, timeout)
 {
        struct timespec __user *ts64 = NULL;
        if (timeout) {
index 63d7c6de335bd3b4878f8f2cf65d1d9e3f7e9474..d5874729377296acc8a95cd0c27b7ab5eb903a76 100644 (file)
@@ -46,9 +46,9 @@ static inline int put_compat_mq_attr(const struct mq_attr *attr,
                | __put_user(attr->mq_curmsgs, &uattr->mq_curmsgs);
 }
 
-asmlinkage long compat_sys_mq_open(const char __user *u_name,
-                       int oflag, compat_mode_t mode,
-                       struct compat_mq_attr __user *u_attr)
+COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
+                      int, oflag, compat_mode_t, mode,
+                      struct compat_mq_attr __user *, u_attr)
 {
        void __user *p = NULL;
        if (u_attr && oflag & O_CREAT) {
@@ -78,10 +78,10 @@ static int compat_prepare_timeout(struct timespec __user **p,
        return 0;
 }
 
-asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
-                       const char __user *u_msg_ptr,
-                       size_t msg_len, unsigned int msg_prio,
-                       const struct compat_timespec __user *u_abs_timeout)
+COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
+                      const char __user *, u_msg_ptr,
+                      compat_size_t, msg_len, unsigned int, msg_prio,
+                      const struct compat_timespec __user *, u_abs_timeout)
 {
        struct timespec __user *u_ts;
 
@@ -92,10 +92,10 @@ asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
                        msg_prio, u_ts);
 }
 
-asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
-                       char __user *u_msg_ptr,
-                       size_t msg_len, unsigned int __user *u_msg_prio,
-                       const struct compat_timespec __user *u_abs_timeout)
+COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
+                      char __user *, u_msg_ptr,
+                      compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
+                      const struct compat_timespec __user *, u_abs_timeout)
 {
        struct timespec __user *u_ts;
        if (compat_prepare_timeout(&u_ts, u_abs_timeout))
@@ -105,8 +105,8 @@ asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
                        u_msg_prio, u_ts);
 }
 
-asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
-                       const struct compat_sigevent __user *u_notification)
+COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
+                      const struct compat_sigevent __user *, u_notification)
 {
        struct sigevent __user *p = NULL;
        if (u_notification) {
@@ -122,9 +122,9 @@ asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
        return sys_mq_notify(mqdes, p);
 }
 
-asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
-                       const struct compat_mq_attr __user *u_mqstat,
-                       struct compat_mq_attr __user *u_omqstat)
+COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
+                      const struct compat_mq_attr __user *, u_mqstat,
+                      struct compat_mq_attr __user *, u_omqstat)
 {
        struct mq_attr mqstat;
        struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
index bc010ee272b6cfec39892e6cb04b98cc06995e1a..f2a8b6246ce935e9d4ae5aeddc314e3d281c8539 100644 (file)
@@ -18,11 +18,13 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
+# cond_syscall is currently not LTO compatible
+CFLAGS_sys_ni.o = $(DISABLE_LTO)
+
 obj-y += sched/
 obj-y += locking/
 obj-y += power/
 obj-y += printk/
-obj-y += cpu/
 obj-y += irq/
 obj-y += rcu/
 
@@ -93,6 +95,7 @@ obj-$(CONFIG_PADATA) += padata.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
+obj-$(CONFIG_TORTURE_TEST) += torture.o
 
 $(obj)/configs.o: $(obj)/config_data.h
 
index 3392d3e0254ac5d93199c0b00e271cbb0b2bfc9c..95a20f3f52f1c9f35b7d6aa2cb0a8b7c05fdbc69 100644 (file)
@@ -608,9 +608,19 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
        int err = 0;
 
        /* Only support the initial namespaces for now. */
+       /*
+        * We return ECONNREFUSED because it tricks userspace into thinking
+        * that audit was not configured into the kernel.  Lots of users
+        * configure their PAM stack (because that's what the distro does)
+        * to reject login if unable to send messages to audit.  If we return
+        * ECONNREFUSED the PAM stack thinks the kernel does not have audit
+        * configured in and will let login proceed.  If we return EPERM
+        * userspace will reject all logins.  This should be removed when we
+        * support non init namespaces!!
+        */
        if ((current_user_ns() != &init_user_ns) ||
            (task_active_pid_ns(current) != &init_pid_ns))
-               return -EPERM;
+               return -ECONNREFUSED;
 
        switch (msg_type) {
        case AUDIT_LIST:
index 0a09e481b70b6e97519841f2eebf57da2e02d851..488ff8c4cf48ec071ad6cdb824fe60af735b5c8f 100644 (file)
@@ -110,8 +110,8 @@ static int compat_put_timex(struct compat_timex __user *utp, struct timex *txc)
        return 0;
 }
 
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
-               struct timezone __user *tz)
+COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
+                      struct timezone __user *, tz)
 {
        if (tv) {
                struct timeval ktv;
@@ -127,8 +127,8 @@ asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
        return 0;
 }
 
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
-               struct timezone __user *tz)
+COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
+                      struct timezone __user *, tz)
 {
        struct timespec kts;
        struct timezone ktz;
@@ -236,8 +236,8 @@ static long compat_nanosleep_restart(struct restart_block *restart)
        return ret;
 }
 
-asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
-                                    struct compat_timespec __user *rmtp)
+COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
+                      struct compat_timespec __user *, rmtp)
 {
        struct timespec tu, rmt;
        mm_segment_t oldfs;
@@ -328,7 +328,7 @@ static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
        return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
 }
 
-asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
+COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
 {
        if (tbuf) {
                struct tms tms;
@@ -354,7 +354,7 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
  * types that can be passed to put_user()/get_user().
  */
 
-asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
+COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set)
 {
        old_sigset_t s;
        long ret;
@@ -424,8 +424,8 @@ COMPAT_SYSCALL_DEFINE3(sigprocmask, int, how,
 
 #endif
 
-asmlinkage long compat_sys_setrlimit(unsigned int resource,
-               struct compat_rlimit __user *rlim)
+COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
+                      struct compat_rlimit __user *, rlim)
 {
        struct rlimit r;
 
@@ -443,8 +443,8 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
 
 #ifdef COMPAT_RLIM_OLD_INFINITY
 
-asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
-               struct compat_rlimit __user *rlim)
+COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+                      struct compat_rlimit __user *, rlim)
 {
        struct rlimit r;
        int ret;
@@ -470,8 +470,8 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
 
 #endif
 
-asmlinkage long compat_sys_getrlimit(unsigned int resource,
-               struct compat_rlimit __user *rlim)
+COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
+                      struct compat_rlimit __user *, rlim)
 {
        struct rlimit r;
        int ret;
@@ -596,9 +596,9 @@ static int compat_get_user_cpu_mask(compat_ulong_t __user *user_mask_ptr,
        return compat_get_bitmap(k, user_mask_ptr, len * 8);
 }
 
-asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
-                                            unsigned int len,
-                                            compat_ulong_t __user *user_mask_ptr)
+COMPAT_SYSCALL_DEFINE3(sched_setaffinity, compat_pid_t, pid,
+                      unsigned int, len,
+                      compat_ulong_t __user *, user_mask_ptr)
 {
        cpumask_var_t new_mask;
        int retval;
@@ -616,8 +616,8 @@ out:
        return retval;
 }
 
-asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
-                                            compat_ulong_t __user *user_mask_ptr)
+COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t,  pid, unsigned int, len,
+                      compat_ulong_t __user *, user_mask_ptr)
 {
        int ret;
        cpumask_var_t mask;
@@ -662,9 +662,9 @@ int put_compat_itimerspec(struct compat_itimerspec __user *dst,
        return 0;
 }
 
-long compat_sys_timer_create(clockid_t which_clock,
-                       struct compat_sigevent __user *timer_event_spec,
-                       timer_t __user *created_timer_id)
+COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
+                      struct compat_sigevent __user *, timer_event_spec,
+                      timer_t __user *, created_timer_id)
 {
        struct sigevent __user *event = NULL;
 
@@ -680,9 +680,9 @@ long compat_sys_timer_create(clockid_t which_clock,
        return sys_timer_create(which_clock, event, created_timer_id);
 }
 
-long compat_sys_timer_settime(timer_t timer_id, int flags,
-                         struct compat_itimerspec __user *new,
-                         struct compat_itimerspec __user *old)
+COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+                      struct compat_itimerspec __user *, new,
+                      struct compat_itimerspec __user *, old)
 {
        long err;
        mm_segment_t oldfs;
@@ -703,8 +703,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
        return err;
 }
 
-long compat_sys_timer_gettime(timer_t timer_id,
-               struct compat_itimerspec __user *setting)
+COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+                      struct compat_itimerspec __user *, setting)
 {
        long err;
        mm_segment_t oldfs;
@@ -720,8 +720,8 @@ long compat_sys_timer_gettime(timer_t timer_id,
        return err;
 }
 
-long compat_sys_clock_settime(clockid_t which_clock,
-               struct compat_timespec __user *tp)
+COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
+                      struct compat_timespec __user *, tp)
 {
        long err;
        mm_segment_t oldfs;
@@ -737,8 +737,8 @@ long compat_sys_clock_settime(clockid_t which_clock,
        return err;
 }
 
-long compat_sys_clock_gettime(clockid_t which_clock,
-               struct compat_timespec __user *tp)
+COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
+                      struct compat_timespec __user *, tp)
 {
        long err;
        mm_segment_t oldfs;
@@ -754,8 +754,8 @@ long compat_sys_clock_gettime(clockid_t which_clock,
        return err;
 }
 
-long compat_sys_clock_adjtime(clockid_t which_clock,
-               struct compat_timex __user *utp)
+COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
+                      struct compat_timex __user *, utp)
 {
        struct timex txc;
        mm_segment_t oldfs;
@@ -777,8 +777,8 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
        return ret;
 }
 
-long compat_sys_clock_getres(clockid_t which_clock,
-               struct compat_timespec __user *tp)
+COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
+                      struct compat_timespec __user *, tp)
 {
        long err;
        mm_segment_t oldfs;
@@ -818,9 +818,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
        return err;
 }
 
-long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
-                           struct compat_timespec __user *rqtp,
-                           struct compat_timespec __user *rmtp)
+COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
+                      struct compat_timespec __user *, rqtp,
+                      struct compat_timespec __user *, rmtp)
 {
        long err;
        mm_segment_t oldfs;
@@ -1010,7 +1010,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
 
 /* compat_time_t is a 32 bit "long" and needs to get converted. */
 
-asmlinkage long compat_sys_time(compat_time_t __user * tloc)
+COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
 {
        compat_time_t i;
        struct timeval tv;
@@ -1026,7 +1026,7 @@ asmlinkage long compat_sys_time(compat_time_t __user * tloc)
        return i;
 }
 
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
+COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
 {
        struct timespec tv;
        int err;
@@ -1046,7 +1046,7 @@ asmlinkage long compat_sys_stime(compat_time_t __user *tptr)
 
 #endif /* __ARCH_WANT_COMPAT_SYS_TIME */
 
-asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
+COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
 {
        struct timex txc;
        int err, ret;
@@ -1065,11 +1065,11 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
 }
 
 #ifdef CONFIG_NUMA
-asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
-               compat_uptr_t __user *pages32,
-               const int __user *nodes,
-               int __user *status,
-               int flags)
+COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
+                      compat_uptr_t __user *, pages32,
+                      const int __user *, nodes,
+                      int __user *, status,
+                      int, flags)
 {
        const void __user * __user *pages;
        int i;
@@ -1085,10 +1085,10 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
        return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
 }
 
-asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
-                       compat_ulong_t maxnode,
-                       const compat_ulong_t __user *old_nodes,
-                       const compat_ulong_t __user *new_nodes)
+COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
+                      compat_ulong_t, maxnode,
+                      const compat_ulong_t __user *, old_nodes,
+                      const compat_ulong_t __user *, new_nodes)
 {
        unsigned long __user *old = NULL;
        unsigned long __user *new = NULL;
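
The conversions above (and in ipc/compat.c, ipc/compat_mq.c and kernel/kexec.c) follow one pattern: hand-written asmlinkage compat_sys_*() prototypes are replaced by COMPAT_SYSCALL_DEFINEn(), which emits the entry point together with the compat argument handling. A hedged sketch of the pattern with a made-up syscall name:

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/syscalls.h>

/* Hypothetical native helper shared by the native and compat paths. */
static long do_frobnicate(unsigned long arg)
{
	return arg ? 0 : -EINVAL;
}

/*
 * Old style, open-coded entry point:
 *
 *	asmlinkage long compat_sys_frobnicate(compat_ulong_t arg);
 *
 * New style: the macro generates compat_sys_frobnicate() and the
 * architecture-specific compat argument plumbing for us.
 */
COMPAT_SYSCALL_DEFINE1(frobnicate, compat_ulong_t, arg)
{
	return do_frobnicate((unsigned long)arg);
}
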
diff --git a/kernel/cpu/Makefile b/kernel/cpu/Makefile
deleted file mode 100644 (file)
index 59ab052..0000000
+++ /dev/null
@@ -1 +0,0 @@
-obj-y  = idle.o
index 334b3980ffc14d396e25ccb11644f7f038cbe6e4..99982a70ddade1f728e18bd1af020b3600c4b8a2 100644 (file)
@@ -1035,7 +1035,7 @@ int dbg_io_get_char(void)
  * otherwise as a quick means to stop program execution and "break" into
  * the debugger.
  */
-void kgdb_breakpoint(void)
+noinline void kgdb_breakpoint(void)
 {
        atomic_inc(&kgdb_setting_breakpoint);
        wmb(); /* Sync point before breakpoint */
index fa0b2d4ad83c5f7a08dfecf27d16d33d928a80f2..661951ab8ae731a5094e1a29101bb7e0797794ed 100644 (file)
@@ -231,11 +231,29 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
 #define NR_ACCUMULATED_SAMPLES 128
 static DEFINE_PER_CPU(u64, running_sample_length);
 
-void perf_sample_event_took(u64 sample_len_ns)
+static void perf_duration_warn(struct irq_work *w)
 {
+       u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
        u64 avg_local_sample_len;
        u64 local_samples_len;
+
+       local_samples_len = __get_cpu_var(running_sample_length);
+       avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
+
+       printk_ratelimited(KERN_WARNING
+                       "perf interrupt took too long (%lld > %lld), lowering "
+                       "kernel.perf_event_max_sample_rate to %d\n",
+                       avg_local_sample_len, allowed_ns >> 1,
+                       sysctl_perf_event_sample_rate);
+}
+
+static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
+
+void perf_sample_event_took(u64 sample_len_ns)
+{
        u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
+       u64 avg_local_sample_len;
+       u64 local_samples_len;
 
        if (allowed_ns == 0)
                return;
@@ -263,13 +281,14 @@ void perf_sample_event_took(u64 sample_len_ns)
        sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
        perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 
-       printk_ratelimited(KERN_WARNING
-                       "perf samples too long (%lld > %lld), lowering "
-                       "kernel.perf_event_max_sample_rate to %d\n",
-                       avg_local_sample_len, allowed_ns,
-                       sysctl_perf_event_sample_rate);
-
        update_perf_cpu_limits();
+
+       if (!irq_work_queue(&perf_duration_work)) {
+               early_printk("perf interrupt took too long (%lld > %lld), lowering "
+                            "kernel.perf_event_max_sample_rate to %d\n",
+                            avg_local_sample_len, allowed_ns >> 1,
+                            sysctl_perf_event_sample_rate);
+       }
 }
 
 static atomic64_t perf_event_id;
@@ -1714,7 +1733,7 @@ group_sched_in(struct perf_event *group_event,
               struct perf_event_context *ctx)
 {
        struct perf_event *event, *partial_group = NULL;
-       struct pmu *pmu = group_event->pmu;
+       struct pmu *pmu = ctx->pmu;
        u64 now = ctx->time;
        bool simulate = false;
 
@@ -2563,8 +2582,6 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
                if (cpuctx->ctx.nr_branch_stack > 0
                    && pmu->flush_branch_stack) {
 
-                       pmu = cpuctx->ctx.pmu;
-
                        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 
                        perf_pmu_disable(pmu);
@@ -6294,7 +6311,7 @@ static int perf_event_idx_default(struct perf_event *event)
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
  */
-static void *find_pmu_context(int ctxn)
+static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
 {
        struct pmu *pmu;
 
index 763faf037ec1cccb135b98d1deccf339f69cd2e5..d8a6446adbcbebe00410d0c8b3cef7b5bfb4b1c9 100644 (file)
@@ -36,7 +36,7 @@ extern struct exception_table_entry __start___ex_table[];
 extern struct exception_table_entry __stop___ex_table[];
 
 /* Cleared by build time tools if the table is already sorted. */
-u32 __initdata main_extable_sort_needed = 1;
+u32 __initdata __visible main_extable_sort_needed = 1;
 
 /* Sort the kernel's built-in exception table */
 void __init sort_main_extable(void)
index a17621c6cd4272182a78083e736c5972c741ed7a..332688e5e7b4c93e55f3785ceb6beb70f4b2182f 100644 (file)
@@ -237,6 +237,7 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);
 
+       task_numa_free(tsk);
        security_task_free(tsk);
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
index 08ec814ad9d2fe95ae4303085cb1967265bd3758..67dacaf93e56c0edb12b74a84b40da9fe596ea49 100644 (file)
  * enqueue.
  */
 
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
 int __read_mostly futex_cmpxchg_enabled;
+#endif
 
 /*
  * Futex flags used to encode options to functions and preserve them across
@@ -2875,9 +2877,28 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
        return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
 }
 
-static int __init futex_init(void)
+static void __init futex_detect_cmpxchg(void)
 {
+#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
        u32 curval;
+
+       /*
+        * This will fail and we want it. Some arch implementations do
+        * runtime detection of the futex_atomic_cmpxchg_inatomic()
+        * functionality. We want to know that before we call in any
+        * of the complex code paths. Also we want to prevent
+        * registration of robust lists in that case. NULL is
+        * guaranteed to fault and we get -EFAULT on functional
+        * implementation, the non-functional ones will return
+        * -ENOSYS.
+        */
+       if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
+               futex_cmpxchg_enabled = 1;
+#endif
+}
+
+static int __init futex_init(void)
+{
        unsigned int futex_shift;
        unsigned long i;
 
@@ -2893,18 +2914,8 @@ static int __init futex_init(void)
                                               &futex_shift, NULL,
                                               futex_hashsize, futex_hashsize);
        futex_hashsize = 1UL << futex_shift;
-       /*
-        * This will fail and we want it. Some arch implementations do
-        * runtime detection of the futex_atomic_cmpxchg_inatomic()
-        * functionality. We want to know that before we call in any
-        * of the complex code paths. Also we want to prevent
-        * registration of robust lists in that case. NULL is
-        * guaranteed to fault and we get -EFAULT on functional
-        * implementation, the non-functional ones will return
-        * -ENOSYS.
-        */
-       if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
-               futex_cmpxchg_enabled = 1;
+
+       futex_detect_cmpxchg();
 
        for (i = 0; i < futex_hashsize; i++) {
                atomic_set(&futex_queues[i].waiters, 0);
index 09094361dce523fec7def28599b17c34bee16bec..d55092ceee2975c204bcb90e856f9b6504d577ac 100644 (file)
@@ -168,19 +168,6 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
        }
 }
 
-
-/*
- * Get the preferred target CPU for NOHZ
- */
-static int hrtimer_get_target(int this_cpu, int pinned)
-{
-#ifdef CONFIG_NO_HZ_COMMON
-       if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
-               return get_nohz_timer_target();
-#endif
-       return this_cpu;
-}
-
 /*
  * With HIGHRES=y we do not migrate the timer when it is expiring
  * before the next event on the target cpu because we cannot reprogram
@@ -214,7 +201,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;
        int this_cpu = smp_processor_id();
-       int cpu = hrtimer_get_target(this_cpu, pinned);
+       int cpu = get_nohz_timer_target(pinned);
        int basenum = base->index;
 
 again:
index dc04c166c54d7bc8e86ab87bec4075a3cc492e12..6397df2d6945b09ae853c1a75e4d18aa6da7096f 100644 (file)
@@ -281,6 +281,19 @@ void unmask_irq(struct irq_desc *desc)
        }
 }
 
+void unmask_threaded_irq(struct irq_desc *desc)
+{
+       struct irq_chip *chip = desc->irq_data.chip;
+
+       if (chip->flags & IRQCHIP_EOI_THREADED)
+               chip->irq_eoi(&desc->irq_data);
+
+       if (chip->irq_unmask) {
+               chip->irq_unmask(&desc->irq_data);
+               irq_state_clr_masked(desc);
+       }
+}
+
 /*
  *     handle_nested_irq - Handle a nested irq from a irq thread
  *     @irq:   the interrupt number
@@ -435,6 +448,27 @@ static inline void preflow_handler(struct irq_desc *desc)
 static inline void preflow_handler(struct irq_desc *desc) { }
 #endif
 
+static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
+{
+       if (!(desc->istate & IRQS_ONESHOT)) {
+               chip->irq_eoi(&desc->irq_data);
+               return;
+       }
+       /*
+        * We need to unmask in the following cases:
+        * - Oneshot irq which did not wake the thread (caused by a
+        *   spurious interrupt or a primary handler handling it
+        *   completely).
+        */
+       if (!irqd_irq_disabled(&desc->irq_data) &&
+           irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
+               chip->irq_eoi(&desc->irq_data);
+               unmask_irq(desc);
+       } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
+               chip->irq_eoi(&desc->irq_data);
+       }
+}
+
 /**
  *     handle_fasteoi_irq - irq handler for transparent controllers
  *     @irq:   the interrupt number
@@ -448,6 +482,8 @@ static inline void preflow_handler(struct irq_desc *desc) { }
 void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
+       struct irq_chip *chip = desc->irq_data.chip;
+
        raw_spin_lock(&desc->lock);
 
        if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
@@ -473,18 +509,14 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
        preflow_handler(desc);
        handle_irq_event(desc);
 
-       if (desc->istate & IRQS_ONESHOT)
-               cond_unmask_irq(desc);
+       cond_unmask_eoi_irq(desc, chip);
 
-out_eoi:
-       desc->irq_data.chip->irq_eoi(&desc->irq_data);
-out_unlock:
        raw_spin_unlock(&desc->lock);
        return;
 out:
-       if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
-               goto out_eoi;
-       goto out_unlock;
+       if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+               chip->irq_eoi(&desc->irq_data);
+       raw_spin_unlock(&desc->lock);
 }
 
 /**
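
The cond_unmask_eoi_irq()/unmask_threaded_irq() changes above exist so that a chip can set IRQCHIP_EOI_THREADED and have the final EOI deferred until its threaded handler has completed. A rough sketch of such a chip, with purely hypothetical register accessors, intended for the handle_fasteoi_irq() flow:

#include <linux/irq.h>

/* Hypothetical register accessors for an imaginary interrupt controller. */
static void foo_hw_eoi(struct irq_data *d)    { /* write the EOI register */ }
static void foo_hw_mask(struct irq_data *d)   { /* set the mask bit */ }
static void foo_hw_unmask(struct irq_data *d) { /* clear the mask bit */ }

static struct irq_chip foo_irq_chip = {
	.name		= "foo",
	.irq_eoi	= foo_hw_eoi,
	.irq_mask	= foo_hw_mask,
	.irq_unmask	= foo_hw_unmask,
	/*
	 * Defer the EOI for threaded oneshot handlers: the core issues it
	 * from unmask_threaded_irq() once the irq thread has finished.
	 */
	.flags		= IRQCHIP_EOI_THREADED,
};
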
index 131ca176b4973c9b5f40ff8aa1ab471587ac71ae..635480270858448064b1afa54bf616f074744d60 100644 (file)
@@ -41,6 +41,7 @@ irqreturn_t no_action(int cpl, void *dev_id)
 {
        return IRQ_NONE;
 }
+EXPORT_SYMBOL_GPL(no_action);
 
 static void warn_no_thread(unsigned int irq, struct irqaction *action)
 {
@@ -51,7 +52,7 @@ static void warn_no_thread(unsigned int irq, struct irqaction *action)
               "but no thread function available.", irq, action->name);
 }
 
-static void irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
+void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
 {
        /*
         * In case the thread crashed and was killed we just pretend that
@@ -157,7 +158,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
                                break;
                        }
 
-                       irq_wake_thread(desc, action);
+                       __irq_wake_thread(desc, action);
 
                        /* Fall through to add to randomness */
                case IRQ_HANDLED:
index 001fa5bab4902dcf403384ded269841c09d3ca6a..ddf1ffeb79f1e3ac5a5408465d4f31391525de13 100644 (file)
@@ -6,6 +6,7 @@
  * of this file for your non core code.
  */
 #include <linux/irqdesc.h>
+#include <linux/kernel_stat.h>
 
 #ifdef CONFIG_SPARSE_IRQ
 # define IRQ_BITMAP_BITS       (NR_IRQS + 8196)
@@ -73,6 +74,7 @@ extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
 extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
 extern void mask_irq(struct irq_desc *desc);
 extern void unmask_irq(struct irq_desc *desc);
+extern void unmask_threaded_irq(struct irq_desc *desc);
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
@@ -82,6 +84,7 @@ irqreturn_t handle_irq_event(struct irq_desc *desc);
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 bool irq_wait_for_poll(struct irq_desc *desc);
+void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -179,3 +182,9 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
 {
        return d->state_use_accessors & mask;
 }
+
+static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
+{
+       __this_cpu_inc(*desc->kstat_irqs);
+       __this_cpu_inc(kstat.irqs_sum);
+}
index 8ab8e9390297a06ef7c4efc2a8ad502433b13879..a7174617616ba6b8f404a1c3f01cf8b7dd90cb4d 100644 (file)
@@ -489,6 +489,11 @@ void dynamic_irq_cleanup(unsigned int irq)
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+void kstat_incr_irq_this_cpu(unsigned int irq)
+{
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+}
+
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
        struct irq_desc *desc = irq_to_desc(irq);
index d3bf660cb57fb8a26e55c3605dffc78ce82035f4..2486a4c1a710ba057c7f884faae19bff1fc6d31c 100644 (file)
@@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg)
 early_param("threadirqs", setup_forced_irqthreads);
 #endif
 
-/**
- *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
- *     @irq: interrupt number to wait for
- *
- *     This function waits for any pending IRQ handlers for this interrupt
- *     to complete before returning. If you use this function while
- *     holding a resource the IRQ handler may need you will deadlock.
- *
- *     This function may be called - with care - from IRQ context.
- */
-void synchronize_irq(unsigned int irq)
+static void __synchronize_hardirq(struct irq_desc *desc)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
        bool inprogress;
 
-       if (!desc)
-               return;
-
        do {
                unsigned long flags;
 
@@ -67,12 +53,56 @@ void synchronize_irq(unsigned int irq)
 
                /* Oops, that failed? */
        } while (inprogress);
+}
 
-       /*
-        * We made sure that no hardirq handler is running. Now verify
-        * that no threaded handlers are active.
-        */
-       wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
+/**
+ *     synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
+ *     @irq: interrupt number to wait for
+ *
+ *     This function waits for any pending hard IRQ handlers for this
+ *     interrupt to complete before returning. If you use this
+ *     function while holding a resource the IRQ handler may need you
+ *     will deadlock. It does not take associated threaded handlers
+ *     into account.
+ *
+ *     Do not use this for shutdown scenarios where you must be sure
+ *     that all parts (hardirq and threaded handler) have completed.
+ *
+ *     This function may be called - with care - from IRQ context.
+ */
+void synchronize_hardirq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc)
+               __synchronize_hardirq(desc);
+}
+EXPORT_SYMBOL(synchronize_hardirq);
+
+/**
+ *     synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+ *     @irq: interrupt number to wait for
+ *
+ *     This function waits for any pending IRQ handlers for this interrupt
+ *     to complete before returning. If you use this function while
+ *     holding a resource the IRQ handler may need you will deadlock.
+ *
+ *     This function may be called - with care - from IRQ context.
+ */
+void synchronize_irq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc) {
+               __synchronize_hardirq(desc);
+               /*
+                * We made sure that no hardirq handler is
+                * running. Now verify that no threaded handlers are
+                * active.
+                */
+               wait_event(desc->wait_for_threads,
+                          !atomic_read(&desc->threads_active));
+       }
 }
 EXPORT_SYMBOL(synchronize_irq);
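
To illustrate the split documented above: synchronize_hardirq() waits only for the hard IRQ handler, while synchronize_irq() additionally waits for threaded handlers and is therefore the one to use on shutdown. A driver-side sketch; the device structure and helper are hypothetical:

#include <linux/interrupt.h>

struct foo_dev {				/* hypothetical device state */
	int irq;
};

static void foo_disable_hw(struct foo_dev *foo)	/* mask IRQs at the device */
{
}

static void foo_quiesce(struct foo_dev *foo)
{
	foo_disable_hw(foo);
	/* Waits only for a hard IRQ handler still running on another CPU. */
	synchronize_hardirq(foo->irq);
}

static void foo_teardown(struct foo_dev *foo)
{
	foo_disable_hw(foo);
	/* Shutdown: wait for the hard IRQ handler and the irq thread. */
	synchronize_irq(foo->irq);
}
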
 
@@ -718,7 +748,7 @@ again:
 
        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data))
-               unmask_irq(desc);
+               unmask_threaded_irq(desc);
 
 out_unlock:
        raw_spin_unlock_irq(&desc->lock);
@@ -727,7 +757,7 @@ out_unlock:
 
 #ifdef CONFIG_SMP
 /*
- * Check whether we need to chasnge the affinity of the interrupt thread.
+ * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -880,6 +910,33 @@ static int irq_thread(void *data)
        return 0;
 }
 
+/**
+ *     irq_wake_thread - wake the irq thread for the action identified by dev_id
+ *     @irq:           Interrupt line
+ *     @dev_id:        Device identity for which the thread should be woken
+ *
+ */
+void irq_wake_thread(unsigned int irq, void *dev_id)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irqaction *action;
+       unsigned long flags;
+
+       if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+               return;
+
+       raw_spin_lock_irqsave(&desc->lock, flags);
+       for (action = desc->action; action; action = action->next) {
+               if (action->dev_id == dev_id) {
+                       if (action->thread)
+                               __irq_wake_thread(desc, action);
+                       break;
+               }
+       }
+       raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(irq_wake_thread);
+
 static void irq_setup_forced_threading(struct irqaction *new)
 {
        if (!force_irqthreads)
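
The exported irq_wake_thread() above lets a driver kick its threaded handler for a given dev_id without a new hardware interrupt, for example from a timeout or error-recovery path. A hedged sketch with hypothetical driver names; the dev_id must match the one passed to request_threaded_irq():

#include <linux/interrupt.h>

struct bar_dev {				/* hypothetical driver state */
	int irq;
	bool need_poll;
};

/* Registered via request_threaded_irq(bar->irq, bar_hardirq, bar_thread_fn,
 * IRQF_ONESHOT, "bar", bar) elsewhere in the driver.
 */
static irqreturn_t bar_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;		/* normal way to run bar_thread_fn() */
}

static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	/* heavy lifting in process context */
	return IRQ_HANDLED;
}

static void bar_recover(struct bar_dev *bar)
{
	/* Force the threaded handler to run even without a new interrupt. */
	if (bar->need_poll)
		irq_wake_thread(bar->irq, bar);
}
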
@@ -896,6 +953,23 @@ static void irq_setup_forced_threading(struct irqaction *new)
        }
 }
 
+static int irq_request_resources(struct irq_desc *desc)
+{
+       struct irq_data *d = &desc->irq_data;
+       struct irq_chip *c = d->chip;
+
+       return c->irq_request_resources ? c->irq_request_resources(d) : 0;
+}
+
+static void irq_release_resources(struct irq_desc *desc)
+{
+       struct irq_data *d = &desc->irq_data;
+       struct irq_chip *c = d->chip;
+
+       if (c->irq_release_resources)
+               c->irq_release_resources(d);
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -1091,6 +1165,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }
 
        if (!shared) {
+               ret = irq_request_resources(desc);
+               if (ret) {
+                       pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
+                              new->name, irq, desc->irq_data.chip->name);
+                       goto out_mask;
+               }
+
                init_waitqueue_head(&desc->wait_for_threads);
 
                /* Setup the type (level, edge polarity) if configured: */
@@ -1261,8 +1342,10 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
        *action_ptr = action->next;
 
        /* If this was the last handler, shut down the IRQ line: */
-       if (!desc->action)
+       if (!desc->action) {
                irq_shutdown(desc);
+               irq_release_resources(desc);
+       }
 
 #ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
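
The new irq_request_resources()/irq_release_resources() hooks shown above are invoked from __setup_irq() before the first handler is installed and from __free_irq() after the last one is removed. A minimal sketch of an irq_chip implementing them; the pin-claiming helpers are hypothetical:

#include <linux/irq.h>

/* Hypothetical helpers for a pin shared between GPIO and IRQ functions. */
static int  baz_claim_pin(struct irq_data *d)   { return 0; }
static void baz_release_pin(struct irq_data *d) { }

static int baz_irq_request_resources(struct irq_data *d)
{
	/* A non-zero return here makes the request_irq() call fail. */
	return baz_claim_pin(d);
}

static void baz_irq_release_resources(struct irq_data *d)
{
	baz_release_pin(d);
}

static struct irq_chip baz_irq_chip = {
	.name			= "baz",
	.irq_request_resources	= baz_irq_request_resources,
	.irq_release_resources	= baz_irq_release_resources,
	/* .irq_mask, .irq_unmask, .irq_ack, ... as usual */
};
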
index 36f6ee181b0c4313a7632f6914341cacd26989b0..ac1ba2f110321fe637b2f5538c0ac568dbafc10e 100644 (file)
@@ -324,15 +324,15 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 
 #ifdef CONFIG_SMP
        /* create /proc/irq/<irq>/smp_affinity */
-       proc_create_data("smp_affinity", 0600, desc->dir,
+       proc_create_data("smp_affinity", 0644, desc->dir,
                         &irq_affinity_proc_fops, (void *)(long)irq);
 
        /* create /proc/irq/<irq>/affinity_hint */
-       proc_create_data("affinity_hint", 0400, desc->dir,
+       proc_create_data("affinity_hint", 0444, desc->dir,
                         &irq_affinity_hint_proc_fops, (void *)(long)irq);
 
        /* create /proc/irq/<irq>/smp_affinity_list */
-       proc_create_data("smp_affinity_list", 0600, desc->dir,
+       proc_create_data("smp_affinity_list", 0644, desc->dir,
                         &irq_affinity_list_proc_fops, (void *)(long)irq);
 
        proc_create_data("node", 0444, desc->dir,
@@ -372,7 +372,7 @@ void unregister_handler_proc(unsigned int irq, struct irqaction *action)
 static void register_default_affinity_proc(void)
 {
 #ifdef CONFIG_SMP
-       proc_create("irq/default_smp_affinity", 0600, NULL,
+       proc_create("irq/default_smp_affinity", 0644, NULL,
                    &default_affinity_proc_fops);
 #endif
 }
index 55fcce6065cf6bc3213829fd8188cae90b61b778..a82170e2fa78e50fbefa29c6cf71b1eccadae175 100644 (file)
@@ -61,11 +61,11 @@ void __weak arch_irq_work_raise(void)
  *
  * Can be re-enqueued while the callback is still in progress.
  */
-void irq_work_queue(struct irq_work *work)
+bool irq_work_queue(struct irq_work *work)
 {
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
-               return;
+               return false;
 
        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
@@ -83,6 +83,8 @@ void irq_work_queue(struct irq_work *work)
        }
 
        preempt_enable();
+
+       return true;
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
index 60bafbed06abd7a7b2defd0c031ca18b7287a37e..45601cf41bee0106ff37a7ef5112cfd065c4a7bc 100644 (file)
@@ -1039,10 +1039,10 @@ void __weak crash_unmap_reserved_pages(void)
 {}
 
 #ifdef CONFIG_COMPAT
-asmlinkage long compat_sys_kexec_load(unsigned long entry,
-                               unsigned long nr_segments,
-                               struct compat_kexec_segment __user *segments,
-                               unsigned long flags)
+COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
+                      compat_ulong_t, nr_segments,
+                      struct compat_kexec_segment __user *, segments,
+                      compat_ulong_t, flags)
 {
        struct compat_kexec_segment in;
        struct kexec_segment out, __user *ksegments;
index d945a949760f0ec4a9fd09313e30fb5f651bc39a..e660964086e2e46d1066e143d29c21887f712d5d 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/sched.h>
 #include <linux/capability.h>
 
+#include <linux/rcupdate.h>    /* rcu_expedited */
+
 #define KERNEL_ATTR_RO(_name) \
 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 
index baab8e5e7f66acd327f71afd40fbd7530803a449..306a76b51e0f4308a1ef680e006e07333e98d3c0 100644 (file)
@@ -1,5 +1,5 @@
 
-obj-y += mutex.o semaphore.o rwsem.o lglock.o
+obj-y += mutex.o semaphore.o rwsem.o lglock.o mcs_spinlock.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = -pg
@@ -23,3 +23,4 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
 obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
+obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
index eb8a54783fa0f47bb9b51568ec9dfed3d51cc48d..b0e9467922e1a476bfe1d4d8503ac7623affcaea 100644 (file)
@@ -1936,12 +1936,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 
        for (;;) {
                int distance = curr->lockdep_depth - depth + 1;
-               hlock = curr->held_locks + depth-1;
+               hlock = curr->held_locks + depth - 1;
                /*
                 * Only non-recursive-read entries get new dependencies
                 * added:
                 */
-               if (hlock->read != 2) {
+               if (hlock->read != 2 && hlock->check) {
                        if (!check_prev_add(curr, hlock, next,
                                                distance, trylock_loop))
                                return 0;
@@ -2098,7 +2098,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
         * (If lookup_chain_cache() returns with 1 it acquires
         * graph_lock for us)
         */
-       if (!hlock->trylock && (hlock->check == 2) &&
+       if (!hlock->trylock && hlock->check &&
            lookup_chain_cache(curr, hlock, chain_key)) {
                /*
                 * Check whether last held lock:
@@ -2517,7 +2517,7 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 
                BUG_ON(usage_bit >= LOCK_USAGE_STATES);
 
-               if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
+               if (!hlock->check)
                        continue;
 
                if (!mark_lock(curr, hlock, usage_bit))
@@ -2557,7 +2557,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
        debug_atomic_inc(hardirqs_on_events);
 }
 
-void trace_hardirqs_on_caller(unsigned long ip)
+__visible void trace_hardirqs_on_caller(unsigned long ip)
 {
        time_hardirqs_on(CALLER_ADDR0, ip);
 
@@ -2610,7 +2610,7 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long ip)
+__visible void trace_hardirqs_off_caller(unsigned long ip)
 {
        struct task_struct *curr = current;
 
@@ -3055,9 +3055,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        int class_idx;
        u64 chain_key;
 
-       if (!prove_locking)
-               check = 1;
-
        if (unlikely(!debug_locks))
                return 0;
 
@@ -3069,8 +3066,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
 
-       if (lock->key == &__lockdep_no_validate__)
-               check = 1;
+       if (!prove_locking || lock->key == &__lockdep_no_validate__)
+               check = 0;
 
        if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                class = lock->class_cache[subclass];
@@ -3138,7 +3135,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->holdtime_stamp = lockstat_clock();
 #endif
 
-       if (check == 2 && !mark_irqflags(curr, hlock))
+       if (check && !mark_irqflags(curr, hlock))
                return 0;
 
        /* mark it as used: */
@@ -4191,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-void lockdep_sys_exit(void)
+asmlinkage void lockdep_sys_exit(void)
 {
        struct task_struct *curr = current;
 
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
new file mode 100644 (file)
index 0000000..f26b1a1
--- /dev/null
@@ -0,0 +1,452 @@
+/*
+ * Module-based torture test facility for locking
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2014
+ *
+ * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ *     Based on kernel/rcu/torture.c.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/trace_clock.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
+
+torture_param(int, nwriters_stress, -1,
+            "Number of write-locking stress-test threads");
+torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
+torture_param(int, onoff_interval, 0,
+            "Time between CPU hotplugs (s), 0=disable");
+torture_param(int, shuffle_interval, 3,
+            "Number of jiffies between shuffles, 0=disable");
+torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
+torture_param(int, stat_interval, 60,
+            "Number of seconds between stats printk()s");
+torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
+torture_param(bool, verbose, true,
+            "Enable verbose debugging printk()s");
+
+static char *torture_type = "spin_lock";
+module_param(torture_type, charp, 0444);
+MODULE_PARM_DESC(torture_type,
+                "Type of lock to torture (spin_lock, spin_lock_irq, ...)");
+
+static atomic_t n_lock_torture_errors;
+
+static struct task_struct *stats_task;
+static struct task_struct **writer_tasks;
+
+static int nrealwriters_stress;
+static bool lock_is_write_held;
+
+struct lock_writer_stress_stats {
+       long n_write_lock_fail;
+       long n_write_lock_acquired;
+};
+static struct lock_writer_stress_stats *lwsa;
+
+#if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE)
+#define LOCKTORTURE_RUNNABLE_INIT 1
+#else
+#define LOCKTORTURE_RUNNABLE_INIT 0
+#endif
+int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
+module_param(locktorture_runnable, int, 0444);
+MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot");
+
+/* Forward reference. */
+static void lock_torture_cleanup(void);
+
+/*
+ * Operations vector for selecting different types of tests.
+ */
+struct lock_torture_ops {
+       void (*init)(void);
+       int (*writelock)(void);
+       void (*write_delay)(struct torture_random_state *trsp);
+       void (*writeunlock)(void);
+       unsigned long flags;
+       const char *name;
+};
+
+static struct lock_torture_ops *cur_ops;
+
+/*
+ * Definitions for lock torture testing.
+ */
+
+static int torture_lock_busted_write_lock(void)
+{
+       return 0;  /* BUGGY, do not use in real life!!! */
+}
+
+static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
+{
+       const unsigned long longdelay_us = 100;
+
+       /* We want a long delay occasionally to force massive contention.  */
+       if (!(torture_random(trsp) %
+             (nrealwriters_stress * 2000 * longdelay_us)))
+               mdelay(longdelay_us);
+#ifdef CONFIG_PREEMPT
+       if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+               preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_lock_busted_write_unlock(void)
+{
+         /* BUGGY, do not use in real life!!! */
+}
+
+static struct lock_torture_ops lock_busted_ops = {
+       .writelock      = torture_lock_busted_write_lock,
+       .write_delay    = torture_lock_busted_write_delay,
+       .writeunlock    = torture_lock_busted_write_unlock,
+       .name           = "lock_busted"
+};
+
+static DEFINE_SPINLOCK(torture_spinlock);
+
+static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
+{
+       spin_lock(&torture_spinlock);
+       return 0;
+}
+
+static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
+{
+       const unsigned long shortdelay_us = 2;
+       const unsigned long longdelay_us = 100;
+
+       /* We want a short delay mostly to emulate likely code, and
+        * we want a long delay occasionally to force massive contention.
+        */
+       if (!(torture_random(trsp) %
+             (nrealwriters_stress * 2000 * longdelay_us)))
+               mdelay(longdelay_us);
+       if (!(torture_random(trsp) %
+             (nrealwriters_stress * 2 * shortdelay_us)))
+               udelay(shortdelay_us);
+#ifdef CONFIG_PREEMPT
+       if (!(torture_random(trsp) % (nrealwriters_stress * 20000)))
+               preempt_schedule();  /* Allow test to be preempted. */
+#endif
+}
+
+static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
+{
+       spin_unlock(&torture_spinlock);
+}
+
+static struct lock_torture_ops spin_lock_ops = {
+       .writelock      = torture_spin_lock_write_lock,
+       .write_delay    = torture_spin_lock_write_delay,
+       .writeunlock    = torture_spin_lock_write_unlock,
+       .name           = "spin_lock"
+};
+
+static int torture_spin_lock_write_lock_irq(void)
+__acquires(torture_spinlock_irq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&torture_spinlock, flags);
+       cur_ops->flags = flags;
+       return 0;
+}
+
+static void torture_lock_spin_write_unlock_irq(void)
+__releases(torture_spinlock)
+{
+       spin_unlock_irqrestore(&torture_spinlock, cur_ops->flags);
+}
+
+static struct lock_torture_ops spin_lock_irq_ops = {
+       .writelock      = torture_spin_lock_write_lock_irq,
+       .write_delay    = torture_spin_lock_write_delay,
+       .writeunlock    = torture_lock_spin_write_unlock_irq,
+       .name           = "spin_lock_irq"
+};
+
+/*
+ * Lock torture writer kthread.  Repeatedly acquires and releases
+ * the lock, checking for duplicate acquisitions.
+ */
+static int lock_torture_writer(void *arg)
+{
+       struct lock_writer_stress_stats *lwsp = arg;
+       static DEFINE_TORTURE_RANDOM(rand);
+
+       VERBOSE_TOROUT_STRING("lock_torture_writer task started");
+       set_user_nice(current, 19);
+
+       do {
+               schedule_timeout_uninterruptible(1);
+               cur_ops->writelock();
+               if (WARN_ON_ONCE(lock_is_write_held))
+                       lwsp->n_write_lock_fail++;
+               lock_is_write_held = 1;
+               lwsp->n_write_lock_acquired++;
+               cur_ops->write_delay(&rand);
+               lock_is_write_held = 0;
+               cur_ops->writeunlock();
+               stutter_wait("lock_torture_writer");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("lock_torture_writer");
+       return 0;
+}
+
+/*
+ * Create a lock-torture-statistics message in the specified buffer.
+ */
+static void lock_torture_printk(char *page)
+{
+       bool fail = 0;
+       int i;
+       long max = 0;
+       long min = lwsa[0].n_write_lock_acquired;
+       long long sum = 0;
+
+       for (i = 0; i < nrealwriters_stress; i++) {
+               if (lwsa[i].n_write_lock_fail)
+                       fail = true;
+               sum += lwsa[i].n_write_lock_acquired;
+               if (max < lwsa[i].n_write_lock_acquired)
+                       max = lwsa[i].n_write_lock_acquired;
+               if (min > lwsa[i].n_write_lock_acquired)
+                       min = lwsa[i].n_write_lock_acquired;
+       }
+       page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
+       page += sprintf(page,
+                       "Writes:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
+                       sum, max, min, max / 2 > min ? "???" : "",
+                       fail, fail ? "!!!" : "");
+       if (fail)
+               atomic_inc(&n_lock_torture_errors);
+}
+
+/*
+ * Print torture statistics.  Caller must ensure that there is only one
+ * call to this function at a given time!!!  This is normally accomplished
+ * by relying on the module system to only have one copy of the module
+ * loaded, and then by giving the lock_torture_stats kthread full control
+ * (or the init/cleanup functions when lock_torture_stats thread is not
+ * running).
+ */
+static void lock_torture_stats_print(void)
+{
+       int size = nrealwriters_stress * 200 + 8192;
+       char *buf;
+
+       buf = kmalloc(size, GFP_KERNEL);
+       if (!buf) {
+               pr_err("lock_torture_stats_print: Out of memory, need: %d",
+                      size);
+               return;
+       }
+       lock_torture_printk(buf);
+       pr_alert("%s", buf);
+       kfree(buf);
+}
+
+/*
+ * Periodically prints torture statistics, if periodic statistics printing
+ * was specified via the stat_interval module parameter.
+ *
+ * No need to worry about fullstop here, since this one doesn't reference
+ * volatile state or register callbacks.
+ */
+static int lock_torture_stats(void *arg)
+{
+       VERBOSE_TOROUT_STRING("lock_torture_stats task started");
+       do {
+               schedule_timeout_interruptible(stat_interval * HZ);
+               lock_torture_stats_print();
+               torture_shutdown_absorb("lock_torture_stats");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("lock_torture_stats");
+       return 0;
+}
+
+static inline void
+lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
+                               const char *tag)
+{
+       pr_alert("%s" TORTURE_FLAG
+                "--- %s: nwriters_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
+                torture_type, tag, nrealwriters_stress, stat_interval, verbose,
+                shuffle_interval, stutter, shutdown_secs,
+                onoff_interval, onoff_holdoff);
+}
+
+static void lock_torture_cleanup(void)
+{
+       int i;
+
+       if (torture_cleanup())
+               return;
+
+       if (writer_tasks) {
+               for (i = 0; i < nrealwriters_stress; i++)
+                       torture_stop_kthread(lock_torture_writer,
+                                            writer_tasks[i]);
+               kfree(writer_tasks);
+               writer_tasks = NULL;
+       }
+
+       torture_stop_kthread(lock_torture_stats, stats_task);
+       lock_torture_stats_print();  /* -After- the stats thread is stopped! */
+
+       if (atomic_read(&n_lock_torture_errors))
+               lock_torture_print_module_parms(cur_ops,
+                                               "End of test: FAILURE");
+       else if (torture_onoff_failures())
+               lock_torture_print_module_parms(cur_ops,
+                                               "End of test: LOCK_HOTPLUG");
+       else
+               lock_torture_print_module_parms(cur_ops,
+                                               "End of test: SUCCESS");
+}
+
+static int __init lock_torture_init(void)
+{
+       int i;
+       int firsterr = 0;
+       static struct lock_torture_ops *torture_ops[] = {
+               &lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
+       };
+
+       torture_init_begin(torture_type, verbose, &locktorture_runnable);
+
+       /* Process args and tell the world that the torturer is on the job. */
+       for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
+               cur_ops = torture_ops[i];
+               if (strcmp(torture_type, cur_ops->name) == 0)
+                       break;
+       }
+       if (i == ARRAY_SIZE(torture_ops)) {
+               pr_alert("lock-torture: invalid torture type: \"%s\"\n",
+                        torture_type);
+               pr_alert("lock-torture types:");
+               for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
+                       pr_alert(" %s", torture_ops[i]->name);
+               pr_alert("\n");
+               torture_init_end();
+               return -EINVAL;
+       }
+       if (cur_ops->init)
+               cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+
+       if (nwriters_stress >= 0)
+               nrealwriters_stress = nwriters_stress;
+       else
+               nrealwriters_stress = 2 * num_online_cpus();
+       lock_torture_print_module_parms(cur_ops, "Start of test");
+
+       /* Initialize the statistics so that each run gets its own numbers. */
+
+       lock_is_write_held = 0;
+       lwsa = kmalloc(sizeof(*lwsa) * nrealwriters_stress, GFP_KERNEL);
+       if (lwsa == NULL) {
+               VERBOSE_TOROUT_STRING("lwsa: Out of memory");
+               firsterr = -ENOMEM;
+               goto unwind;
+       }
+       for (i = 0; i < nrealwriters_stress; i++) {
+               lwsa[i].n_write_lock_fail = 0;
+               lwsa[i].n_write_lock_acquired = 0;
+       }
+
+       /* Start up the kthreads. */
+
+       if (onoff_interval > 0) {
+               firsterr = torture_onoff_init(onoff_holdoff * HZ,
+                                             onoff_interval * HZ);
+               if (firsterr)
+                       goto unwind;
+       }
+       if (shuffle_interval > 0) {
+               firsterr = torture_shuffle_init(shuffle_interval);
+               if (firsterr)
+                       goto unwind;
+       }
+       if (shutdown_secs > 0) {
+               firsterr = torture_shutdown_init(shutdown_secs,
+                                                lock_torture_cleanup);
+               if (firsterr)
+                       goto unwind;
+       }
+       if (stutter > 0) {
+               firsterr = torture_stutter_init(stutter);
+               if (firsterr)
+                       goto unwind;
+       }
+
+       writer_tasks = kzalloc(nrealwriters_stress * sizeof(writer_tasks[0]),
+                              GFP_KERNEL);
+       if (writer_tasks == NULL) {
+               VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
+               firsterr = -ENOMEM;
+               goto unwind;
+       }
+       for (i = 0; i < nrealwriters_stress; i++) {
+               firsterr = torture_create_kthread(lock_torture_writer, &lwsa[i],
+                                                 writer_tasks[i]);
+               if (firsterr)
+                       goto unwind;
+       }
+       if (stat_interval > 0) {
+               firsterr = torture_create_kthread(lock_torture_stats, NULL,
+                                                 stats_task);
+               if (firsterr)
+                       goto unwind;
+       }
+       torture_init_end();
+       return 0;
+
+unwind:
+       torture_init_end();
+       lock_torture_cleanup();
+       return firsterr;
+}
+
+module_init(lock_torture_init);
+module_exit(lock_torture_cleanup);
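The operations vector above makes it easy to extend locktorture to further lock types: a
new type needs only writelock/writeunlock callbacks (plus optional init and write_delay
hooks) and an entry in the torture_ops[] array in lock_torture_init(). As a minimal
sketch, a hypothetical mutex-based variant (not part of this patch; "torture_mutex" and
"mutex_lock_ops" are illustrative names, and the existing spinlock delay callback is
simply reused) might look like:

	static DEFINE_MUTEX(torture_mutex);

	static int torture_mutex_lock(void) __acquires(torture_mutex)
	{
		mutex_lock(&torture_mutex);
		return 0;
	}

	static void torture_mutex_unlock(void) __releases(torture_mutex)
	{
		mutex_unlock(&torture_mutex);
	}

	static struct lock_torture_ops mutex_lock_ops = {
		.writelock	= torture_mutex_lock,
		.write_delay	= torture_spin_lock_write_delay,
		.writeunlock	= torture_mutex_unlock,
		.name		= "mutex_lock"
	};

Such a variant would then be selected by loading the module with torture_type=mutex_lock.
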
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
new file mode 100644 (file)
index 0000000..838dc9e
--- /dev/null
@@ -0,0 +1,178 @@
+
+#include <linux/percpu.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include "mcs_spinlock.h"
+
+#ifdef CONFIG_SMP
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ *
+ * Using a single mcs node per CPU is safe because sleeping locks should not be
+ * called from interrupt context and we have preemption disabled while
+ * spinning.
+ */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+
+/*
+ * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
+ * Can return NULL in case we were the last queued and we updated @lock instead.
+ */
+static inline struct optimistic_spin_queue *
+osq_wait_next(struct optimistic_spin_queue **lock,
+             struct optimistic_spin_queue *node,
+             struct optimistic_spin_queue *prev)
+{
+       struct optimistic_spin_queue *next = NULL;
+
+       for (;;) {
+               if (*lock == node && cmpxchg(lock, node, prev) == node) {
+                       /*
+                        * We were the last queued, we moved @lock back. @prev
+                        * will now observe @lock and will complete its
+                        * unlock()/unqueue().
+                        */
+                       break;
+               }
+
+               /*
+                * We must xchg() the @node->next value, because if we were to
+                * leave it in, a concurrent unlock()/unqueue() from
+                * @node->next might complete Step-A and think its @prev is
+                * still valid.
+                *
+                * If the concurrent unlock()/unqueue() wins the race, we'll
+                * wait for either @lock to point to us, through its Step-B, or
+                * wait for a new @node->next from its Step-C.
+                */
+               if (node->next) {
+                       next = xchg(&node->next, NULL);
+                       if (next)
+                               break;
+               }
+
+               arch_mutex_cpu_relax();
+       }
+
+       return next;
+}
+
+bool osq_lock(struct optimistic_spin_queue **lock)
+{
+       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
+       struct optimistic_spin_queue *prev, *next;
+
+       node->locked = 0;
+       node->next = NULL;
+
+       node->prev = prev = xchg(lock, node);
+       if (likely(prev == NULL))
+               return true;
+
+       ACCESS_ONCE(prev->next) = node;
+
+       /*
+        * Normally @prev is untouchable after the above store; because at that
+        * moment unlock can proceed and wipe the node element from stack.
+        *
+        * However, since our nodes are static per-cpu storage, we're
+        * guaranteed their existence -- this allows us to apply
+        * cmpxchg in an attempt to undo our queueing.
+        */
+
+       while (!smp_load_acquire(&node->locked)) {
+               /*
+                * If we need to reschedule, bail out so that we can block.
+                */
+               if (need_resched())
+                       goto unqueue;
+
+               arch_mutex_cpu_relax();
+       }
+       return true;
+
+unqueue:
+       /*
+        * Step - A  -- stabilize @prev
+        *
+        * Undo our @prev->next assignment; this will make @prev's
+        * unlock()/unqueue() wait for a next pointer since @lock points to us
+        * (or later).
+        */
+
+       for (;;) {
+               if (prev->next == node &&
+                   cmpxchg(&prev->next, node, NULL) == node)
+                       break;
+
+               /*
+                * We can only fail the cmpxchg() racing against an unlock(),
+                * in which case we should observe @node->locked becoming
+                * true.
+                */
+               if (smp_load_acquire(&node->locked))
+                       return true;
+
+               arch_mutex_cpu_relax();
+
+               /*
+                * Or we race against a concurrent unqueue()'s step-B, in which
+                * case its step-C will write us a new @node->prev pointer.
+                */
+               prev = ACCESS_ONCE(node->prev);
+       }
+
+       /*
+        * Step - B -- stabilize @next
+        *
+        * Similar to unlock(), wait for @node->next or move @lock from @node
+        * back to @prev.
+        */
+
+       next = osq_wait_next(lock, node, prev);
+       if (!next)
+               return false;
+
+       /*
+        * Step - C -- unlink
+        *
+        * @prev is stable because it's still waiting for a new @prev->next
+        * pointer, @next is stable because our @node->next pointer is NULL and
+        * it will wait in Step-A.
+        */
+
+       ACCESS_ONCE(next->prev) = prev;
+       ACCESS_ONCE(prev->next) = next;
+
+       return false;
+}
+
+void osq_unlock(struct optimistic_spin_queue **lock)
+{
+       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
+       struct optimistic_spin_queue *next;
+
+       /*
+        * Fast path for the uncontended case.
+        */
+       if (likely(cmpxchg(lock, node, NULL) == node))
+               return;
+
+       /*
+        * Second most likely case.
+        */
+       next = xchg(&node->next, NULL);
+       if (next) {
+               ACCESS_ONCE(next->locked) = 1;
+               return;
+       }
+
+       next = osq_wait_next(lock, node, NULL);
+       if (next)
+               ACCESS_ONCE(next->locked) = 1;
+}
+
+#endif
+
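The intended caller of osq_lock()/osq_unlock() is the optimistic-spin path of a sleeping
lock, as the mutex.c changes later in this patch show. A condensed control-flow sketch
(spin_was_useful() and trylock_succeeded() are placeholder names standing in for the
owner-spin and cmpxchg trylock logic of __mutex_lock_common(), not real functions):

	if (!osq_lock(&lock->osq))
		goto slowpath;			/* queue contended: just go to sleep */

	for (;;) {
		if (trylock_succeeded(lock)) {	/* e.g. atomic_cmpxchg() on ->count */
			osq_unlock(&lock->osq);
			return 0;		/* acquired while spinning */
		}
		if (!spin_was_useful(lock) || need_resched())
			break;			/* give up spinning */
		arch_mutex_cpu_relax();
	}
	osq_unlock(&lock->osq);
slowpath:
	/* take wait_lock and block on the wait list as before */
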
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
new file mode 100644 (file)
index 0000000..a2dbac4
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * MCS lock defines
+ *
+ * This file contains the main data structure and API definitions of MCS lock.
+ *
+ * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
+ * with the desirable properties of being fair, and with each cpu trying
+ * to acquire the lock spinning on a local variable.
+ * It avoids the expensive cache-line bouncing that common test-and-set spin-lock
+ * implementations incur.
+ */
+#ifndef __LINUX_MCS_SPINLOCK_H
+#define __LINUX_MCS_SPINLOCK_H
+
+#include <asm/mcs_spinlock.h>
+
+struct mcs_spinlock {
+       struct mcs_spinlock *next;
+       int locked; /* 1 if lock acquired */
+};
+
+#ifndef arch_mcs_spin_lock_contended
+/*
+ * Using smp_load_acquire() provides a memory barrier that ensures
+ * subsequent operations happen after the lock is acquired.
+ */
+#define arch_mcs_spin_lock_contended(l)                                        \
+do {                                                                   \
+       while (!(smp_load_acquire(l)))                                  \
+               arch_mutex_cpu_relax();                                 \
+} while (0)
+#endif
+
+#ifndef arch_mcs_spin_unlock_contended
+/*
+ * smp_store_release() provides a memory barrier to ensure all
+ * operations in the critical section have been completed before
+ * unlocking.
+ */
+#define arch_mcs_spin_unlock_contended(l)                              \
+       smp_store_release((l), 1)
+#endif
+
+/*
+ * Note: the smp_load_acquire/smp_store_release pair is not
+ * sufficient to form a full memory barrier across
+ * cpus for many architectures (except x86) for mcs_unlock and mcs_lock.
+ * For applications that need a full barrier across multiple cpus
+ * with mcs_unlock and mcs_lock pair, smp_mb__after_unlock_lock() should be
+ * used after mcs_lock.
+ */
+
+/*
+ * In order to acquire the lock, the caller should declare a local node and
+ * pass a reference to the node to this function in addition to the lock.
+ * If the lock has already been acquired, then this will proceed to spin
+ * on this node->locked until the previous lock holder sets the node->locked
+ * in mcs_spin_unlock().
+ *
+ * We don't inline mcs_spin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
+static inline
+void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+       struct mcs_spinlock *prev;
+
+       /* Init node */
+       node->locked = 0;
+       node->next   = NULL;
+
+       prev = xchg(lock, node);
+       if (likely(prev == NULL)) {
+               /*
+                * Lock acquired, don't need to set node->locked to 1. Each thread
+                * spins only on its own node->locked value for lock acquisition.
+                * However, since this thread can immediately acquire the lock
+                * and does not proceed to spin on its own node->locked, this
+                * value won't be used. If a debug mode is needed to
+                * audit lock status, then set node->locked value here.
+                */
+               return;
+       }
+       ACCESS_ONCE(prev->next) = node;
+
+       /* Wait until the lock holder passes the lock down. */
+       arch_mcs_spin_lock_contended(&node->locked);
+}
+
+/*
+ * Releases the lock. The caller should pass in the corresponding node that
+ * was used to acquire the lock.
+ */
+static inline
+void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
+{
+       struct mcs_spinlock *next = ACCESS_ONCE(node->next);
+
+       if (likely(!next)) {
+               /*
+                * Release the lock by setting it to NULL
+                */
+               if (likely(cmpxchg(lock, node, NULL) == node))
+                       return;
+               /* Wait until the next pointer is set */
+               while (!(next = ACCESS_ONCE(node->next)))
+                       arch_mutex_cpu_relax();
+       }
+
+       /* Pass lock to next waiter. */
+       arch_mcs_spin_unlock_contended(&next->locked);
+}
+
+/*
+ * Cancellable version of the MCS lock above.
+ *
+ * Intended for adaptive spinning of sleeping locks:
+ * mutex_lock()/rwsem_down_{read,write}() etc.
+ */
+
+struct optimistic_spin_queue {
+       struct optimistic_spin_queue *next, *prev;
+       int locked; /* 1 if lock acquired */
+};
+
+extern bool osq_lock(struct optimistic_spin_queue **lock);
+extern void osq_unlock(struct optimistic_spin_queue **lock);
+
+#endif /* __LINUX_MCS_SPINLOCK_H */
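As the comments above note, every contender supplies its own queue node, typically on
its stack, and spins only on that node's ->locked field. A minimal usage sketch
(hypothetical caller; mcs_tail and my_critical_section() are illustrative names):

	static struct mcs_spinlock *mcs_tail;	/* lock tail pointer, NULL when free */

	static void my_critical_section(void)
	{
		struct mcs_spinlock node;	/* this contender's queue node */

		mcs_spin_lock(&mcs_tail, &node);
		/* critical section, protected by the MCS lock */
		mcs_spin_unlock(&mcs_tail, &node);
	}
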
index faf6f5b53e775eefbc7ee4ea734ff0998be68d01..e1191c996c59cbe3b3d2aecb7b54fd570cef17b6 100644 (file)
@@ -83,6 +83,12 @@ void debug_mutex_unlock(struct mutex *lock)
 
        DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
        mutex_clear_owner(lock);
+
+       /*
+        * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
+        * mutexes so that we can do it here after we've verified state.
+        */
+       atomic_set(&lock->count, 1);
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name,
index 4dd6e4c219de9316593b61daae8e17cf8dc5d874..bc73d33c6760e174fd1bb2c8319c0faf5abc221f 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
+#include "mcs_spinlock.h"
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 #ifdef CONFIG_DEBUG_MUTEXES
 # include "mutex-debug.h"
 # include <asm-generic/mutex-null.h>
+/*
+ * Must be 0 for the debug case so we do not do the unlock outside of the
+ * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
+ * case.
+ */
+# undef __mutex_slowpath_needs_to_unlock
+# define  __mutex_slowpath_needs_to_unlock()   0
 #else
 # include "mutex.h"
 # include <asm/mutex.h>
@@ -52,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       lock->spin_mlock = NULL;
+       lock->osq = NULL;
 #endif
 
        debug_mutex_init(lock, name, key);
@@ -67,8 +75,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static __used noinline void __sched
-__mutex_lock_slowpath(atomic_t *lock_count);
+__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_lock - acquire the mutex
@@ -111,54 +118,7 @@ EXPORT_SYMBOL(mutex_lock);
  * more or less simultaneously, the spinners need to acquire a MCS lock
  * first before spinning on the owner field.
  *
- * We don't inline mspin_lock() so that perf can correctly account for the
- * time spent in this lock function.
  */
-struct mspin_node {
-       struct mspin_node *next ;
-       int               locked;       /* 1 if lock acquired */
-};
-#define        MLOCK(mutex)    ((struct mspin_node **)&((mutex)->spin_mlock))
-
-static noinline
-void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
-{
-       struct mspin_node *prev;
-
-       /* Init node */
-       node->locked = 0;
-       node->next   = NULL;
-
-       prev = xchg(lock, node);
-       if (likely(prev == NULL)) {
-               /* Lock acquired */
-               node->locked = 1;
-               return;
-       }
-       ACCESS_ONCE(prev->next) = node;
-       smp_wmb();
-       /* Wait until the lock holder passes the lock down */
-       while (!ACCESS_ONCE(node->locked))
-               arch_mutex_cpu_relax();
-}
-
-static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
-{
-       struct mspin_node *next = ACCESS_ONCE(node->next);
-
-       if (likely(!next)) {
-               /*
-                * Release the lock by setting it to NULL
-                */
-               if (cmpxchg(lock, node, NULL) == node)
-                       return;
-               /* Wait until the next pointer is set */
-               while (!(next = ACCESS_ONCE(node->next)))
-                       arch_mutex_cpu_relax();
-       }
-       ACCESS_ONCE(next->locked) = 1;
-       smp_wmb();
-}
 
 /*
  * Mutex spinning code migrated from kernel/sched/core.c
@@ -212,6 +172,9 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
        struct task_struct *owner;
        int retval = 1;
 
+       if (need_resched())
+               return 0;
+
        rcu_read_lock();
        owner = ACCESS_ONCE(lock->owner);
        if (owner)
@@ -225,7 +188,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 }
 #endif
 
-static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+__visible __used noinline
+void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
  * mutex_unlock - release the mutex
@@ -446,9 +410,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        if (!mutex_can_spin_on_owner(lock))
                goto slowpath;
 
+       if (!osq_lock(&lock->osq))
+               goto slowpath;
+
        for (;;) {
                struct task_struct *owner;
-               struct mspin_node  node;
 
                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        struct ww_mutex *ww;
@@ -463,19 +429,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                         * performed the optimistic spinning cannot be done.
                         */
                        if (ACCESS_ONCE(ww->ctx))
-                               goto slowpath;
+                               break;
                }
 
                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
-               mspin_lock(MLOCK(lock), &node);
                owner = ACCESS_ONCE(lock->owner);
-               if (owner && !mutex_spin_on_owner(lock, owner)) {
-                       mspin_unlock(MLOCK(lock), &node);
-                       goto slowpath;
-               }
+               if (owner && !mutex_spin_on_owner(lock, owner))
+                       break;
 
                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
@@ -488,11 +451,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                        }
 
                        mutex_set_owner(lock);
-                       mspin_unlock(MLOCK(lock), &node);
+                       osq_unlock(&lock->osq);
                        preempt_enable();
                        return 0;
                }
-               mspin_unlock(MLOCK(lock), &node);
 
                /*
                 * When there's no owner, we might have preempted between the
@@ -501,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
-                       goto slowpath;
+                       break;
 
                /*
                 * The cpu_relax() call is a compiler barrier which forces
@@ -511,7 +473,15 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 */
                arch_mutex_cpu_relax();
        }
+       osq_unlock(&lock->osq);
 slowpath:
+       /*
+        * If we fell out of the spin path because of need_resched(),
+        * reschedule now, before we try-lock the mutex. This avoids getting
+        * scheduled out right after we obtained the mutex.
+        */
+       if (need_resched())
+               schedule_preempt_disabled();
 #endif
        spin_lock_mutex(&lock->wait_lock, flags);
 
@@ -717,10 +687,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
 
-       spin_lock_mutex(&lock->wait_lock, flags);
-       mutex_release(&lock->dep_map, nested, _RET_IP_);
-       debug_mutex_unlock(lock);
-
        /*
         * some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the later case we have to
@@ -729,6 +695,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);
 
+       spin_lock_mutex(&lock->wait_lock, flags);
+       mutex_release(&lock->dep_map, nested, _RET_IP_);
+       debug_mutex_unlock(lock);
+
        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
@@ -746,7 +716,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static __used noinline void
+__visible void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
        __mutex_unlock_common_slowpath(lock_count, 1);
@@ -803,7 +773,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static __used noinline void __sched
+__visible void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
        struct mutex *lock = container_of(lock_count, struct mutex, count);
index 2e960a2bab81a06ae90d7c48edba6a670d16ebaf..aa4dff04b594c2b058ba2193ef44644ff8dd54dc 100644 (file)
@@ -212,6 +212,18 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
        return task_top_pi_waiter(task)->task;
 }
 
+/*
+ * Called by sched_setscheduler() to check whether the priority change
+ * is overruled by a possible priority boosting.
+ */
+int rt_mutex_check_prio(struct task_struct *task, int newprio)
+{
+       if (!task_has_pi_waiters(task))
+               return 0;
+
+       return task_top_pi_waiter(task)->task->prio <= newprio;
+}
+
 /*
  * Adjust the priority of a task, after its pi_waiters got modified.
  *
index 19c5fa95e0b4d7d06587d5fc5feb2ff0295f1efe..1d66e08e897d166cf70bc2f41cdc3c3bbb57cf45 100644 (file)
@@ -143,6 +143,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
 /*
  * wait for the read lock to be granted
  */
+__visible
 struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
@@ -190,6 +191,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 /*
  * wait until we successfully acquire the write lock
  */
+__visible
 struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
        long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
@@ -252,6 +254,7 @@ struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
  * handle waking up a waiter on the semaphore
  * - up_read/up_write has decremented the active part of count if we come here
  */
+__visible
 struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
@@ -272,6 +275,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
  * - caller incremented waiting part of count and discovered it still negative
  * - just wake up any readers at the front of the queue
  */
+__visible
 struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
index d24fcf29cb64c16029857a89dbcdeaa1ef9d724a..8dc7f5e80dd8f75273ef15df03a14615de1579d4 100644 (file)
@@ -1015,7 +1015,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
                buf[l++] = 'C';
        /*
         * TAINT_FORCED_RMMOD: could be added.
-        * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+        * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
         * apply to modules.
         */
        return l;
@@ -1948,6 +1948,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
 
                switch (sym[i].st_shndx) {
                case SHN_COMMON:
+                       /* Ignore common symbols */
+                       if (!strncmp(name, "__gnu_lto", 9))
+                               break;
+
                        /* We compiled with -fno-common.  These are not
                           supposed to happen.  */
                        pr_debug("Common symbol: %s\n", name);
index 2d5cc4ccff7f4b79f734bf12ea769fb14a2346b2..db4c8b08a50cef986f48f0e537f1b622d4e8b19a 100644 (file)
@@ -309,7 +309,7 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
         * racy then it does not matter what the result of the test
         * is, we re-check the list after having taken the lock anyway:
         */
-       if (rcu_dereference_raw(nh->head)) {
+       if (rcu_access_pointer(nh->head)) {
                down_read(&nh->rwsem);
                ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
                                        nr_calls);
index 6d6300375090e7b2345353174d85ed09f6f35ce4..cca8a913ae7c8d6314fda34a1f3853a364e32cfd 100644 (file)
@@ -199,7 +199,7 @@ struct tnt {
 static const struct tnt tnts[] = {
        { TAINT_PROPRIETARY_MODULE,     'P', 'G' },
        { TAINT_FORCED_MODULE,          'F', ' ' },
-       { TAINT_UNSAFE_SMP,             'S', ' ' },
+       { TAINT_CPU_OUT_OF_SPEC,        'S', ' ' },
        { TAINT_FORCED_RMMOD,           'R', ' ' },
        { TAINT_MACHINE_CHECK,          'M', ' ' },
        { TAINT_BAD_PAGE,               'B', ' ' },
@@ -459,7 +459,7 @@ EXPORT_SYMBOL(warn_slowpath_null);
  * Called when gcc's -fstack-protector feature is used, and
  * gcc detects corruption of the on-stack canary value
  */
-void __stack_chk_fail(void)
+__visible void __stack_chk_fail(void)
 {
        panic("stack-protector: Kernel stack is corrupted in: %p\n",
                __builtin_return_address(0));
index 37170d4dd9a61d1ad1171d106c61af8d59393f85..f4f2073711d34891645be8dfc997645342c043e4 100644 (file)
@@ -973,16 +973,20 @@ static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
 static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
                            const char *buf, size_t n)
 {
-       unsigned int maj, min;
        dev_t res;
-       int ret = -EINVAL;
+       int len = n;
+       char *name;
 
-       if (sscanf(buf, "%u:%u", &maj, &min) != 2)
-               goto out;
+       if (len && buf[len-1] == '\n')
+               len--;
+       name = kstrndup(buf, len, GFP_KERNEL);
+       if (!name)
+               return -ENOMEM;
 
-       res = MKDEV(maj,min);
-       if (maj != MAJOR(res) || min != MINOR(res))
-               goto out;
+       res = name_to_dev_t(name);
+       kfree(name);
+       if (!res)
+               return -EINVAL;
 
        lock_system_sleep();
        swsusp_resume_device = res;
@@ -990,9 +994,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
        printk(KERN_INFO "PM: Starting manual resume from disk\n");
        noresume = 0;
        software_resume();
-       ret = n;
- out:
-       return ret;
+       return n;
 }
 
 power_attr(resume);
index 1d1bf630e6e900d5354413a6176b60b6a948a979..6271bc4073ef24ffd68f38d8348340cc9ff43eb3 100644 (file)
@@ -282,8 +282,8 @@ struct kobject *power_kobj;
  *     state - control system power state.
  *
  *     show() returns what states are supported, which is hard-coded to
- *     'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
- *     'disk' (Suspend-to-Disk).
+ *     'freeze' (Low-Power Idle), 'standby' (Power-On Suspend),
+ *     'mem' (Suspend-to-RAM), and 'disk' (Suspend-to-Disk).
  *
  *     store() accepts one of those strings, translates it into the
  *     proper enumerated value, and initiates a suspend transition.
index 7d4b7ffb3c1d4371f19e81fe83c67ea88ac83759..1ca753106557f4d7de3f6586f86fe8f33593851b 100644 (file)
@@ -49,6 +49,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
  */
 #define SPARE_PAGES    ((1024 * 1024) >> PAGE_SHIFT)
 
+asmlinkage int swsusp_save(void);
+
 /* kernel/power/hibernate.c */
 extern bool freezer_test_done;
 
index 8dff9b48075af3f61eeab3531b3ad706b88718e4..884b77058864cd3596dd6f67d5d8c1dda77dedaf 100644 (file)
@@ -66,6 +66,7 @@ static struct pm_qos_constraints cpu_dma_constraints = {
        .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
        .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
        .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+       .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
        .type = PM_QOS_MIN,
        .notifiers = &cpu_dma_lat_notifier,
 };
@@ -79,6 +80,7 @@ static struct pm_qos_constraints network_lat_constraints = {
        .list = PLIST_HEAD_INIT(network_lat_constraints.list),
        .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
        .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+       .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
        .type = PM_QOS_MIN,
        .notifiers = &network_lat_notifier,
 };
@@ -93,6 +95,7 @@ static struct pm_qos_constraints network_tput_constraints = {
        .list = PLIST_HEAD_INIT(network_tput_constraints.list),
        .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
        .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+       .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
        .type = PM_QOS_MAX,
        .notifiers = &network_throughput_notifier,
 };
@@ -128,7 +131,7 @@ static const struct file_operations pm_qos_power_fops = {
 static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 {
        if (plist_head_empty(&c->list))
-               return c->default_value;
+               return c->no_constraint_value;
 
        switch (c->type) {
        case PM_QOS_MIN:
@@ -170,6 +173,7 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 {
        unsigned long flags;
        int prev_value, curr_value, new_value;
+       int ret;
 
        spin_lock_irqsave(&pm_qos_lock, flags);
        prev_value = pm_qos_get_value(c);
@@ -205,13 +209,15 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 
        trace_pm_qos_update_target(action, prev_value, curr_value);
        if (prev_value != curr_value) {
-               blocking_notifier_call_chain(c->notifiers,
-                                            (unsigned long)curr_value,
-                                            NULL);
-               return 1;
+               ret = 1;
+               if (c->notifiers)
+                       blocking_notifier_call_chain(c->notifiers,
+                                                    (unsigned long)curr_value,
+                                                    NULL);
        } else {
-               return 0;
+               ret = 0;
        }
+       return ret;
 }
 
 /**
index d9f61a145802df2393f6041e935d00ea3f15ff8f..149e745eaa528800cd64a07bd8aacdbe6dd63ea0 100644 (file)
@@ -1268,7 +1268,7 @@ static void free_unnecessary_pages(void)
  * [number of saveable pages] - [number of pages that can be freed in theory]
  *
  * where the second term is the sum of (1) reclaimable slab pages, (2) active
- * and (3) inactive anonymouns pages, (4) active and (5) inactive file pages,
+ * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
  * minus mapped file pages.
  */
 static unsigned long minimum_image_size(unsigned long saveable)
index 62ee437b5c7ea362de8573dd18fcee1ef6286221..90b3d9366d1a7d51338484b95b9a1084aa2992b8 100644 (file)
@@ -39,7 +39,7 @@ static const struct platform_suspend_ops *suspend_ops;
 
 static bool need_suspend_ops(suspend_state_t state)
 {
-       return !!(state > PM_SUSPEND_FREEZE);
+       return state > PM_SUSPEND_FREEZE;
 }
 
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
index 8f50de394d22b30090d1cf62cd3fd3069d451d57..019069c84ff6b6d351b0259f0c5203ebd18ee41d 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/rbtree.h>
 #include <linux/slab.h>
 
+#include "power.h"
+
 static DEFINE_MUTEX(wakelocks_lock);
 
 struct wakelock {
index 1f4bcb3cc21cee5bcfd1b4e13a77eeff2af23a2b..adf98622cb32bdf0b995d25ca0c4477c098e9bcc 100644 (file)
@@ -1180,8 +1180,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
        return ret;
 }
 
-asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
-                                 compat_long_t addr, compat_long_t data)
+COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
+                      compat_long_t, addr, compat_long_t, data)
 {
        struct task_struct *child;
        long ret;
index 01e9ec37a3e39af2d7e5d889a770b099dc1eb473..807ccfbf69b3359fe85b736efd14d7050e5e4990 100644 (file)
@@ -1,5 +1,5 @@
 obj-y += update.o srcu.o
-obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o
+obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
index 79c3877e9c5b83e089ef319a2e72a9aaddde6fa3..bfda2726ca454112569433a85a9467a20f5d7a45 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2011
  *
@@ -23,6 +23,7 @@
 #ifndef __LINUX_RCU_H
 #define __LINUX_RCU_H
 
+#include <trace/events/rcu.h>
 #ifdef CONFIG_RCU_TRACE
 #define RCU_TRACE(stmt) stmt
 #else /* #ifdef CONFIG_RCU_TRACE */
@@ -116,8 +117,6 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
        }
 }
 
-extern int rcu_expedited;
-
 #ifdef CONFIG_RCU_STALL_COMMON
 
 extern int rcu_cpu_stall_suppress;
similarity index 58%
rename from kernel/rcu/torture.c
rename to kernel/rcu/rcutorture.c
index 732f8ae3086a163c22329ecda6a77bf9aa018b79..bd30bc61bc05c273159541f260d047fa4747bc82 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright (C) IBM Corporation, 2005, 2006
  *
 #include <linux/slab.h>
 #include <linux/trace_clock.h>
 #include <asm/byteorder.h>
+#include <linux/torture.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
 
-MODULE_ALIAS("rcutorture");
-#ifdef MODULE_PARAM_PREFIX
-#undef MODULE_PARAM_PREFIX
-#endif
-#define MODULE_PARAM_PREFIX "rcutorture."
-
-static int fqs_duration;
-module_param(fqs_duration, int, 0444);
-MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable");
-static int fqs_holdoff;
-module_param(fqs_holdoff, int, 0444);
-MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
-static int fqs_stutter = 3;
-module_param(fqs_stutter, int, 0444);
-MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
-static bool gp_exp;
-module_param(gp_exp, bool, 0444);
-MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives");
-static bool gp_normal;
-module_param(gp_normal, bool, 0444);
-MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives");
-static int irqreader = 1;
-module_param(irqreader, int, 0444);
-MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
-static int n_barrier_cbs;
-module_param(n_barrier_cbs, int, 0444);
-MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing");
-static int nfakewriters = 4;
-module_param(nfakewriters, int, 0444);
-MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-static int nreaders = -1;
-module_param(nreaders, int, 0444);
-MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-static int object_debug;
-module_param(object_debug, int, 0444);
-MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing");
-static int onoff_holdoff;
-module_param(onoff_holdoff, int, 0444);
-MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
-static int onoff_interval;
-module_param(onoff_interval, int, 0444);
-MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
-static int shuffle_interval = 3;
-module_param(shuffle_interval, int, 0444);
-MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-static int shutdown_secs;
-module_param(shutdown_secs, int, 0444);
-MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable.");
-static int stall_cpu;
-module_param(stall_cpu, int, 0444);
-MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
-static int stall_cpu_holdoff = 10;
-module_param(stall_cpu_holdoff, int, 0444);
-MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
-static int stat_interval = 60;
-module_param(stat_interval, int, 0644);
-MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-static int stutter = 5;
-module_param(stutter, int, 0444);
-MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
-static int test_boost = 1;
-module_param(test_boost, int, 0444);
-MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
-static int test_boost_duration = 4;
-module_param(test_boost_duration, int, 0444);
-MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
-static int test_boost_interval = 7;
-module_param(test_boost_interval, int, 0444);
-MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
-static bool test_no_idle_hz = true;
-module_param(test_no_idle_hz, bool, 0444);
-MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
+
+torture_param(int, fqs_duration, 0,
+             "Duration of fqs bursts (us), 0 to disable");
+torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
+torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
+torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
+torture_param(bool, gp_normal, false,
+            "Use normal (non-expedited) GP wait primitives");
+torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
+torture_param(int, n_barrier_cbs, 0,
+            "# of callbacks/kthreads for barrier testing");
+torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
+torture_param(int, nreaders, -1, "Number of RCU reader threads");
+torture_param(int, object_debug, 0,
+            "Enable debug-object double call_rcu() testing");
+torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
+torture_param(int, onoff_interval, 0,
+            "Time between CPU hotplugs (s), 0=disable");
+torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
+torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
+torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
+torture_param(int, stall_cpu_holdoff, 10,
+            "Time to wait before starting stall (s).");
+torture_param(int, stat_interval, 60,
+            "Number of seconds between stats printk()s");
+torture_param(int, stutter, 5, "Number of seconds to run/halt test");
+torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
+torture_param(int, test_boost_duration, 4,
+            "Duration of each boost test, seconds.");
+torture_param(int, test_boost_interval, 7,
+            "Interval between boost tests, seconds.");
+torture_param(bool, test_no_idle_hz, true,
+            "Test support for tickless idle CPUs");
+torture_param(bool, verbose, true,
+            "Enable verbose debugging printk()s");
+
 static char *torture_type = "rcu";
 module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
-static bool verbose;
-module_param(verbose, bool, 0444);
-MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-
-#define TORTURE_FLAG "-torture:"
-#define PRINTK_STRING(s) \
-       do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
-#define VERBOSE_PRINTK_STRING(s) \
-       do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0)
-#define VERBOSE_PRINTK_ERRSTRING(s) \
-       do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
 
 static int nrealreaders;
 static struct task_struct *writer_task;
 static struct task_struct **fakewriter_tasks;
 static struct task_struct **reader_tasks;
 static struct task_struct *stats_task;
-static struct task_struct *shuffler_task;
-static struct task_struct *stutter_task;
 static struct task_struct *fqs_task;
 static struct task_struct *boost_tasks[NR_CPUS];
-static struct task_struct *shutdown_task;
-#ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct *onoff_task;
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static struct task_struct *stall_task;
 static struct task_struct **barrier_cbs_tasks;
 static struct task_struct *barrier_task;
@@ -170,10 +118,10 @@ static struct rcu_torture __rcu *rcu_torture_current;
 static unsigned long rcu_torture_current_version;
 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 static DEFINE_SPINLOCK(rcu_torture_lock);
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
-       { 0 };
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
-       { 0 };
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
+                     rcu_torture_count) = { 0 };
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
+                     rcu_torture_batch) = { 0 };
 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
 static atomic_t n_rcu_torture_alloc;
 static atomic_t n_rcu_torture_alloc_fail;
@@ -186,22 +134,9 @@ static long n_rcu_torture_boost_rterror;
 static long n_rcu_torture_boost_failure;
 static long n_rcu_torture_boosts;
 static long n_rcu_torture_timers;
-static long n_offline_attempts;
-static long n_offline_successes;
-static unsigned long sum_offline;
-static int min_offline = -1;
-static int max_offline;
-static long n_online_attempts;
-static long n_online_successes;
-static unsigned long sum_online;
-static int min_online = -1;
-static int max_online;
 static long n_barrier_attempts;
 static long n_barrier_successes;
 static struct list_head rcu_torture_removed;
-static cpumask_var_t shuffle_tmp_mask;
-
-static int stutter_pause_test;
 
 #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
 #define RCUTORTURE_RUNNABLE_INIT 1
@@ -232,7 +167,6 @@ static u64 notrace rcu_trace_clock_local(void)
 }
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
-static unsigned long shutdown_time;    /* jiffies to system shutdown. */
 static unsigned long boost_starttime;  /* jiffies of next boost test start. */
 DEFINE_MUTEX(boost_mutex);             /* protect setting boost_starttime */
                                        /*  and boost task create/destroy. */
@@ -242,51 +176,6 @@ static atomic_t barrier_cbs_invoked;       /* Barrier callbacks invoked. */
 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
 
-/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
-
-#define FULLSTOP_DONTSTOP 0    /* Normal operation. */
-#define FULLSTOP_SHUTDOWN 1    /* System shutdown with rcutorture running. */
-#define FULLSTOP_RMMOD    2    /* Normal rmmod of rcutorture. */
-static int fullstop = FULLSTOP_RMMOD;
-/*
- * Protect fullstop transitions and spawning of kthreads.
- */
-static DEFINE_MUTEX(fullstop_mutex);
-
-/* Forward reference. */
-static void rcu_torture_cleanup(void);
-
-/*
- * Detect and respond to a system shutdown.
- */
-static int
-rcutorture_shutdown_notify(struct notifier_block *unused1,
-                          unsigned long unused2, void *unused3)
-{
-       mutex_lock(&fullstop_mutex);
-       if (fullstop == FULLSTOP_DONTSTOP)
-               fullstop = FULLSTOP_SHUTDOWN;
-       else
-               pr_warn(/* but going down anyway, so... */
-                      "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
-       mutex_unlock(&fullstop_mutex);
-       return NOTIFY_DONE;
-}
-
-/*
- * Absorb kthreads into a kernel function that won't return, so that
- * they won't ever access module text or data again.
- */
-static void rcutorture_shutdown_absorb(const char *title)
-{
-       if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
-               pr_notice(
-                      "rcutorture thread %s parking due to system shutdown\n",
-                      title);
-               schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
-       }
-}
-
 /*
  * Allocate an element from the rcu_tortures pool.
  */
@@ -320,44 +209,6 @@ rcu_torture_free(struct rcu_torture *p)
        spin_unlock_bh(&rcu_torture_lock);
 }
 
-struct rcu_random_state {
-       unsigned long rrs_state;
-       long rrs_count;
-};
-
-#define RCU_RANDOM_MULT 39916801  /* prime */
-#define RCU_RANDOM_ADD 479001701 /* prime */
-#define RCU_RANDOM_REFRESH 10000
-
-#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
-
-/*
- * Crude but fast random-number generator.  Uses a linear congruential
- * generator, with occasional help from cpu_clock().
- */
-static unsigned long
-rcu_random(struct rcu_random_state *rrsp)
-{
-       if (--rrsp->rrs_count < 0) {
-               rrsp->rrs_state += (unsigned long)local_clock();
-               rrsp->rrs_count = RCU_RANDOM_REFRESH;
-       }
-       rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
-       return swahw32(rrsp->rrs_state);
-}
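
For reference, a minimal stand-alone sketch of the generator described in the comment above, assuming the same constants and output halfword swap; the names, the clock() reseed source, and the main() driver are illustrative only and are not the kernel's torture_random(), into which this patch presumably moves the generator:

#include <stdio.h>
#include <time.h>

#define TORTURE_RANDOM_MULT	39916801	/* prime, as in the removed code */
#define TORTURE_RANDOM_ADD	479001701	/* prime, as in the removed code */
#define TORTURE_RANDOM_REFRESH	10000

struct torture_random_state {
	unsigned long trs_state;
	long trs_count;
};

/* Swap the 16-bit halves of a 32-bit value, like the kernel's swahw32(). */
static unsigned long swahw32_like(unsigned long x)
{
	unsigned int v = (unsigned int)x;

	return ((v & 0x0000ffffU) << 16) | ((v & 0xffff0000U) >> 16);
}

static unsigned long torture_random_sketch(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		/* Occasional reseed; the kernel code above uses local_clock(). */
		trsp->trs_state += (unsigned long)clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
			  TORTURE_RANDOM_ADD;
	return swahw32_like(trsp->trs_state);
}

int main(void)
{
	struct torture_random_state rand = { 0, 0 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%lu\n", torture_random_sketch(&rand) % 100);
	return 0;
}
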
-
-static void
-rcu_stutter_wait(const char *title)
-{
-       while (stutter_pause_test || !rcutorture_runnable) {
-               if (rcutorture_runnable)
-                       schedule_timeout_interruptible(1);
-               else
-                       schedule_timeout_interruptible(round_jiffies_relative(HZ));
-               rcutorture_shutdown_absorb(title);
-       }
-}
-
 /*
  * Operations vector for selecting different types of tests.
  */
@@ -365,7 +216,7 @@ rcu_stutter_wait(const char *title)
 struct rcu_torture_ops {
        void (*init)(void);
        int (*readlock)(void);
-       void (*read_delay)(struct rcu_random_state *rrsp);
+       void (*read_delay)(struct torture_random_state *rrsp);
        void (*readunlock)(int idx);
        int (*completed)(void);
        void (*deferred_free)(struct rcu_torture *p);
@@ -392,7 +243,7 @@ static int rcu_torture_read_lock(void) __acquires(RCU)
        return 0;
 }
 
-static void rcu_read_delay(struct rcu_random_state *rrsp)
+static void rcu_read_delay(struct torture_random_state *rrsp)
 {
        const unsigned long shortdelay_us = 200;
        const unsigned long longdelay_ms = 50;
@@ -401,12 +252,13 @@ static void rcu_read_delay(struct rcu_random_state *rrsp)
         * period, and we want a long delay occasionally to trigger
         * force_quiescent_state. */
 
-       if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
+       if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
                mdelay(longdelay_ms);
-       if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
+       if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
                udelay(shortdelay_us);
 #ifdef CONFIG_PREEMPT
-       if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
+       if (!preempt_count() &&
+           !(torture_random(rrsp) % (nrealreaders * 20000)))
                preempt_schedule();  /* No QS if preempt_disable() in effect */
 #endif
 }
@@ -427,7 +279,7 @@ rcu_torture_cb(struct rcu_head *p)
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 
-       if (fullstop != FULLSTOP_DONTSTOP) {
+       if (torture_must_stop_irq()) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
@@ -519,6 +371,48 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .name           = "rcu_bh"
 };
 
+/*
+ * Don't even think about trying any of these in real life!!!
+ * The names include "busted", and they really mean it!
+ * The only purpose of these functions is to provide a buggy RCU
+ * implementation to make sure that rcutorture correctly emits
+ * buggy-RCU error messages.
+ */
+static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
+{
+       /* This is a deliberate bug for testing purposes only! */
+       rcu_torture_cb(&p->rtort_rcu);
+}
+
+static void synchronize_rcu_busted(void)
+{
+       /* This is a deliberate bug for testing purposes only! */
+}
+
+static void
+call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+       /* This is a deliberate bug for testing purposes only! */
+       func(head);
+}
+
+static struct rcu_torture_ops rcu_busted_ops = {
+       .init           = rcu_sync_torture_init,
+       .readlock       = rcu_torture_read_lock,
+       .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
+       .readunlock     = rcu_torture_read_unlock,
+       .completed      = rcu_no_completed,
+       .deferred_free  = rcu_busted_torture_deferred_free,
+       .sync           = synchronize_rcu_busted,
+       .exp_sync       = synchronize_rcu_busted,
+       .call           = call_rcu_busted,
+       .cb_barrier     = NULL,
+       .fqs            = NULL,
+       .stats          = NULL,
+       .irq_capable    = 1,
+       .name           = "rcu_busted"
+};
+
 /*
  * Definitions for srcu torture testing.
  */
@@ -530,7 +424,7 @@ static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
        return srcu_read_lock(&srcu_ctl);
 }
 
-static void srcu_read_delay(struct rcu_random_state *rrsp)
+static void srcu_read_delay(struct torture_random_state *rrsp)
 {
        long delay;
        const long uspertick = 1000000 / HZ;
@@ -538,7 +432,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp)
 
        /* We want there to be long-running readers, but not all the time. */
 
-       delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
+       delay = torture_random(rrsp) %
+               (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
        else
@@ -677,12 +572,12 @@ static int rcu_torture_boost(void *arg)
        struct rcu_boost_inflight rbi = { .inflight = 0 };
        struct sched_param sp;
 
-       VERBOSE_PRINTK_STRING("rcu_torture_boost started");
+       VERBOSE_TOROUT_STRING("rcu_torture_boost started");
 
        /* Set real-time priority. */
        sp.sched_priority = 1;
        if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
-               VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
+               VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
                n_rcu_torture_boost_rterror++;
        }
 
@@ -693,9 +588,8 @@ static int rcu_torture_boost(void *arg)
                oldstarttime = boost_starttime;
                while (ULONG_CMP_LT(jiffies, oldstarttime)) {
                        schedule_timeout_interruptible(oldstarttime - jiffies);
-                       rcu_stutter_wait("rcu_torture_boost");
-                       if (kthread_should_stop() ||
-                           fullstop != FULLSTOP_DONTSTOP)
+                       stutter_wait("rcu_torture_boost");
+                       if (torture_must_stop())
                                goto checkwait;
                }
 
@@ -710,15 +604,14 @@ static int rcu_torture_boost(void *arg)
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
                                if (jiffies - call_rcu_time >
                                         test_boost_duration * HZ - HZ / 2) {
-                                       VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
+                                       VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
                                        n_rcu_torture_boost_failure++;
                                }
                                call_rcu_time = jiffies;
                        }
                        cond_resched();
-                       rcu_stutter_wait("rcu_torture_boost");
-                       if (kthread_should_stop() ||
-                           fullstop != FULLSTOP_DONTSTOP)
+                       stutter_wait("rcu_torture_boost");
+                       if (torture_must_stop())
                                goto checkwait;
                }
 
@@ -742,16 +635,17 @@ static int rcu_torture_boost(void *arg)
                }
 
                /* Go do the stutter. */
-checkwait:     rcu_stutter_wait("rcu_torture_boost");
-       } while (!kthread_should_stop() && fullstop  == FULLSTOP_DONTSTOP);
+checkwait:     stutter_wait("rcu_torture_boost");
+       } while (!torture_must_stop());
 
        /* Clean up and exit. */
-       VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_boost");
-       while (!kthread_should_stop() || rbi.inflight)
+       while (!kthread_should_stop() || rbi.inflight) {
+               torture_shutdown_absorb("rcu_torture_boost");
                schedule_timeout_uninterruptible(1);
+       }
        smp_mb(); /* order accesses to ->inflight before stack-frame death. */
        destroy_rcu_head_on_stack(&rbi.rcu);
+       torture_kthread_stopping("rcu_torture_boost");
        return 0;
 }
 
@@ -766,7 +660,7 @@ rcu_torture_fqs(void *arg)
        unsigned long fqs_resume_time;
        int fqs_burst_remaining;
 
-       VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
+       VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
        do {
                fqs_resume_time = jiffies + fqs_stutter * HZ;
                while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
@@ -780,12 +674,9 @@ rcu_torture_fqs(void *arg)
                        udelay(fqs_holdoff);
                        fqs_burst_remaining -= fqs_holdoff;
                }
-               rcu_stutter_wait("rcu_torture_fqs");
-       } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
-       VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_fqs");
-       while (!kthread_should_stop())
-               schedule_timeout_uninterruptible(1);
+               stutter_wait("rcu_torture_fqs");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("rcu_torture_fqs");
        return 0;
 }
 
@@ -802,10 +693,10 @@ rcu_torture_writer(void *arg)
        struct rcu_torture *rp;
        struct rcu_torture *rp1;
        struct rcu_torture *old_rp;
-       static DEFINE_RCU_RANDOM(rand);
+       static DEFINE_TORTURE_RANDOM(rand);
 
-       VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
-       set_user_nice(current, 19);
+       VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
+       set_user_nice(current, MAX_NICE);
 
        do {
                schedule_timeout_uninterruptible(1);
@@ -813,7 +704,7 @@ rcu_torture_writer(void *arg)
                if (rp == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
-               udelay(rcu_random(&rand) & 0x3ff);
+               udelay(torture_random(&rand) & 0x3ff);
                old_rp = rcu_dereference_check(rcu_torture_current,
                                               current == writer_task);
                rp->rtort_mbtest = 1;
@@ -826,7 +717,7 @@ rcu_torture_writer(void *arg)
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        if (gp_normal == gp_exp)
-                               exp = !!(rcu_random(&rand) & 0x80);
+                               exp = !!(torture_random(&rand) & 0x80);
                        else
                                exp = gp_exp;
                        if (!exp) {
@@ -852,12 +743,9 @@ rcu_torture_writer(void *arg)
                        }
                }
                rcutorture_record_progress(++rcu_torture_current_version);
-               rcu_stutter_wait("rcu_torture_writer");
-       } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
-       VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_writer");
-       while (!kthread_should_stop())
-               schedule_timeout_uninterruptible(1);
+               stutter_wait("rcu_torture_writer");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("rcu_torture_writer");
        return 0;
 }
 
@@ -868,19 +756,19 @@ rcu_torture_writer(void *arg)
 static int
 rcu_torture_fakewriter(void *arg)
 {
-       DEFINE_RCU_RANDOM(rand);
+       DEFINE_TORTURE_RANDOM(rand);
 
-       VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
-       set_user_nice(current, 19);
+       VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
+       set_user_nice(current, MAX_NICE);
 
        do {
-               schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
-               udelay(rcu_random(&rand) & 0x3ff);
+               schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
+               udelay(torture_random(&rand) & 0x3ff);
                if (cur_ops->cb_barrier != NULL &&
-                   rcu_random(&rand) % (nfakewriters * 8) == 0) {
+                   torture_random(&rand) % (nfakewriters * 8) == 0) {
                        cur_ops->cb_barrier();
                } else if (gp_normal == gp_exp) {
-                       if (rcu_random(&rand) & 0x80)
+                       if (torture_random(&rand) & 0x80)
                                cur_ops->sync();
                        else
                                cur_ops->exp_sync();
@@ -889,13 +777,10 @@ rcu_torture_fakewriter(void *arg)
                } else {
                        cur_ops->exp_sync();
                }
-               rcu_stutter_wait("rcu_torture_fakewriter");
-       } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
+               stutter_wait("rcu_torture_fakewriter");
+       } while (!torture_must_stop());
 
-       VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_fakewriter");
-       while (!kthread_should_stop())
-               schedule_timeout_uninterruptible(1);
+       torture_kthread_stopping("rcu_torture_fakewriter");
        return 0;
 }
 
@@ -921,7 +806,7 @@ static void rcu_torture_timer(unsigned long unused)
        int idx;
        int completed;
        int completed_end;
-       static DEFINE_RCU_RANDOM(rand);
+       static DEFINE_TORTURE_RANDOM(rand);
        static DEFINE_SPINLOCK(rand_lock);
        struct rcu_torture *p;
        int pipe_count;
@@ -980,14 +865,14 @@ rcu_torture_reader(void *arg)
        int completed;
        int completed_end;
        int idx;
-       DEFINE_RCU_RANDOM(rand);
+       DEFINE_TORTURE_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;
        struct timer_list t;
        unsigned long long ts;
 
-       VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
-       set_user_nice(current, 19);
+       VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
+       set_user_nice(current, MAX_NICE);
        if (irqreader && cur_ops->irq_capable)
                setup_timer_on_stack(&t, rcu_torture_timer, 0);
 
@@ -1034,14 +919,11 @@ rcu_torture_reader(void *arg)
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
-               rcu_stutter_wait("rcu_torture_reader");
-       } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
-       VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_reader");
+               stutter_wait("rcu_torture_reader");
+       } while (!torture_must_stop());
        if (irqreader && cur_ops->irq_capable)
                del_timer_sync(&t);
-       while (!kthread_should_stop())
-               schedule_timeout_uninterruptible(1);
+       torture_kthread_stopping("rcu_torture_reader");
        return 0;
 }
 
@@ -1083,13 +965,7 @@ rcu_torture_printk(char *page)
                       n_rcu_torture_boost_failure,
                       n_rcu_torture_boosts,
                       n_rcu_torture_timers);
-       page += sprintf(page,
-                      "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
-                      n_online_successes, n_online_attempts,
-                      n_offline_successes, n_offline_attempts,
-                      min_online, max_online,
-                      min_offline, max_offline,
-                      sum_online, sum_offline, HZ);
+       page = torture_onoff_stats(page);
        page += sprintf(page, "barrier: %ld/%ld:%ld",
                       n_barrier_successes,
                       n_barrier_attempts,
@@ -1150,123 +1026,17 @@ rcu_torture_stats_print(void)
 /*
  * Periodically prints torture statistics, if periodic statistics printing
  * was specified via the stat_interval module parameter.
- *
- * No need to worry about fullstop here, since this one doesn't reference
- * volatile state or register callbacks.
  */
 static int
 rcu_torture_stats(void *arg)
 {
-       VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
+       VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
-               rcutorture_shutdown_absorb("rcu_torture_stats");
-       } while (!kthread_should_stop());
-       VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
-       return 0;
-}
-
-static int rcu_idle_cpu;       /* Force all torture tasks off this CPU */
-
-/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
- * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
- */
-static void rcu_torture_shuffle_tasks(void)
-{
-       int i;
-
-       cpumask_setall(shuffle_tmp_mask);
-       get_online_cpus();
-
-       /* No point in shuffling if there is only one online CPU (ex: UP) */
-       if (num_online_cpus() == 1) {
-               put_online_cpus();
-               return;
-       }
-
-       if (rcu_idle_cpu != -1)
-               cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
-
-       set_cpus_allowed_ptr(current, shuffle_tmp_mask);
-
-       if (reader_tasks) {
-               for (i = 0; i < nrealreaders; i++)
-                       if (reader_tasks[i])
-                               set_cpus_allowed_ptr(reader_tasks[i],
-                                                    shuffle_tmp_mask);
-       }
-       if (fakewriter_tasks) {
-               for (i = 0; i < nfakewriters; i++)
-                       if (fakewriter_tasks[i])
-                               set_cpus_allowed_ptr(fakewriter_tasks[i],
-                                                    shuffle_tmp_mask);
-       }
-       if (writer_task)
-               set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
-       if (stats_task)
-               set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
-       if (stutter_task)
-               set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
-       if (fqs_task)
-               set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
-       if (shutdown_task)
-               set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
-#ifdef CONFIG_HOTPLUG_CPU
-       if (onoff_task)
-               set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-       if (stall_task)
-               set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
-       if (barrier_cbs_tasks)
-               for (i = 0; i < n_barrier_cbs; i++)
-                       if (barrier_cbs_tasks[i])
-                               set_cpus_allowed_ptr(barrier_cbs_tasks[i],
-                                                    shuffle_tmp_mask);
-       if (barrier_task)
-               set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
-
-       if (rcu_idle_cpu == -1)
-               rcu_idle_cpu = num_online_cpus() - 1;
-       else
-               rcu_idle_cpu--;
-
-       put_online_cpus();
-}
-
-/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
- * system to become idle at a time and cut off its timer ticks. This is meant
- * to test the support for such tickless idle CPU in RCU.
- */
-static int
-rcu_torture_shuffle(void *arg)
-{
-       VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
-       do {
-               schedule_timeout_interruptible(shuffle_interval * HZ);
-               rcu_torture_shuffle_tasks();
-               rcutorture_shutdown_absorb("rcu_torture_shuffle");
-       } while (!kthread_should_stop());
-       VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
-       return 0;
-}
-
-/* Cause the rcutorture test to "stutter", starting and stopping all
- * threads periodically.
- */
-static int
-rcu_torture_stutter(void *arg)
-{
-       VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
-       do {
-               schedule_timeout_interruptible(stutter * HZ);
-               stutter_pause_test = 1;
-               if (!kthread_should_stop())
-                       schedule_timeout_interruptible(stutter * HZ);
-               stutter_pause_test = 0;
-               rcutorture_shutdown_absorb("rcu_torture_stutter");
-       } while (!kthread_should_stop());
-       VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
+               torture_shutdown_absorb("rcu_torture_stats");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("rcu_torture_stats");
        return 0;
 }
 
@@ -1293,10 +1063,6 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
                 onoff_interval, onoff_holdoff);
 }
 
-static struct notifier_block rcutorture_shutdown_nb = {
-       .notifier_call = rcutorture_shutdown_notify,
-};
-
 static void rcutorture_booster_cleanup(int cpu)
 {
        struct task_struct *t;
@@ -1304,14 +1070,12 @@ static void rcutorture_booster_cleanup(int cpu)
        if (boost_tasks[cpu] == NULL)
                return;
        mutex_lock(&boost_mutex);
-       VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
        t = boost_tasks[cpu];
        boost_tasks[cpu] = NULL;
        mutex_unlock(&boost_mutex);
 
        /* This must be outside of the mutex, otherwise deadlock! */
-       kthread_stop(t);
-       boost_tasks[cpu] = NULL;
+       torture_stop_kthread(rcu_torture_boost, t);
 }
 
 static int rcutorture_booster_init(int cpu)
@@ -1323,13 +1087,13 @@ static int rcutorture_booster_init(int cpu)
 
        /* Don't allow time recalculation while creating a new task. */
        mutex_lock(&boost_mutex);
-       VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
+       VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
        boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
                                                  cpu_to_node(cpu),
                                                  "rcu_torture_boost");
        if (IS_ERR(boost_tasks[cpu])) {
                retval = PTR_ERR(boost_tasks[cpu]);
-               VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
+               VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
                n_rcu_torture_boost_ktrerror++;
                boost_tasks[cpu] = NULL;
                mutex_unlock(&boost_mutex);
@@ -1341,175 +1105,6 @@ static int rcutorture_booster_init(int cpu)
        return 0;
 }
 
-/*
- * Cause the rcutorture test to shutdown the system after the test has
- * run for the time specified by the shutdown_secs module parameter.
- */
-static int
-rcu_torture_shutdown(void *arg)
-{
-       long delta;
-       unsigned long jiffies_snap;
-
-       VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
-       jiffies_snap = ACCESS_ONCE(jiffies);
-       while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
-              !kthread_should_stop()) {
-               delta = shutdown_time - jiffies_snap;
-               if (verbose)
-                       pr_alert("%s" TORTURE_FLAG
-                                "rcu_torture_shutdown task: %lu jiffies remaining\n",
-                                torture_type, delta);
-               schedule_timeout_interruptible(delta);
-               jiffies_snap = ACCESS_ONCE(jiffies);
-       }
-       if (kthread_should_stop()) {
-               VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
-               return 0;
-       }
-
-       /* OK, shut down the system. */
-
-       VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
-       shutdown_task = NULL;   /* Avoid self-kill deadlock. */
-       rcu_torture_cleanup();  /* Get the success/failure message. */
-       kernel_power_off();     /* Shut down the system. */
-       return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Execute random CPU-hotplug operations at the interval specified
- * by the onoff_interval.
- */
-static int
-rcu_torture_onoff(void *arg)
-{
-       int cpu;
-       unsigned long delta;
-       int maxcpu = -1;
-       DEFINE_RCU_RANDOM(rand);
-       int ret;
-       unsigned long starttime;
-
-       VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
-       for_each_online_cpu(cpu)
-               maxcpu = cpu;
-       WARN_ON(maxcpu < 0);
-       if (onoff_holdoff > 0) {
-               VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
-               schedule_timeout_interruptible(onoff_holdoff * HZ);
-               VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
-       }
-       while (!kthread_should_stop()) {
-               cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
-               if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
-                       if (verbose)
-                               pr_alert("%s" TORTURE_FLAG
-                                        "rcu_torture_onoff task: offlining %d\n",
-                                        torture_type, cpu);
-                       starttime = jiffies;
-                       n_offline_attempts++;
-                       ret = cpu_down(cpu);
-                       if (ret) {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "rcu_torture_onoff task: offline %d failed: errno %d\n",
-                                                torture_type, cpu, ret);
-                       } else {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "rcu_torture_onoff task: offlined %d\n",
-                                                torture_type, cpu);
-                               n_offline_successes++;
-                               delta = jiffies - starttime;
-                               sum_offline += delta;
-                               if (min_offline < 0) {
-                                       min_offline = delta;
-                                       max_offline = delta;
-                               }
-                               if (min_offline > delta)
-                                       min_offline = delta;
-                               if (max_offline < delta)
-                                       max_offline = delta;
-                       }
-               } else if (cpu_is_hotpluggable(cpu)) {
-                       if (verbose)
-                               pr_alert("%s" TORTURE_FLAG
-                                        "rcu_torture_onoff task: onlining %d\n",
-                                        torture_type, cpu);
-                       starttime = jiffies;
-                       n_online_attempts++;
-                       ret = cpu_up(cpu);
-                       if (ret) {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "rcu_torture_onoff task: online %d failed: errno %d\n",
-                                                torture_type, cpu, ret);
-                       } else {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "rcu_torture_onoff task: onlined %d\n",
-                                                torture_type, cpu);
-                               n_online_successes++;
-                               delta = jiffies - starttime;
-                               sum_online += delta;
-                               if (min_online < 0) {
-                                       min_online = delta;
-                                       max_online = delta;
-                               }
-                               if (min_online > delta)
-                                       min_online = delta;
-                               if (max_online < delta)
-                                       max_online = delta;
-                       }
-               }
-               schedule_timeout_interruptible(onoff_interval * HZ);
-       }
-       VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
-       return 0;
-}
-
-static int
-rcu_torture_onoff_init(void)
-{
-       int ret;
-
-       if (onoff_interval <= 0)
-               return 0;
-       onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
-       if (IS_ERR(onoff_task)) {
-               ret = PTR_ERR(onoff_task);
-               onoff_task = NULL;
-               return ret;
-       }
-       return 0;
-}
-
-static void rcu_torture_onoff_cleanup(void)
-{
-       if (onoff_task == NULL)
-               return;
-       VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
-       kthread_stop(onoff_task);
-       onoff_task = NULL;
-}
-
-#else /* #ifdef CONFIG_HOTPLUG_CPU */
-
-static int
-rcu_torture_onoff_init(void)
-{
-       return 0;
-}
-
-static void rcu_torture_onoff_cleanup(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
  * induces a CPU stall for the time specified by stall_cpu.
@@ -1518,11 +1113,11 @@ static int rcu_torture_stall(void *args)
 {
        unsigned long stop_at;
 
-       VERBOSE_PRINTK_STRING("rcu_torture_stall task started");
+       VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
        if (stall_cpu_holdoff > 0) {
-               VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff");
+               VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
                schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
-               VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff");
+               VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
        }
        if (!kthread_should_stop()) {
                stop_at = get_seconds() + stall_cpu;
@@ -1536,7 +1131,7 @@ static int rcu_torture_stall(void *args)
                rcu_read_unlock();
                pr_alert("rcu_torture_stall end.\n");
        }
-       rcutorture_shutdown_absorb("rcu_torture_stall");
+       torture_shutdown_absorb("rcu_torture_stall");
        while (!kthread_should_stop())
                schedule_timeout_interruptible(10 * HZ);
        return 0;
@@ -1545,27 +1140,9 @@ static int rcu_torture_stall(void *args)
 /* Spawn CPU-stall kthread, if stall_cpu specified. */
 static int __init rcu_torture_stall_init(void)
 {
-       int ret;
-
        if (stall_cpu <= 0)
                return 0;
-       stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
-       if (IS_ERR(stall_task)) {
-               ret = PTR_ERR(stall_task);
-               stall_task = NULL;
-               return ret;
-       }
-       return 0;
-}
-
-/* Clean up after the CPU-stall kthread, if one was spawned. */
-static void rcu_torture_stall_cleanup(void)
-{
-       if (stall_task == NULL)
-               return;
-       VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task.");
-       kthread_stop(stall_task);
-       stall_task = NULL;
+       return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
 }
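
Each conversion of this shape replaces the same removed boilerplate -- kthread_run(), an IS_ERR() check, an error message, and NULL-ing the saved task pointer -- with one torture_create_kthread() call. A minimal sketch of such a helper, assuming only what the removed lines show (the helper name and signature below are hypothetical, not the shared torture code's actual API):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>

/* Hypothetical, simplified stand-in for the factored-out create helper. */
static int example_create_kthread(int (*fn)(void *data), void *arg,
				  const char *name, struct task_struct **tp)
{
	int ret = 0;

	*tp = kthread_run(fn, arg, "%s", name);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		pr_alert("Failed to create %s task: %d\n", name, ret);
		*tp = NULL;		/* Caller keeps a consistent view. */
	}
	return ret;
}

The torture_stop_kthread() calls later in this diff centralize the removed stop-side boilerplate (verbose message, kthread_stop(), NULL-ing the pointer) in the same way.
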
 
 /* Callback function for RCU barrier testing. */
@@ -1583,28 +1160,24 @@ static int rcu_torture_barrier_cbs(void *arg)
        struct rcu_head rcu;
 
        init_rcu_head_on_stack(&rcu);
-       VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started");
-       set_user_nice(current, 19);
+       VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
+       set_user_nice(current, MAX_NICE);
        do {
                wait_event(barrier_cbs_wq[myid],
                           (newphase =
                            ACCESS_ONCE(barrier_phase)) != lastphase ||
-                          kthread_should_stop() ||
-                          fullstop != FULLSTOP_DONTSTOP);
+                          torture_must_stop());
                lastphase = newphase;
                smp_mb(); /* ensure barrier_phase load before ->call(). */
-               if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
+               if (torture_must_stop())
                        break;
                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
                if (atomic_dec_and_test(&barrier_cbs_count))
                        wake_up(&barrier_wq);
-       } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
-       VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
-       while (!kthread_should_stop())
-               schedule_timeout_interruptible(1);
+       } while (!torture_must_stop());
        cur_ops->cb_barrier();
        destroy_rcu_head_on_stack(&rcu);
+       torture_kthread_stopping("rcu_torture_barrier_cbs");
        return 0;
 }
 
@@ -1613,7 +1186,7 @@ static int rcu_torture_barrier(void *arg)
 {
        int i;
 
-       VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting");
+       VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
        do {
                atomic_set(&barrier_cbs_invoked, 0);
                atomic_set(&barrier_cbs_count, n_barrier_cbs);
@@ -1623,9 +1196,8 @@ static int rcu_torture_barrier(void *arg)
                        wake_up(&barrier_cbs_wq[i]);
                wait_event(barrier_wq,
                           atomic_read(&barrier_cbs_count) == 0 ||
-                          kthread_should_stop() ||
-                          fullstop != FULLSTOP_DONTSTOP);
-               if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
+                          torture_must_stop());
+               if (torture_must_stop())
                        break;
                n_barrier_attempts++;
                cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
@@ -1635,11 +1207,8 @@ static int rcu_torture_barrier(void *arg)
                }
                n_barrier_successes++;
                schedule_timeout_interruptible(HZ / 10);
-       } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
-       VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
-       rcutorture_shutdown_absorb("rcu_torture_barrier");
-       while (!kthread_should_stop())
-               schedule_timeout_interruptible(1);
+       } while (!torture_must_stop());
+       torture_kthread_stopping("rcu_torture_barrier");
        return 0;
 }
 
@@ -1672,24 +1241,13 @@ static int rcu_torture_barrier_init(void)
                return -ENOMEM;
        for (i = 0; i < n_barrier_cbs; i++) {
                init_waitqueue_head(&barrier_cbs_wq[i]);
-               barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
-                                                  (void *)(long)i,
-                                                  "rcu_torture_barrier_cbs");
-               if (IS_ERR(barrier_cbs_tasks[i])) {
-                       ret = PTR_ERR(barrier_cbs_tasks[i]);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
-                       barrier_cbs_tasks[i] = NULL;
+               ret = torture_create_kthread(rcu_torture_barrier_cbs,
+                                            (void *)(long)i,
+                                            barrier_cbs_tasks[i]);
+               if (ret)
                        return ret;
-               }
        }
-       barrier_task = kthread_run(rcu_torture_barrier, NULL,
-                                  "rcu_torture_barrier");
-       if (IS_ERR(barrier_task)) {
-               ret = PTR_ERR(barrier_task);
-               VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier");
-               barrier_task = NULL;
-       }
-       return 0;
+       return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
 }
 
 /* Clean up after RCU barrier testing. */
@@ -1697,19 +1255,11 @@ static void rcu_torture_barrier_cleanup(void)
 {
        int i;
 
-       if (barrier_task != NULL) {
-               VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task");
-               kthread_stop(barrier_task);
-               barrier_task = NULL;
-       }
+       torture_stop_kthread(rcu_torture_barrier, barrier_task);
        if (barrier_cbs_tasks != NULL) {
-               for (i = 0; i < n_barrier_cbs; i++) {
-                       if (barrier_cbs_tasks[i] != NULL) {
-                               VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task");
-                               kthread_stop(barrier_cbs_tasks[i]);
-                               barrier_cbs_tasks[i] = NULL;
-                       }
-               }
+               for (i = 0; i < n_barrier_cbs; i++)
+                       torture_stop_kthread(rcu_torture_barrier_cbs,
+                                            barrier_cbs_tasks[i]);
                kfree(barrier_cbs_tasks);
                barrier_cbs_tasks = NULL;
        }
@@ -1747,90 +1297,42 @@ rcu_torture_cleanup(void)
 {
        int i;
 
-       mutex_lock(&fullstop_mutex);
        rcutorture_record_test_transition();
-       if (fullstop == FULLSTOP_SHUTDOWN) {
-               pr_warn(/* but going down anyway, so... */
-                      "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
-               mutex_unlock(&fullstop_mutex);
-               schedule_timeout_uninterruptible(10);
+       if (torture_cleanup()) {
                if (cur_ops->cb_barrier != NULL)
                        cur_ops->cb_barrier();
                return;
        }
-       fullstop = FULLSTOP_RMMOD;
-       mutex_unlock(&fullstop_mutex);
-       unregister_reboot_notifier(&rcutorture_shutdown_nb);
-       rcu_torture_barrier_cleanup();
-       rcu_torture_stall_cleanup();
-       if (stutter_task) {
-               VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
-               kthread_stop(stutter_task);
-       }
-       stutter_task = NULL;
-       if (shuffler_task) {
-               VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
-               kthread_stop(shuffler_task);
-               free_cpumask_var(shuffle_tmp_mask);
-       }
-       shuffler_task = NULL;
 
-       if (writer_task) {
-               VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
-               kthread_stop(writer_task);
-       }
-       writer_task = NULL;
+       rcu_torture_barrier_cleanup();
+       torture_stop_kthread(rcu_torture_stall, stall_task);
+       torture_stop_kthread(rcu_torture_writer, writer_task);
 
        if (reader_tasks) {
-               for (i = 0; i < nrealreaders; i++) {
-                       if (reader_tasks[i]) {
-                               VERBOSE_PRINTK_STRING(
-                                       "Stopping rcu_torture_reader task");
-                               kthread_stop(reader_tasks[i]);
-                       }
-                       reader_tasks[i] = NULL;
-               }
+               for (i = 0; i < nrealreaders; i++)
+                       torture_stop_kthread(rcu_torture_reader,
+                                            reader_tasks[i]);
                kfree(reader_tasks);
-               reader_tasks = NULL;
        }
        rcu_torture_current = NULL;
 
        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++) {
-                       if (fakewriter_tasks[i]) {
-                               VERBOSE_PRINTK_STRING(
-                                       "Stopping rcu_torture_fakewriter task");
-                               kthread_stop(fakewriter_tasks[i]);
-                       }
-                       fakewriter_tasks[i] = NULL;
+                       torture_stop_kthread(rcu_torture_fakewriter,
+                                            fakewriter_tasks[i]);
                }
                kfree(fakewriter_tasks);
                fakewriter_tasks = NULL;
        }
 
-       if (stats_task) {
-               VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
-               kthread_stop(stats_task);
-       }
-       stats_task = NULL;
-
-       if (fqs_task) {
-               VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
-               kthread_stop(fqs_task);
-       }
-       fqs_task = NULL;
+       torture_stop_kthread(rcu_torture_stats, stats_task);
+       torture_stop_kthread(rcu_torture_fqs, fqs_task);
        if ((test_boost == 1 && cur_ops->can_boost) ||
            test_boost == 2) {
                unregister_cpu_notifier(&rcutorture_cpu_nb);
                for_each_possible_cpu(i)
                        rcutorture_booster_cleanup(i);
        }
-       if (shutdown_task != NULL) {
-               VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
-               kthread_stop(shutdown_task);
-       }
-       shutdown_task = NULL;
-       rcu_torture_onoff_cleanup();
 
        /* Wait for all RCU callbacks to fire.  */
 
@@ -1841,8 +1343,7 @@ rcu_torture_cleanup(void)
 
        if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
                rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
-       else if (n_online_successes != n_online_attempts ||
-                n_offline_successes != n_offline_attempts)
+       else if (torture_onoff_failures())
                rcu_torture_print_module_parms(cur_ops,
                                               "End of test: RCU_HOTPLUG");
        else
@@ -1911,12 +1412,11 @@ rcu_torture_init(void)
        int i;
        int cpu;
        int firsterr = 0;
-       int retval;
        static struct rcu_torture_ops *torture_ops[] = {
-               &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
+               &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops,
        };
 
-       mutex_lock(&fullstop_mutex);
+       torture_init_begin(torture_type, verbose, &rcutorture_runnable);
 
        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
@@ -1931,7 +1431,7 @@ rcu_torture_init(void)
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        pr_alert(" %s", torture_ops[i]->name);
                pr_alert("\n");
-               mutex_unlock(&fullstop_mutex);
+               torture_init_end();
                return -EINVAL;
        }
        if (cur_ops->fqs == NULL && fqs_duration != 0) {
@@ -1946,7 +1446,6 @@ rcu_torture_init(void)
        else
                nrealreaders = 2 * num_online_cpus();
        rcu_torture_print_module_parms(cur_ops, "Start of test");
-       fullstop = FULLSTOP_DONTSTOP;
 
        /* Set up the freelist. */
 
@@ -1982,108 +1481,61 @@ rcu_torture_init(void)
 
        /* Start up the kthreads. */
 
-       VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
-       writer_task = kthread_create(rcu_torture_writer, NULL,
-                                    "rcu_torture_writer");
-       if (IS_ERR(writer_task)) {
-               firsterr = PTR_ERR(writer_task);
-               VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
-               writer_task = NULL;
+       firsterr = torture_create_kthread(rcu_torture_writer, NULL,
+                                         writer_task);
+       if (firsterr)
                goto unwind;
-       }
-       wake_up_process(writer_task);
        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                   GFP_KERNEL);
        if (fakewriter_tasks == NULL) {
-               VERBOSE_PRINTK_ERRSTRING("out of memory");
+               VERBOSE_TOROUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nfakewriters; i++) {
-               VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
-               fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
-                                                 "rcu_torture_fakewriter");
-               if (IS_ERR(fakewriter_tasks[i])) {
-                       firsterr = PTR_ERR(fakewriter_tasks[i]);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
-                       fakewriter_tasks[i] = NULL;
+               firsterr = torture_create_kthread(rcu_torture_fakewriter,
+                                                 NULL, fakewriter_tasks[i]);
+               if (firsterr)
                        goto unwind;
-               }
        }
        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
-               VERBOSE_PRINTK_ERRSTRING("out of memory");
+               VERBOSE_TOROUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
-               VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
-               reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
-                                             "rcu_torture_reader");
-               if (IS_ERR(reader_tasks[i])) {
-                       firsterr = PTR_ERR(reader_tasks[i]);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
-                       reader_tasks[i] = NULL;
+               firsterr = torture_create_kthread(rcu_torture_reader, NULL,
+                                                 reader_tasks[i]);
+               if (firsterr)
                        goto unwind;
-               }
        }
        if (stat_interval > 0) {
-               VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
-               stats_task = kthread_run(rcu_torture_stats, NULL,
-                                       "rcu_torture_stats");
-               if (IS_ERR(stats_task)) {
-                       firsterr = PTR_ERR(stats_task);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
-                       stats_task = NULL;
+               firsterr = torture_create_kthread(rcu_torture_stats, NULL,
+                                                 stats_task);
+               if (firsterr)
                        goto unwind;
-               }
        }
        if (test_no_idle_hz) {
-               rcu_idle_cpu = num_online_cpus() - 1;
-
-               if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
-                       firsterr = -ENOMEM;
-                       VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
-                       goto unwind;
-               }
-
-               /* Create the shuffler thread */
-               shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
-                                         "rcu_torture_shuffle");
-               if (IS_ERR(shuffler_task)) {
-                       free_cpumask_var(shuffle_tmp_mask);
-                       firsterr = PTR_ERR(shuffler_task);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
-                       shuffler_task = NULL;
+               firsterr = torture_shuffle_init(shuffle_interval * HZ);
+               if (firsterr)
                        goto unwind;
-               }
        }
        if (stutter < 0)
                stutter = 0;
        if (stutter) {
-               /* Create the stutter thread */
-               stutter_task = kthread_run(rcu_torture_stutter, NULL,
-                                         "rcu_torture_stutter");
-               if (IS_ERR(stutter_task)) {
-                       firsterr = PTR_ERR(stutter_task);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
-                       stutter_task = NULL;
+               firsterr = torture_stutter_init(stutter * HZ);
+               if (firsterr)
                        goto unwind;
-               }
        }
        if (fqs_duration < 0)
                fqs_duration = 0;
        if (fqs_duration) {
-               /* Create the stutter thread */
-               fqs_task = kthread_run(rcu_torture_fqs, NULL,
-                                      "rcu_torture_fqs");
-               if (IS_ERR(fqs_task)) {
-                       firsterr = PTR_ERR(fqs_task);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
-                       fqs_task = NULL;
+               /* Create the fqs thread */
+               firsterr = torture_create_kthread(rcu_torture_fqs, NULL, fqs_task);
+               if (firsterr)
                        goto unwind;
-               }
        }
        if (test_boost_interval < 1)
                test_boost_interval = 1;
@@ -2097,49 +1549,31 @@ rcu_torture_init(void)
                for_each_possible_cpu(i) {
                        if (cpu_is_offline(i))
                                continue;  /* Heuristic: CPU can go offline. */
-                       retval = rcutorture_booster_init(i);
-                       if (retval < 0) {
-                               firsterr = retval;
+                       firsterr = rcutorture_booster_init(i);
+                       if (firsterr)
                                goto unwind;
-                       }
                }
        }
-       if (shutdown_secs > 0) {
-               shutdown_time = jiffies + shutdown_secs * HZ;
-               shutdown_task = kthread_create(rcu_torture_shutdown, NULL,
-                                              "rcu_torture_shutdown");
-               if (IS_ERR(shutdown_task)) {
-                       firsterr = PTR_ERR(shutdown_task);
-                       VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
-                       shutdown_task = NULL;
-                       goto unwind;
-               }
-               wake_up_process(shutdown_task);
-       }
-       i = rcu_torture_onoff_init();
-       if (i != 0) {
-               firsterr = i;
+       firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
+       if (firsterr)
                goto unwind;
-       }
-       register_reboot_notifier(&rcutorture_shutdown_nb);
-       i = rcu_torture_stall_init();
-       if (i != 0) {
-               firsterr = i;
+       firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
+       if (firsterr)
                goto unwind;
-       }
-       retval = rcu_torture_barrier_init();
-       if (retval != 0) {
-               firsterr = retval;
+       firsterr = rcu_torture_stall_init();
+       if (firsterr)
+               goto unwind;
+       firsterr = rcu_torture_barrier_init();
+       if (firsterr)
                goto unwind;
-       }
        if (object_debug)
                rcu_test_debug_objects();
        rcutorture_record_test_transition();
-       mutex_unlock(&fullstop_mutex);
+       torture_init_end();
        return 0;
 
 unwind:
-       mutex_unlock(&fullstop_mutex);
+       torture_init_end();
        rcu_torture_cleanup();
        return firsterr;
 }
index 3318d82843841971f4926d8a8f9eb9a375e2eab3..c639556f3fa06234dcc34517826fbf0420641e56 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright (C) IBM Corporation, 2006
  * Copyright (C) Fujitsu, 2012
@@ -36,8 +36,6 @@
 #include <linux/delay.h>
 #include <linux/srcu.h>
 
-#include <trace/events/rcu.h>
-
 #include "rcu.h"
 
 /*
@@ -398,7 +396,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
-               schedule_delayed_work(&sp->work, 0);
+               queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
 }
@@ -674,7 +672,8 @@ static void srcu_reschedule(struct srcu_struct *sp)
        }
 
        if (pending)
-               schedule_delayed_work(&sp->work, SRCU_INTERVAL);
+               queue_delayed_work(system_power_efficient_wq,
+                                  &sp->work, SRCU_INTERVAL);
 }
 
 /*
index 1254f312d02483f524319ee5ab9d2e1a13f48678..d9efcc13008c00201c130f87135348c7238118ff 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
 #include <linux/prefetch.h>
 #include <linux/ftrace_event.h>
 
-#ifdef CONFIG_RCU_TRACE
-#include <trace/events/rcu.h>
-#endif /* #else #ifdef CONFIG_RCU_TRACE */
-
 #include "rcu.h"
 
 /* Forward declarations for tiny_plugin.h. */
index 280d06cae3524160833d3441b5de5af3462798c7..4315285205626f7c27c38c774a4522a12b04adcf 100644 (file)
@@ -14,8 +14,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright (c) 2010 Linaro
  *
index b3d116cd072d7bd24803a52c8d6b478930bd6b8b..0c47e300210ad61c3d79854426150fd5246f5750 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
@@ -58,8 +58,6 @@
 #include <linux/suspend.h>
 
 #include "tree.h"
-#include <trace/events/rcu.h>
-
 #include "rcu.h"
 
 MODULE_ALIAS("rcutree");
@@ -837,7 +835,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
         * to the next.  Only do this for the primary flavor of RCU.
         */
        if (rdp->rsp == rcu_state &&
-           ULONG_CMP_GE(ACCESS_ONCE(jiffies), rdp->rsp->jiffies_resched)) {
+           ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
                rdp->rsp->jiffies_resched += 5;
                resched_cpu(rdp->cpu);
        }
@@ -847,7 +845,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
 {
-       unsigned long j = ACCESS_ONCE(jiffies);
+       unsigned long j = jiffies;
        unsigned long j1;
 
        rsp->gp_start = j;
@@ -1005,7 +1003,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 
        if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
                return;
-       j = ACCESS_ONCE(jiffies);
+       j = jiffies;
 
        /*
         * Lots of memory barriers to reject false positives.
@@ -1423,13 +1421,14 @@ static int rcu_gp_init(struct rcu_state *rsp)
 
        /* Advance to a new grace period and initialize state. */
        record_gp_stall_check_time(rsp);
-       smp_wmb(); /* Record GP times before starting GP. */
-       rsp->gpnum++;
+       /* Record GP times before starting GP, hence smp_store_release(). */
+       smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
        raw_spin_unlock_irq(&rnp->lock);
 
        /* Exclude any concurrent CPU-hotplug operations. */
        mutex_lock(&rsp->onoff_mutex);
+       smp_mb__after_unlock_lock(); /* ->gpnum increment before GP! */
 
        /*
         * Set the quiescent-state-needed bits in all the rcu_node
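
The new comments above tie the ->gpnum update to ordering: smp_store_release() publishes the incremented ->gpnum only after the grace-period start times are recorded, and it pairs naturally with the smp_load_acquire() of ->gpnum in get_state_synchronize_rcu() added later in this diff. A minimal user-space sketch of the same release/acquire idiom, with made-up names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong gpnum;		/* Published counter (release store).   */
static unsigned long gp_start_time;	/* Plain data ordered before the store. */

static void *publisher(void *unused)
{
	gp_start_time = 12345;		/* Record times first... */
	atomic_store_explicit(&gpnum, 1, memory_order_release); /* ...then publish. */
	return NULL;
}

static void *observer(void *unused)
{
	if (atomic_load_explicit(&gpnum, memory_order_acquire) == 1)
		printf("start time %lu\n", gp_start_time); /* Must print 12345. */
	return NULL;
}

int main(void)
{
	pthread_t p, o;

	pthread_create(&p, NULL, publisher, NULL);
	pthread_create(&o, NULL, observer, NULL);
	pthread_join(p, NULL);
	pthread_join(o, NULL);
	return 0;
}
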
@@ -1557,10 +1556,11 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        }
        rnp = rcu_get_root(rsp);
        raw_spin_lock_irq(&rnp->lock);
-       smp_mb__after_unlock_lock();
+       smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
        rcu_nocb_gp_set(rnp, nocb);
 
-       rsp->completed = rsp->gpnum; /* Declare grace period done. */
+       /* Declare grace period done. */
+       ACCESS_ONCE(rsp->completed) = rsp->gpnum;
        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
        rsp->fqs_state = RCU_GP_IDLE;
        rdp = this_cpu_ptr(rsp->rda);
@@ -2304,7 +2304,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
                if (rnp_old != NULL)
                        raw_spin_unlock(&rnp_old->fqslock);
                if (ret) {
-                       rsp->n_force_qs_lh++;
+                       ACCESS_ONCE(rsp->n_force_qs_lh)++;
                        return;
                }
                rnp_old = rnp;
@@ -2316,7 +2316,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
        smp_mb__after_unlock_lock();
        raw_spin_unlock(&rnp_old->fqslock);
        if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-               rsp->n_force_qs_lh++;
+               ACCESS_ONCE(rsp->n_force_qs_lh)++;
                raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
                return;  /* Someone beat us to it. */
        }
@@ -2639,6 +2639,58 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
+/**
+ * get_state_synchronize_rcu - Snapshot current RCU state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_rcu()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_rcu(void)
+{
+       /*
+        * Any prior manipulation of RCU-protected data must happen
+        * before the load from ->gpnum.
+        */
+       smp_mb();  /* ^^^ */
+
+       /*
+        * Make sure this load happens before the purportedly
+        * time-consuming work between get_state_synchronize_rcu()
+        * and cond_synchronize_rcu().
+        */
+       return smp_load_acquire(&rcu_state->gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
+
+/**
+ * cond_synchronize_rcu - Conditionally wait for an RCU grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_rcu()
+ *
+ * If a full RCU grace period has elapsed since the earlier call to
+ * get_state_synchronize_rcu(), just return.  Otherwise, invoke
+ * synchronize_rcu() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.  But
+ * counter wrap is harmless.  If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_rcu(unsigned long oldstate)
+{
+       unsigned long newstate;
+
+       /*
+        * Ensure that this load happens before any RCU-destructive
+        * actions the caller might carry out after we return.
+        */
+       newstate = smp_load_acquire(&rcu_state->completed);
+       if (ULONG_CMP_GE(oldstate, newstate))
+               synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
+
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
        /*
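The cookie API added in the hunk above is meant to bracket a long-running update-side operation: snapshot the RCU state first, do the slow work, then wait only if no full grace period elapsed in the meantime. A minimal usage sketch of that pairing (do_slow_update() and its data are hypothetical, not part of this patch):

	unsigned long cookie;

	cookie = get_state_synchronize_rcu();
	do_slow_update();	/* hypothetical time-consuming work */
	/* Skips the wait if a full grace period already elapsed above. */
	cond_synchronize_rcu(cookie);
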
@@ -2880,7 +2932,7 @@ static int rcu_pending(int cpu)
  * non-NULL, store an indication of whether all callbacks are lazy.
  * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
+static int __maybe_unused rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
 {
        bool al = true;
        bool hc = false;
index 8c19873f1ac9b7eda78d55595dae0913b4ad45e3..75dc3c39a02a110b3d0f7b6b3b0557806f883094 100644 (file)
@@ -13,8 +13,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
index 6e2ef4b2b920bc3db08ececbc37d74cf83b8dc14..962d1d589929e2b9c8350bd4432d96c4e31017f2 100644 (file)
@@ -14,8 +14,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright Red Hat, 2009
  * Copyright IBM Corporation, 2009
@@ -1586,11 +1586,13 @@ static void rcu_prepare_kthreads(int cpu)
  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
 int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
 {
        *delta_jiffies = ULONG_MAX;
        return rcu_cpu_has_callbacks(cpu, NULL);
 }
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
@@ -1656,7 +1658,7 @@ extern int tick_nohz_active;
  * only if it has been awhile since the last time we did so.  Afterwards,
  * if there are any callbacks ready for immediate invocation, return true.
  */
-static bool rcu_try_advance_all_cbs(void)
+static bool __maybe_unused rcu_try_advance_all_cbs(void)
 {
        bool cbs_ready = false;
        struct rcu_data *rdp;
@@ -1696,6 +1698,7 @@ static bool rcu_try_advance_all_cbs(void)
  *
  * The caller must have disabled interrupts.
  */
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
 int rcu_needs_cpu(int cpu, unsigned long *dj)
 {
        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
@@ -1726,6 +1729,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
        }
        return 0;
 }
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
 /*
  * Prepare a CPU for idle from an RCU perspective.  The first major task
@@ -1739,6 +1743,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
        struct rcu_data *rdp;
        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
        struct rcu_node *rnp;
@@ -1790,6 +1795,7 @@ static void rcu_prepare_for_idle(int cpu)
                rcu_accelerate_cbs(rsp, rnp, rdp);
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 }
 
 /*
@@ -1799,11 +1805,12 @@ static void rcu_prepare_for_idle(int cpu)
  */
 static void rcu_cleanup_after_idle(int cpu)
 {
-
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
        if (rcu_is_nocb_cpu(cpu))
                return;
        if (rcu_try_advance_all_cbs())
                invoke_rcu_core();
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 }
 
 /*
@@ -2101,6 +2108,7 @@ static void rcu_init_one_nocb(struct rcu_node *rnp)
        init_waitqueue_head(&rnp->nocb_gp_wq[1]);
 }
 
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
 /* Is the specified CPU a no-CBs CPU? */
 bool rcu_is_nocb_cpu(int cpu)
 {
@@ -2108,6 +2116,7 @@ bool rcu_is_nocb_cpu(int cpu)
                return cpumask_test_cpu(cpu, rcu_nocb_mask);
        return false;
 }
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
 /*
  * Enqueue the specified string of rcu_head structures onto the specified
@@ -2893,7 +2902,7 @@ static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
  * CPU unless the grace period has extended for too long.
  *
  * This code relies on the fact that all NO_HZ_FULL CPUs are also
- * CONFIG_RCU_NOCB_CPUs.
+ * CONFIG_RCU_NOCB_CPU CPUs.
  */
 static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
 {
index 4def475336d412bcbfd8aa4e34a600e0f8b62d41..5cdc62e1beeb635a36ee87098a7f38110a651382 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
@@ -273,7 +273,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
        seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
                   rsp->n_force_qs, rsp->n_force_qs_ngp,
                   rsp->n_force_qs - rsp->n_force_qs_ngp,
-                  rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
+                  ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
        for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
                if (rnp->level != level) {
                        seq_puts(m, "\n");
index c54609faf233ba21a49835d32a9132eb56853f14..4c0a9b0af469a1f5fa3242d594653fee0f004505 100644 (file)
@@ -12,8 +12,8 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2001
  *
@@ -49,7 +49,6 @@
 #include <linux/module.h>
 
 #define CREATE_TRACE_POINTS
-#include <trace/events/rcu.h>
 
 #include "rcu.h"
 
index 9a95c8c2af2af0e9fb62c794926df3f1775a9dbe..ab32b7b0db5c6b30b4cc64e61e6a857fb51c87e6 100644 (file)
@@ -13,7 +13,7 @@ endif
 
 obj-y += core.o proc.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o
+obj-y += wait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
index 4a073539c58e69992ed2133a73444ceffc9cd3fa..e73efba98301f77715c5e5e17b8e65a98bbb3769 100644 (file)
@@ -203,7 +203,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
        struct autogroup *ag;
        int err;
 
-       if (nice < -20 || nice > 19)
+       if (nice < MIN_NICE || nice > MAX_NICE)
                return -EINVAL;
 
        err = security_task_setnice(current, nice);
index f5c6635b806c56550bcee17aa0c2490c96b16ff7..3c4d096544ce4179c24403d0c41b3758c1842eab 100644 (file)
@@ -555,12 +555,15 @@ void resched_cpu(int cpu)
  * selecting an idle cpu will add more delays to the timers than intended
  * (as that cpu's timer base may not be uptodate wrt jiffies etc).
  */
-int get_nohz_timer_target(void)
+int get_nohz_timer_target(int pinned)
 {
        int cpu = smp_processor_id();
        int i;
        struct sched_domain *sd;
 
+       if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
+               return cpu;
+
        rcu_read_lock();
        for_each_domain(cpu, sd) {
                for_each_cpu(i, sched_domain_span(sd)) {
@@ -823,19 +826,13 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
        if (static_key_false((&paravirt_steal_rq_enabled))) {
-               u64 st;
-
                steal = paravirt_steal_clock(cpu_of(rq));
                steal -= rq->prev_steal_time_rq;
 
                if (unlikely(steal > delta))
                        steal = delta;
 
-               st = steal_ticks(steal);
-               steal = st * TICK_NSEC;
-
                rq->prev_steal_time_rq += steal;
-
                delta -= steal;
        }
 #endif
@@ -1745,8 +1742,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0;
        p->numa_scan_period = sysctl_numa_balancing_scan_delay;
        p->numa_work.next = &p->numa_work;
-       p->numa_faults = NULL;
-       p->numa_faults_buffer = NULL;
+       p->numa_faults_memory = NULL;
+       p->numa_faults_buffer_memory = NULL;
+       p->last_task_numa_placement = 0;
+       p->last_sum_exec_runtime = 0;
 
        INIT_LIST_HEAD(&p->numa_entry);
        p->numa_group = NULL;
@@ -2149,8 +2148,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
        if (mm)
                mmdrop(mm);
        if (unlikely(prev_state == TASK_DEAD)) {
-               task_numa_free(prev);
-
                if (prev->sched_class->task_dead)
                        prev->sched_class->task_dead(prev);
 
@@ -2167,13 +2164,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 
 #ifdef CONFIG_SMP
 
-/* assumes rq->lock is held */
-static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
-{
-       if (prev->sched_class->pre_schedule)
-               prev->sched_class->pre_schedule(rq, prev);
-}
-
 /* rq->lock is NOT held, but preemption is disabled */
 static inline void post_schedule(struct rq *rq)
 {
@@ -2191,10 +2181,6 @@ static inline void post_schedule(struct rq *rq)
 
 #else
 
-static inline void pre_schedule(struct rq *rq, struct task_struct *p)
-{
-}
-
 static inline void post_schedule(struct rq *rq)
 {
 }
@@ -2510,8 +2496,13 @@ void __kprobes preempt_count_add(int val)
        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                PREEMPT_MASK - 10);
 #endif
-       if (preempt_count() == val)
-               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+       if (preempt_count() == val) {
+               unsigned long ip = get_parent_ip(CALLER_ADDR1);
+#ifdef CONFIG_DEBUG_PREEMPT
+               current->preempt_disable_ip = ip;
+#endif
+               trace_preempt_off(CALLER_ADDR0, ip);
+       }
 }
 EXPORT_SYMBOL(preempt_count_add);
 
@@ -2554,6 +2545,13 @@ static noinline void __schedule_bug(struct task_struct *prev)
        print_modules();
        if (irqs_disabled())
                print_irqtrace_events(prev);
+#ifdef CONFIG_DEBUG_PREEMPT
+       if (in_atomic_preempt_off()) {
+               pr_err("Preemption disabled at:");
+               print_ip_sym(current->preempt_disable_ip);
+               pr_cont("\n");
+       }
+#endif
        dump_stack();
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
@@ -2577,36 +2575,34 @@ static inline void schedule_debug(struct task_struct *prev)
        schedstat_inc(this_rq(), sched_count);
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *prev)
-{
-       if (prev->on_rq || rq->skip_clock_update < 0)
-               update_rq_clock(rq);
-       prev->sched_class->put_prev_task(rq, prev);
-}
-
 /*
  * Pick up the highest-prio task:
  */
 static inline struct task_struct *
-pick_next_task(struct rq *rq)
+pick_next_task(struct rq *rq, struct task_struct *prev)
 {
-       const struct sched_class *class;
+       const struct sched_class *class = &fair_sched_class;
        struct task_struct *p;
 
        /*
         * Optimization: we know that if all tasks are in
         * the fair class we can call that function directly:
         */
-       if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
-               p = fair_sched_class.pick_next_task(rq);
-               if (likely(p))
+       if (likely(prev->sched_class == class &&
+                  rq->nr_running == rq->cfs.h_nr_running)) {
+               p = fair_sched_class.pick_next_task(rq, prev);
+               if (likely(p && p != RETRY_TASK))
                        return p;
        }
 
+again:
        for_each_class(class) {
-               p = class->pick_next_task(rq);
-               if (p)
+               p = class->pick_next_task(rq, prev);
+               if (p) {
+                       if (unlikely(p == RETRY_TASK))
+                               goto again;
                        return p;
+               }
        }
 
        BUG(); /* the idle class will always have a runnable task */
@@ -2700,13 +2696,10 @@ need_resched:
                switch_count = &prev->nvcsw;
        }
 
-       pre_schedule(rq, prev);
-
-       if (unlikely(!rq->nr_running))
-               idle_balance(cpu, rq);
+       if (prev->on_rq || rq->skip_clock_update < 0)
+               update_rq_clock(rq);
 
-       put_prev_task(rq, prev);
-       next = pick_next_task(rq);
+       next = pick_next_task(rq, prev);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
        rq->skip_clock_update = 0;
@@ -2908,7 +2901,8 @@ EXPORT_SYMBOL(sleep_on_timeout);
  * This function changes the 'effective' priority of a task. It does
  * not touch ->normal_prio like __setscheduler().
  *
- * Used by the rt_mutex code to implement priority inheritance logic.
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. Call site only calls if the priority of the task changed.
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
@@ -2998,7 +2992,7 @@ void set_user_nice(struct task_struct *p, long nice)
        unsigned long flags;
        struct rq *rq;
 
-       if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
+       if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
                return;
        /*
         * We have to be careful, if called from sys_setpriority(),
@@ -3076,11 +3070,11 @@ SYSCALL_DEFINE1(nice, int, increment)
        if (increment > 40)
                increment = 40;
 
-       nice = TASK_NICE(current) + increment;
-       if (nice < -20)
-               nice = -20;
-       if (nice > 19)
-               nice = 19;
+       nice = task_nice(current) + increment;
+       if (nice < MIN_NICE)
+               nice = MIN_NICE;
+       if (nice > MAX_NICE)
+               nice = MAX_NICE;
 
        if (increment < 0 && !can_nice(current, nice))
                return -EPERM;
@@ -3108,18 +3102,6 @@ int task_prio(const struct task_struct *p)
        return p->prio - MAX_RT_PRIO;
 }
 
-/**
- * task_nice - return the nice value of a given task.
- * @p: the task in question.
- *
- * Return: The nice value [ -20 ... 0 ... 19 ].
- */
-int task_nice(const struct task_struct *p)
-{
-       return TASK_NICE(p);
-}
-EXPORT_SYMBOL(task_nice);
-
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
@@ -3189,9 +3171,8 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_new = 1;
 }
 
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr)
+static void __setscheduler_params(struct task_struct *p,
+               const struct sched_attr *attr)
 {
        int policy = attr->sched_policy;
 
@@ -3211,9 +3192,21 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
         * getparam()/getattr() don't report silly values for !rt tasks.
         */
        p->rt_priority = attr->sched_priority;
-
        p->normal_prio = normal_prio(p);
-       p->prio = rt_mutex_getprio(p);
+       set_load_weight(p);
+}
+
+/* Actually do priority change: must hold pi & rq lock. */
+static void __setscheduler(struct rq *rq, struct task_struct *p,
+                          const struct sched_attr *attr)
+{
+       __setscheduler_params(p, attr);
+
+       /*
+        * If we get here, there was no pi waiters boosting the
+        * task. It is safe to use the normal prio.
+        */
+       p->prio = normal_prio(p);
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
@@ -3221,8 +3214,6 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
                p->sched_class = &rt_sched_class;
        else
                p->sched_class = &fair_sched_class;
-
-       set_load_weight(p);
 }
 
 static void
@@ -3275,6 +3266,8 @@ static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
                                bool user)
 {
+       int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
+                     MAX_RT_PRIO - 1 - attr->sched_priority;
        int retval, oldprio, oldpolicy = -1, on_rq, running;
        int policy = attr->sched_policy;
        unsigned long flags;
@@ -3319,7 +3312,7 @@ recheck:
         */
        if (user && !capable(CAP_SYS_NICE)) {
                if (fair_policy(policy)) {
-                       if (attr->sched_nice < TASK_NICE(p) &&
+                       if (attr->sched_nice < task_nice(p) &&
                            !can_nice(p, attr->sched_nice))
                                return -EPERM;
                }
@@ -3352,7 +3345,7 @@ recheck:
                 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
                 */
                if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
-                       if (!can_nice(p, TASK_NICE(p)))
+                       if (!can_nice(p, task_nice(p)))
                                return -EPERM;
                }
 
@@ -3389,16 +3382,18 @@ recheck:
        }
 
        /*
-        * If not changing anything there's no need to proceed further:
+        * If not changing anything there's no need to proceed further,
+        * but store a possible modification of reset_on_fork.
         */
        if (unlikely(policy == p->policy)) {
-               if (fair_policy(policy) && attr->sched_nice != TASK_NICE(p))
+               if (fair_policy(policy) && attr->sched_nice != task_nice(p))
                        goto change;
                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
                        goto change;
                if (dl_policy(policy))
                        goto change;
 
+               p->sched_reset_on_fork = reset_on_fork;
                task_rq_unlock(rq, p, &flags);
                return 0;
        }
@@ -3452,6 +3447,24 @@ change:
                return -EBUSY;
        }
 
+       p->sched_reset_on_fork = reset_on_fork;
+       oldprio = p->prio;
+
+       /*
+        * Special case for priority boosted tasks.
+        *
+        * If the new priority is lower or equal (user space view)
+        * than the current (boosted) priority, we just store the new
+        * normal parameters and do not touch the scheduler class and
+        * the runqueue. This will be done when the task deboost
+        * itself.
+        */
+       if (rt_mutex_check_prio(p, newprio)) {
+               __setscheduler_params(p, attr);
+               task_rq_unlock(rq, p, &flags);
+               return 0;
+       }
+
        on_rq = p->on_rq;
        running = task_current(rq, p);
        if (on_rq)
@@ -3459,16 +3472,18 @@ change:
        if (running)
                p->sched_class->put_prev_task(rq, p);
 
-       p->sched_reset_on_fork = reset_on_fork;
-
-       oldprio = p->prio;
        prev_class = p->sched_class;
        __setscheduler(rq, p, attr);
 
        if (running)
                p->sched_class->set_curr_task(rq);
-       if (on_rq)
-               enqueue_task(rq, p, 0);
+       if (on_rq) {
+               /*
+                * We enqueue to tail when the priority of a task is
+                * increased (user space view).
+                */
+               enqueue_task(rq, p, oldprio <= p->prio ? ENQUEUE_HEAD : 0);
+       }
 
        check_class_changed(rq, p, prev_class, oldprio);
        task_rq_unlock(rq, p, &flags);
@@ -3624,7 +3639,7 @@ static int sched_copy_attr(struct sched_attr __user *uattr,
         * XXX: do we want to be lenient like existing syscalls; or do we want
         * to be strict and return an error on out-of-bounds values?
         */
-       attr->sched_nice = clamp(attr->sched_nice, -20, 19);
+       attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE);
 
 out:
        return ret;
@@ -3845,7 +3860,7 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
        else if (task_has_rt_policy(p))
                attr.sched_priority = p->rt_priority;
        else
-               attr.sched_nice = TASK_NICE(p);
+               attr.sched_nice = task_nice(p);
 
        rcu_read_unlock();
 
@@ -4483,6 +4498,7 @@ void init_idle(struct task_struct *idle, int cpu)
        rcu_read_unlock();
 
        rq->curr = rq->idle = idle;
+       idle->on_rq = 1;
 #if defined(CONFIG_SMP)
        idle->on_cpu = 1;
 #endif
@@ -4702,8 +4718,10 @@ void idle_task_exit(void)
 
        BUG_ON(cpu_online(smp_processor_id()));
 
-       if (mm != &init_mm)
+       if (mm != &init_mm) {
                switch_mm(mm, &init_mm, current);
+               finish_arch_post_lock_switch();
+       }
        mmdrop(mm);
 }
 
@@ -4721,6 +4739,22 @@ static void calc_load_migrate(struct rq *rq)
                atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void put_prev_task_fake(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static const struct sched_class fake_sched_class = {
+       .put_prev_task = put_prev_task_fake,
+};
+
+static struct task_struct fake_task = {
+       /*
+        * Avoid pull_{rt,dl}_task()
+        */
+       .prio = MAX_PRIO + 1,
+       .sched_class = &fake_sched_class,
+};
+
 /*
  * Migrate all tasks from the rq, sleeping tasks will be migrated by
  * try_to_wake_up()->select_task_rq().
@@ -4761,7 +4795,7 @@ static void migrate_tasks(unsigned int dead_cpu)
                if (rq->nr_running == 1)
                        break;
 
-               next = pick_next_task(rq);
+               next = pick_next_task(rq, &fake_task);
                BUG_ON(!next);
                next->sched_class->put_prev_task(rq, next);
 
@@ -4851,7 +4885,7 @@ set_table_entry(struct ctl_table *entry,
 static struct ctl_table *
 sd_alloc_ctl_domain_table(struct sched_domain *sd)
 {
-       struct ctl_table *table = sd_alloc_ctl_entry(13);
+       struct ctl_table *table = sd_alloc_ctl_entry(14);
 
        if (table == NULL)
                return NULL;
@@ -4879,9 +4913,12 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
                sizeof(int), 0644, proc_dointvec_minmax, false);
        set_table_entry(&table[10], "flags", &sd->flags,
                sizeof(int), 0644, proc_dointvec_minmax, false);
-       set_table_entry(&table[11], "name", sd->name,
+       set_table_entry(&table[11], "max_newidle_lb_cost",
+               &sd->max_newidle_lb_cost,
+               sizeof(long), 0644, proc_doulongvec_minmax, false);
+       set_table_entry(&table[12], "name", sd->name,
                CORENAME_MAX_SIZE, 0444, proc_dostring, false);
-       /* &table[12] is terminator */
+       /* &table[13] is terminator */
 
        return table;
 }
@@ -6858,7 +6895,6 @@ void __init sched_init(void)
 
                rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
-               INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
                init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
@@ -6947,7 +6983,8 @@ void __might_sleep(const char *file, int line, int preempt_offset)
        static unsigned long prev_jiffy;        /* ratelimiting */
 
        rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
-       if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
+       if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+            !is_idle_task(current)) ||
            system_state != SYSTEM_RUNNING || oops_in_progress)
                return;
        if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
@@ -6965,6 +7002,13 @@ void __might_sleep(const char *file, int line, int preempt_offset)
        debug_show_held_locks(current);
        if (irqs_disabled())
                print_irqtrace_events(current);
+#ifdef CONFIG_DEBUG_PREEMPT
+       if (!preempt_count_equals(preempt_offset)) {
+               pr_err("Preemption disabled at:");
+               print_ip_sym(current->preempt_disable_ip);
+               pr_cont("\n");
+       }
+#endif
        dump_stack();
 }
 EXPORT_SYMBOL(__might_sleep);
@@ -7018,7 +7062,7 @@ void normalize_rt_tasks(void)
                         * Renice negative nice level userspace
                         * tasks back to 0:
                         */
-                       if (TASK_NICE(p) < 0 && p->mm)
+                       if (task_nice(p) < 0 && p->mm)
                                set_user_nice(p, 0);
                        continue;
                }
index 99947919e30bc68963d3bfa937ebad8a2c90ba87..a95097cb4591b5bfa2466adb5600895e782fc661 100644 (file)
@@ -142,7 +142,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
        p->utimescaled += cputime_scaled;
        account_group_user_time(p, cputime);
 
-       index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
+       index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 
        /* Add user time to cpustat. */
        task_group_account_field(p, index, (__force u64) cputime);
@@ -169,7 +169,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
        p->gtime += cputime;
 
        /* Add guest time to cpustat. */
-       if (TASK_NICE(p) > 0) {
+       if (task_nice(p) > 0) {
                cpustat[CPUTIME_NICE] += (__force u64) cputime;
                cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
        } else {
@@ -258,16 +258,22 @@ static __always_inline bool steal_account_process_tick(void)
 {
 #ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
-               u64 steal, st = 0;
+               u64 steal;
+               cputime_t steal_ct;
 
                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;
 
-               st = steal_ticks(steal);
-               this_rq()->prev_steal_time += st * TICK_NSEC;
+               /*
+                * cputime_t may be less precise than nsecs (eg: if it's
+                * based on jiffies). Lets cast the result to cputime
+                * granularity and account the rest on the next rounds.
+                */
+               steal_ct = nsecs_to_cputime(steal);
+               this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
 
-               account_steal_time(st);
-               return st;
+               account_steal_time(steal_ct);
+               return steal_ct;
        }
 #endif
        return false;
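To illustrate the rounding described in the comment above, assume a jiffies-based cputime_t with HZ=100 (10 ms per tick): if 15 ms of steal time has accumulated, nsecs_to_cputime() yields 1 tick, prev_steal_time advances by cputime_to_nsecs(1 tick) = 10 ms, and the remaining 5 ms stays in the delta to be accounted on a later round once more steal accumulates. (Figures are illustrative; the actual granularity depends on the architecture's cputime_t.)
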
index 6e79b3faa4cd5384754232dc3b74300367afec8e..27ef409255253367d35fd598ddb01550612df040 100644 (file)
@@ -210,6 +210,16 @@ static inline int has_pushable_dl_tasks(struct rq *rq)
 
 static int push_dl_task(struct rq *rq);
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+       return dl_task(prev);
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+       rq->post_schedule = has_pushable_dl_tasks(rq);
+}
+
 #else
 
 static inline
@@ -232,6 +242,19 @@ void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 }
 
+static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
+{
+       return false;
+}
+
+static inline int pull_dl_task(struct rq *rq)
+{
+       return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
@@ -586,8 +609,8 @@ static void update_curr_dl(struct rq *rq)
         * approach need further study.
         */
        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
-       if (unlikely((s64)delta_exec < 0))
-               delta_exec = 0;
+       if (unlikely((s64)delta_exec <= 0))
+               return;
 
        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));
@@ -942,6 +965,8 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
        resched_task(rq->curr);
 }
 
+static int pull_dl_task(struct rq *this_rq);
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -988,7 +1013,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
        return rb_entry(left, struct sched_dl_entity, rb_node);
 }
 
-struct task_struct *pick_next_task_dl(struct rq *rq)
+struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 {
        struct sched_dl_entity *dl_se;
        struct task_struct *p;
@@ -996,9 +1021,20 @@ struct task_struct *pick_next_task_dl(struct rq *rq)
 
        dl_rq = &rq->dl;
 
+       if (need_pull_dl_task(rq, prev))
+               pull_dl_task(rq);
+       /*
+        * When prev is DL, we may throttle it in put_prev_task().
+        * So, we update time before we check for dl_nr_running.
+        */
+       if (prev->sched_class == &dl_sched_class)
+               update_curr_dl(rq);
+
        if (unlikely(!dl_rq->dl_nr_running))
                return NULL;
 
+       put_prev_task(rq, prev);
+
        dl_se = pick_next_dl_entity(rq, dl_rq);
        BUG_ON(!dl_se);
 
@@ -1013,9 +1049,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq)
                start_hrtick_dl(rq, p);
 #endif
 
-#ifdef CONFIG_SMP
-       rq->post_schedule = has_pushable_dl_tasks(rq);
-#endif /* CONFIG_SMP */
+       set_post_schedule(rq);
 
        return p;
 }
@@ -1424,13 +1458,6 @@ skip:
        return ret;
 }
 
-static void pre_schedule_dl(struct rq *rq, struct task_struct *prev)
-{
-       /* Try to pull other tasks here */
-       if (dl_task(prev))
-               pull_dl_task(rq);
-}
-
 static void post_schedule_dl(struct rq *rq)
 {
        push_dl_tasks(rq);
@@ -1558,7 +1585,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
        if (unlikely(p->dl.dl_throttled))
                return;
 
-       if (p->on_rq || rq->curr != p) {
+       if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
                if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
                        /* Only reschedule if pushing failed */
@@ -1623,7 +1650,6 @@ const struct sched_class dl_sched_class = {
        .set_cpus_allowed       = set_cpus_allowed_dl,
        .rq_online              = rq_online_dl,
        .rq_offline             = rq_offline_dl,
-       .pre_schedule           = pre_schedule_dl,
        .post_schedule          = post_schedule_dl,
        .task_woken             = task_woken_dl,
 #endif
index dd52e7ffb10ed3e16dd36ca01a08e9438fcd7872..f3344c31632aee78050316de3138fbe69280d027 100644 (file)
@@ -321,6 +321,7 @@ do {                                                                        \
        P(sched_goidle);
 #ifdef CONFIG_SMP
        P64(avg_idle);
+       P64(max_idle_balance_cost);
 #endif
 
        P(ttwu_count);
@@ -533,15 +534,15 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
                        unsigned long nr_faults = -1;
                        int cpu_current, home_node;
 
-                       if (p->numa_faults)
-                               nr_faults = p->numa_faults[2*node + i];
+                       if (p->numa_faults_memory)
+                               nr_faults = p->numa_faults_memory[2*node + i];
 
                        cpu_current = !i ? (task_node(p) == node) :
                                (pol && node_isset(node, pol->v.nodes));
 
                        home_node = (p->numa_preferred_nid == node);
 
-                       SEQ_printf(m, "numa_faults, %d, %d, %d, %d, %ld\n",
+                       SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
                                i, node, cpu_current, home_node, nr_faults);
                }
        }
index 9b4c4f3201301a269bd66718650899274f204bc6..7e9bd0b1fa9ef1aa16880a5a10601374c7bb618b 100644 (file)
@@ -322,13 +322,13 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
 /* Do the two (enqueued) entities belong to the same group ? */
-static inline int
+static inline struct cfs_rq *
 is_same_group(struct sched_entity *se, struct sched_entity *pse)
 {
        if (se->cfs_rq == pse->cfs_rq)
-               return 1;
+               return se->cfs_rq;
 
-       return 0;
+       return NULL;
 }
 
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
@@ -336,17 +336,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
        return se->parent;
 }
 
-/* return depth at which a sched entity is present in the hierarchy */
-static inline int depth_se(struct sched_entity *se)
-{
-       int depth = 0;
-
-       for_each_sched_entity(se)
-               depth++;
-
-       return depth;
-}
-
 static void
 find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 {
@@ -360,8 +349,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
         */
 
        /* First walk up until both entities are at same depth */
-       se_depth = depth_se(*se);
-       pse_depth = depth_se(*pse);
+       se_depth = (*se)->depth;
+       pse_depth = (*pse)->depth;
 
        while (se_depth > pse_depth) {
                se_depth--;
@@ -426,12 +415,6 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
-static inline int
-is_same_group(struct sched_entity *se, struct sched_entity *pse)
-{
-       return 1;
-}
-
 static inline struct sched_entity *parent_entity(struct sched_entity *se)
 {
        return NULL;
@@ -819,14 +802,6 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
 /* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
 unsigned int sysctl_numa_balancing_scan_delay = 1000;
 
-/*
- * After skipping a page migration on a shared page, skip N more numa page
- * migrations unconditionally. This reduces the number of NUMA migrations
- * in shared memory workloads, and has the effect of pulling tasks towards
- * where their memory lives, over pulling the memory towards the task.
- */
-unsigned int sysctl_numa_balancing_migrate_deferred = 16;
-
 static unsigned int task_nr_scan_windows(struct task_struct *p)
 {
        unsigned long rss = 0;
@@ -893,10 +868,26 @@ struct numa_group {
        struct list_head task_list;
 
        struct rcu_head rcu;
+       nodemask_t active_nodes;
        unsigned long total_faults;
+       /*
+        * Faults_cpu is used to decide whether memory should move
+        * towards the CPU. As a consequence, these stats are weighted
+        * more by CPU use than by memory faults.
+        */
+       unsigned long *faults_cpu;
        unsigned long faults[0];
 };
 
+/* Shared or private faults. */
+#define NR_NUMA_HINT_FAULT_TYPES 2
+
+/* Memory and CPU locality */
+#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
+
+/* Averaged statistics, and temporary buffers. */
+#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
+
 pid_t task_numa_group_id(struct task_struct *p)
 {
        return p->numa_group ? p->numa_group->gid : 0;
@@ -904,16 +895,16 @@ pid_t task_numa_group_id(struct task_struct *p)
 
 static inline int task_faults_idx(int nid, int priv)
 {
-       return 2 * nid + priv;
+       return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
 }
 
 static inline unsigned long task_faults(struct task_struct *p, int nid)
 {
-       if (!p->numa_faults)
+       if (!p->numa_faults_memory)
                return 0;
 
-       return p->numa_faults[task_faults_idx(nid, 0)] +
-               p->numa_faults[task_faults_idx(nid, 1)];
+       return p->numa_faults_memory[task_faults_idx(nid, 0)] +
+               p->numa_faults_memory[task_faults_idx(nid, 1)];
 }
 
 static inline unsigned long group_faults(struct task_struct *p, int nid)
@@ -925,6 +916,12 @@ static inline unsigned long group_faults(struct task_struct *p, int nid)
                p->numa_group->faults[task_faults_idx(nid, 1)];
 }
 
+static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
+{
+       return group->faults_cpu[task_faults_idx(nid, 0)] +
+               group->faults_cpu[task_faults_idx(nid, 1)];
+}
+
 /*
  * These return the fraction of accesses done by a particular task, or
  * task group, on a particular numa node.  The group weight is given a
@@ -935,7 +932,7 @@ static inline unsigned long task_weight(struct task_struct *p, int nid)
 {
        unsigned long total_faults;
 
-       if (!p->numa_faults)
+       if (!p->numa_faults_memory)
                return 0;
 
        total_faults = p->total_numa_faults;
@@ -954,6 +951,69 @@ static inline unsigned long group_weight(struct task_struct *p, int nid)
        return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
 }
 
+bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
+                               int src_nid, int dst_cpu)
+{
+       struct numa_group *ng = p->numa_group;
+       int dst_nid = cpu_to_node(dst_cpu);
+       int last_cpupid, this_cpupid;
+
+       this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+
+       /*
+        * Multi-stage node selection is used in conjunction with a periodic
+        * migration fault to build a temporal task<->page relation. By using
+        * a two-stage filter we remove short/unlikely relations.
+        *
+        * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
+        * a task's usage of a particular page (n_p) per total usage of this
+        * page (n_t) (in a given time-span) to a probability.
+        *
+        * Our periodic faults will sample this probability and getting the
+        * same result twice in a row, given these samples are fully
+        * independent, is then given by P(n)^2, provided our sample period
+        * is sufficiently short compared to the usage pattern.
+        *
+        * This quadric squishes small probabilities, making it less likely we
+        * act on an unlikely task<->page relation.
+        */
+       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+       if (!cpupid_pid_unset(last_cpupid) &&
+                               cpupid_to_nid(last_cpupid) != dst_nid)
+               return false;
+
+       /* Always allow migrate on private faults */
+       if (cpupid_match_pid(p, last_cpupid))
+               return true;
+
+       /* A shared fault, but p->numa_group has not been set up yet. */
+       if (!ng)
+               return true;
+
+       /*
+        * Do not migrate if the destination is not a node that
+        * is actively used by this numa group.
+        */
+       if (!node_isset(dst_nid, ng->active_nodes))
+               return false;
+
+       /*
+        * Source is a node that is not actively used by this
+        * numa group, while the destination is. Migrate.
+        */
+       if (!node_isset(src_nid, ng->active_nodes))
+               return true;
+
+       /*
+        * Both source and destination are nodes in active
+        * use by this numa group. Maximize memory bandwidth
+        * by migrating from more heavily used groups, to less
+        * heavily used ones, spreading the load around.
+        * Use a 1/4 hysteresis to avoid spurious page movement.
+        */
+       return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
+}
+
 static unsigned long weighted_cpuload(const int cpu);
 static unsigned long source_load(int cpu, int type);
 static unsigned long target_load(int cpu, int type);
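As a rough numerical illustration of the P(p)^2 argument in the comment above: a task responsible for only 20% of the hinting faults on a shared page passes the two-stage filter with probability of roughly 0.2^2 = 0.04, so incidental cross-node touches rarely trigger a migration, while a task doing 90% of the accesses passes with probability about 0.81. (Illustrative figures only; as the comment notes, the samples are assumed independent.)
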
@@ -1267,7 +1327,7 @@ static int task_numa_migrate(struct task_struct *p)
 static void numa_migrate_preferred(struct task_struct *p)
 {
        /* This task has no NUMA fault statistics yet */
-       if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
+       if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
                return;
 
        /* Periodically retry migrating the task to the preferred node */
@@ -1281,6 +1341,38 @@ static void numa_migrate_preferred(struct task_struct *p)
        task_numa_migrate(p);
 }
 
+/*
+ * Find the nodes on which the workload is actively running. We do this by
+ * tracking the nodes from which NUMA hinting faults are triggered. This can
+ * be different from the set of nodes where the workload's memory is currently
+ * located.
+ *
+ * The bitmask is used to make smarter decisions on when to do NUMA page
+ * migrations, To prevent flip-flopping, and excessive page migrations, nodes
+ * are added when they cause over 6/16 of the maximum number of faults, but
+ * only removed when they drop below 3/16.
+ */
+static void update_numa_active_node_mask(struct numa_group *numa_group)
+{
+       unsigned long faults, max_faults = 0;
+       int nid;
+
+       for_each_online_node(nid) {
+               faults = group_faults_cpu(numa_group, nid);
+               if (faults > max_faults)
+                       max_faults = faults;
+       }
+
+       for_each_online_node(nid) {
+               faults = group_faults_cpu(numa_group, nid);
+               if (!node_isset(nid, numa_group->active_nodes)) {
+                       if (faults > max_faults * 6 / 16)
+                               node_set(nid, numa_group->active_nodes);
+               } else if (faults < max_faults * 3 / 16)
+                       node_clear(nid, numa_group->active_nodes);
+       }
+}
+
 /*
  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
  * increments. The more local the fault statistics are, the higher the scan
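As a worked example of the 6/16 and 3/16 thresholds above: if the busiest node has max_faults = 1600 CPU-side faults, another node is added to active_nodes once its own count exceeds 1600 * 6/16 = 600, but is only cleared again after it drops below 1600 * 3/16 = 300. The gap between the two thresholds is what keeps a node hovering near one cutoff from flapping in and out of the mask on consecutive scans. (Numbers are illustrative only.)
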
@@ -1355,11 +1447,41 @@ static void update_task_scan_period(struct task_struct *p,
        memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
 }
 
+/*
+ * Get the fraction of time the task has been running since the last
+ * NUMA placement cycle. The scheduler keeps similar statistics, but
+ * decays those on a 32ms period, which is orders of magnitude off
+ * from the dozens-of-seconds NUMA balancing period. Use the scheduler
+ * stats only if the task is so new there are no NUMA statistics yet.
+ */
+static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
+{
+       u64 runtime, delta, now;
+       /* Use the start of this time slice to avoid calculations. */
+       now = p->se.exec_start;
+       runtime = p->se.sum_exec_runtime;
+
+       if (p->last_task_numa_placement) {
+               delta = runtime - p->last_sum_exec_runtime;
+               *period = now - p->last_task_numa_placement;
+       } else {
+               delta = p->se.avg.runnable_avg_sum;
+               *period = p->se.avg.runnable_avg_period;
+       }
+
+       p->last_sum_exec_runtime = runtime;
+       p->last_task_numa_placement = now;
+
+       return delta;
+}
+
 static void task_numa_placement(struct task_struct *p)
 {
        int seq, nid, max_nid = -1, max_group_nid = -1;
        unsigned long max_faults = 0, max_group_faults = 0;
        unsigned long fault_types[2] = { 0, 0 };
+       unsigned long total_faults;
+       u64 runtime, period;
        spinlock_t *group_lock = NULL;
 
        seq = ACCESS_ONCE(p->mm->numa_scan_seq);
@@ -1368,6 +1490,10 @@ static void task_numa_placement(struct task_struct *p)
        p->numa_scan_seq = seq;
        p->numa_scan_period_max = task_scan_max(p);
 
+       total_faults = p->numa_faults_locality[0] +
+                      p->numa_faults_locality[1];
+       runtime = numa_get_avg_runtime(p, &period);
+
        /* If the task is part of a group prevent parallel updates to group stats */
        if (p->numa_group) {
                group_lock = &p->numa_group->lock;
@@ -1379,24 +1505,37 @@ static void task_numa_placement(struct task_struct *p)
                unsigned long faults = 0, group_faults = 0;
                int priv, i;
 
-               for (priv = 0; priv < 2; priv++) {
-                       long diff;
+               for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
+                       long diff, f_diff, f_weight;
 
                        i = task_faults_idx(nid, priv);
-                       diff = -p->numa_faults[i];
 
                        /* Decay existing window, copy faults since last scan */
-                       p->numa_faults[i] >>= 1;
-                       p->numa_faults[i] += p->numa_faults_buffer[i];
-                       fault_types[priv] += p->numa_faults_buffer[i];
-                       p->numa_faults_buffer[i] = 0;
+                       diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
+                       fault_types[priv] += p->numa_faults_buffer_memory[i];
+                       p->numa_faults_buffer_memory[i] = 0;
 
-                       faults += p->numa_faults[i];
-                       diff += p->numa_faults[i];
+                       /*
+                        * Normalize the faults_from, so all tasks in a group
+                        * count according to CPU use, instead of by the raw
+                        * number of faults. Tasks with little runtime have
+                        * little over-all impact on throughput, and thus their
+                        * faults are less important.
+                        */
+                       f_weight = div64_u64(runtime << 16, period + 1);
+                       f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
+                                  (total_faults + 1);
+                       f_diff = f_weight - p->numa_faults_cpu[i] / 2;
+                       p->numa_faults_buffer_cpu[i] = 0;
+
+                       p->numa_faults_memory[i] += diff;
+                       p->numa_faults_cpu[i] += f_diff;
+                       faults += p->numa_faults_memory[i];
                        p->total_numa_faults += diff;
                        if (p->numa_group) {
                                /* safe because we can only change our own group */
                                p->numa_group->faults[i] += diff;
+                               p->numa_group->faults_cpu[i] += f_diff;
                                p->numa_group->total_faults += diff;
                                group_faults += p->numa_group->faults[i];
                        }
@@ -1416,6 +1555,7 @@ static void task_numa_placement(struct task_struct *p)
        update_task_scan_period(p, fault_types[0], fault_types[1]);
 
        if (p->numa_group) {
+               update_numa_active_node_mask(p->numa_group);
                /*
                 * If the preferred task and group nids are different,
                 * iterate over the nodes again to find the best place.
@@ -1465,7 +1605,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 
        if (unlikely(!p->numa_group)) {
                unsigned int size = sizeof(struct numa_group) +
-                                   2*nr_node_ids*sizeof(unsigned long);
+                                   4*nr_node_ids*sizeof(unsigned long);
 
                grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
                if (!grp)
@@ -1475,9 +1615,14 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
                spin_lock_init(&grp->lock);
                INIT_LIST_HEAD(&grp->task_list);
                grp->gid = p->pid;
+               /* Second half of the array tracks nids where faults happen */
+               grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
+                                               nr_node_ids;
+
+               node_set(task_node(current), grp->active_nodes);
 
-               for (i = 0; i < 2*nr_node_ids; i++)
-                       grp->faults[i] = p->numa_faults[i];
+               for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+                       grp->faults[i] = p->numa_faults_memory[i];
 
                grp->total_faults = p->total_numa_faults;
 
@@ -1534,9 +1679,9 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
 
        double_lock(&my_grp->lock, &grp->lock);
 
-       for (i = 0; i < 2*nr_node_ids; i++) {
-               my_grp->faults[i] -= p->numa_faults[i];
-               grp->faults[i] += p->numa_faults[i];
+       for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
+               my_grp->faults[i] -= p->numa_faults_memory[i];
+               grp->faults[i] += p->numa_faults_memory[i];
        }
        my_grp->total_faults -= p->total_numa_faults;
        grp->total_faults += p->total_numa_faults;
@@ -1562,12 +1707,12 @@ void task_numa_free(struct task_struct *p)
 {
        struct numa_group *grp = p->numa_group;
        int i;
-       void *numa_faults = p->numa_faults;
+       void *numa_faults = p->numa_faults_memory;
 
        if (grp) {
                spin_lock(&grp->lock);
-               for (i = 0; i < 2*nr_node_ids; i++)
-                       grp->faults[i] -= p->numa_faults[i];
+               for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+                       grp->faults[i] -= p->numa_faults_memory[i];
                grp->total_faults -= p->total_numa_faults;
 
                list_del(&p->numa_entry);
@@ -1577,18 +1722,21 @@ void task_numa_free(struct task_struct *p)
                put_numa_group(grp);
        }
 
-       p->numa_faults = NULL;
-       p->numa_faults_buffer = NULL;
+       p->numa_faults_memory = NULL;
+       p->numa_faults_buffer_memory = NULL;
+       p->numa_faults_cpu= NULL;
+       p->numa_faults_buffer_cpu = NULL;
        kfree(numa_faults);
 }
 
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
-void task_numa_fault(int last_cpupid, int node, int pages, int flags)
+void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 {
        struct task_struct *p = current;
        bool migrated = flags & TNF_MIGRATED;
+       int cpu_node = task_node(current);
        int priv;
 
        if (!numabalancing_enabled)
@@ -1603,16 +1751,24 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
                return;
 
        /* Allocate buffer to track faults on a per-node basis */
-       if (unlikely(!p->numa_faults)) {
-               int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
+       if (unlikely(!p->numa_faults_memory)) {
+               int size = sizeof(*p->numa_faults_memory) *
+                          NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
 
-               /* numa_faults and numa_faults_buffer share the allocation */
-               p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
-               if (!p->numa_faults)
+               p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
+               if (!p->numa_faults_memory)
                        return;
 
-               BUG_ON(p->numa_faults_buffer);
-               p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
+               BUG_ON(p->numa_faults_buffer_memory);
+               /*
+                * The averaged statistics, shared & private, memory & cpu,
+                * occupy the first half of the array. The second half of the
+                * array is for current counters, which are averaged into the
+                * first set by task_numa_placement.
+                */
+               p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
+               p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
+               p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
                p->total_numa_faults = 0;
                memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
        }
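For reference, the single allocation above is carved into four regions of 2 * nr_node_ids counters each (NR_NUMA_HINT_FAULT_BUCKETS = 8 counters per node in total) by the pointer arithmetic shown; a sketch of the implied layout, with N = nr_node_ids:

	numa_faults_memory         entries [0   .. 2*N)   averaged memory faults
	numa_faults_cpu            entries [2*N .. 4*N)   averaged cpu faults
	numa_faults_buffer_memory  entries [4*N .. 6*N)   per-scan memory counters
	numa_faults_buffer_cpu     entries [6*N .. 8*N)   per-scan cpu counters
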
@@ -1641,7 +1797,8 @@ void task_numa_fault(int last_cpupid, int node, int pages, int flags)
        if (migrated)
                p->numa_pages_migrated += pages;
 
-       p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
+       p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
+       p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
        p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
 }
 
@@ -2219,13 +2376,20 @@ static inline void __update_group_entity_contrib(struct sched_entity *se)
                se->avg.load_avg_contrib >>= NICE_0_SHIFT;
        }
 }
-#else
+
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
+{
+       __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
+       __update_tg_runnable_avg(&rq->avg, &rq->cfs);
+}
+#else /* CONFIG_FAIR_GROUP_SCHED */
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
                                                 int force_update) {}
 static inline void __update_tg_runnable_avg(struct sched_avg *sa,
                                                  struct cfs_rq *cfs_rq) {}
 static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-#endif
+static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 static inline void __update_task_entity_contrib(struct sched_entity *se)
 {
@@ -2323,12 +2487,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
        __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
 }
 
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
-{
-       __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
-       __update_tg_runnable_avg(&rq->avg, &rq->cfs);
-}
-
 /* Add the load generated by se into cfs_rq's child load-average */
 static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
                                                  struct sched_entity *se,
@@ -2416,7 +2574,10 @@ void idle_exit_fair(struct rq *this_rq)
        update_rq_runnable_avg(this_rq, 0);
 }
 
-#else
+static int idle_balance(struct rq *this_rq);
+
+#else /* CONFIG_SMP */
+
 static inline void update_entity_load_avg(struct sched_entity *se,
                                          int update_cfs_rq) {}
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
@@ -2428,7 +2589,13 @@ static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
                                           int sleep) {}
 static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                              int force_update) {}
-#endif
+
+static inline int idle_balance(struct rq *rq)
+{
+       return 0;
+}
+
+#endif /* CONFIG_SMP */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -2578,10 +2745,10 @@ static void __clear_buddies_last(struct sched_entity *se)
 {
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
-               if (cfs_rq->last == se)
-                       cfs_rq->last = NULL;
-               else
+               if (cfs_rq->last != se)
                        break;
+
+               cfs_rq->last = NULL;
        }
 }
 
@@ -2589,10 +2756,10 @@ static void __clear_buddies_next(struct sched_entity *se)
 {
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
-               if (cfs_rq->next == se)
-                       cfs_rq->next = NULL;
-               else
+               if (cfs_rq->next != se)
                        break;
+
+               cfs_rq->next = NULL;
        }
 }
 
@@ -2600,10 +2767,10 @@ static void __clear_buddies_skip(struct sched_entity *se)
 {
        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);
-               if (cfs_rq->skip == se)
-                       cfs_rq->skip = NULL;
-               else
+               if (cfs_rq->skip != se)
                        break;
+
+               cfs_rq->skip = NULL;
        }
 }
 
@@ -2746,17 +2913,36 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
  * 3) pick the "last" process, for cache locality
  * 4) do not run the "skip" process, if something else is available
  */
-static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *
+pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-       struct sched_entity *se = __pick_first_entity(cfs_rq);
-       struct sched_entity *left = se;
+       struct sched_entity *left = __pick_first_entity(cfs_rq);
+       struct sched_entity *se;
+
+       /*
+        * If curr is set we have to see if it's left of the leftmost entity
+        * still in the tree, provided there was anything in the tree at all.
+        */
+       if (!left || (curr && entity_before(curr, left)))
+               left = curr;
+
+       se = left; /* ideally we run the leftmost entity */
 
        /*
         * Avoid running the skip buddy, if running something else can
         * be done without getting too unfair.
         */
        if (cfs_rq->skip == se) {
-               struct sched_entity *second = __pick_next_entity(se);
+               struct sched_entity *second;
+
+               if (se == curr) {
+                       second = __pick_first_entity(cfs_rq);
+               } else {
+                       second = __pick_next_entity(se);
+                       if (!second || (curr && entity_before(curr, second)))
+                               second = curr;
+               }
+
                if (second && wakeup_preempt_entity(second, left) < 1)
                        se = second;
        }
@@ -2778,7 +2964,7 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
        return se;
 }
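As a side note on the "left of" test used above: a small standalone sketch of the vruntime comparison it relies on, assuming the usual signed-difference form of entity_before() (that helper is not shown in this diff).

#include <stdint.h>

/* Sketch: "left" in CFS's rbtree means a smaller virtual runtime; the
 * subtraction is done in signed 64-bit so counter wraparound is handled. */
static inline int entity_before_sketch(uint64_t vruntime_a, uint64_t vruntime_b)
{
	return (int64_t)(vruntime_a - vruntime_b) < 0;
}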
 
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
@@ -3433,22 +3619,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
        if (!cfs_bandwidth_used())
-               return;
+               return false;
 
        if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
-               return;
+               return false;
 
        /*
         * it's possible for a throttled entity to be forced into a running
         * state (e.g. set_curr_task), in this case we're finished.
         */
        if (cfs_rq_throttled(cfs_rq))
-               return;
+               return true;
 
        throttle_cfs_rq(cfs_rq);
+       return true;
 }
 
 static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
@@ -3558,7 +3745,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 }
 
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
-static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
@@ -4213,13 +4400,14 @@ done:
 }
 
 /*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
+ * select_task_rq_fair: Select target runqueue for the waking task in domains
+ * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
+ * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
  *
- * Balance, ie. select the least loaded group.
+ * Balances load by selecting the idlest cpu in the idlest group, or under
+ * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
  *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
+ * Returns the target cpu number.
  *
  * preempt must be disabled.
  */
@@ -4494,26 +4682,124 @@ preempt:
                set_last_buddy(se);
 }
 
-static struct task_struct *pick_next_task_fair(struct rq *rq)
+static struct task_struct *
+pick_next_task_fair(struct rq *rq, struct task_struct *prev)
 {
-       struct task_struct *p;
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;
+       struct task_struct *p;
+       int new_tasks;
 
+again:
+#ifdef CONFIG_FAIR_GROUP_SCHED
        if (!cfs_rq->nr_running)
-               return NULL;
+               goto idle;
+
+       if (prev->sched_class != &fair_sched_class)
+               goto simple;
+
+       /*
+        * Because of the set_next_buddy() in dequeue_task_fair() it is rather
+        * likely that the next task is from the same cgroup as the current one.
+        *
+        * Therefore attempt to avoid putting and setting the entire cgroup
+        * hierarchy, only change the part that actually changes.
+        */
+
+       do {
+               struct sched_entity *curr = cfs_rq->curr;
+
+               /*
+                * Since we got here without doing put_prev_entity() we also
+                * have to consider cfs_rq->curr. If it is still a runnable
+                * entity, update_curr() will update its vruntime, otherwise
+                * forget we've ever seen it.
+                */
+               if (curr && curr->on_rq)
+                       update_curr(cfs_rq);
+               else
+                       curr = NULL;
+
+               /*
+                * This call to check_cfs_rq_runtime() will do the throttle and
+                * dequeue its entity in the parent(s). Therefore the 'simple'
+                * nr_running test will indeed be correct.
+                */
+               if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+                       goto simple;
+
+               se = pick_next_entity(cfs_rq, curr);
+               cfs_rq = group_cfs_rq(se);
+       } while (cfs_rq);
+
+       p = task_of(se);
+
+       /*
+        * Since we haven't yet done put_prev_entity() and the selected task
+        * may be different from the one we started out with, try to touch
+        * the smallest number of cfs_rqs.
+        */
+       if (prev != p) {
+               struct sched_entity *pse = &prev->se;
+
+               while (!(cfs_rq = is_same_group(se, pse))) {
+                       int se_depth = se->depth;
+                       int pse_depth = pse->depth;
+
+                       if (se_depth <= pse_depth) {
+                               put_prev_entity(cfs_rq_of(pse), pse);
+                               pse = parent_entity(pse);
+                       }
+                       if (se_depth >= pse_depth) {
+                               set_next_entity(cfs_rq_of(se), se);
+                               se = parent_entity(se);
+                       }
+               }
+
+               put_prev_entity(cfs_rq, pse);
+               set_next_entity(cfs_rq, se);
+       }
+
+       if (hrtick_enabled(rq))
+               hrtick_start_fair(rq, p);
+
+       return p;
+simple:
+       cfs_rq = &rq->cfs;
+#endif
+
+       if (!cfs_rq->nr_running)
+               goto idle;
+
+       put_prev_task(rq, prev);
 
        do {
-               se = pick_next_entity(cfs_rq);
+               se = pick_next_entity(cfs_rq, NULL);
                set_next_entity(cfs_rq, se);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);
 
        p = task_of(se);
+
        if (hrtick_enabled(rq))
                hrtick_start_fair(rq, p);
 
        return p;
+
+idle:
+       new_tasks = idle_balance(rq);
+       /*
+        * Because idle_balance() releases (and re-acquires) rq->lock, it is
+        * possible for any higher priority task to appear. In that case we
+        * must re-start the pick_next_entity() loop.
+        */
+       if (new_tasks < 0)
+               return RETRY_TASK;
+
+       if (new_tasks > 0)
+               goto again;
+
+       return NULL;
 }
 
 /*
@@ -4751,7 +5037,7 @@ static void move_task(struct task_struct *p, struct lb_env *env)
  * Is this task likely cache-hot:
  */
 static int
-task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
+task_hot(struct task_struct *p, u64 now)
 {
        s64 delta;
 
@@ -4785,7 +5071,7 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 {
        int src_nid, dst_nid;
 
-       if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
+       if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
            !(env->sd->flags & SD_NUMA)) {
                return false;
        }
@@ -4816,7 +5102,7 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
        if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
                return false;
 
-       if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
+       if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
                return false;
 
        src_nid = cpu_to_node(env->src_cpu);
@@ -4912,7 +5198,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
         * 2) task is cache cold, or
         * 3) too many balance attempts have failed.
         */
-       tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
+       tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq));
        if (!tsk_cache_hot)
                tsk_cache_hot = migrate_degrades_locality(p, env);
 
@@ -5775,12 +6061,10 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
        pwr_now /= SCHED_POWER_SCALE;
 
        /* Amount of load we'd subtract */
-       tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
-               busiest->group_power;
-       if (busiest->avg_load > tmp) {
+       if (busiest->avg_load > scaled_busy_load_per_task) {
                pwr_move += busiest->group_power *
                            min(busiest->load_per_task,
-                               busiest->avg_load - tmp);
+                               busiest->avg_load - scaled_busy_load_per_task);
        }
 
        /* Amount of load we'd add */
@@ -6359,17 +6643,23 @@ out:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-void idle_balance(int this_cpu, struct rq *this_rq)
+static int idle_balance(struct rq *this_rq)
 {
        struct sched_domain *sd;
        int pulled_task = 0;
        unsigned long next_balance = jiffies + HZ;
        u64 curr_cost = 0;
+       int this_cpu = this_rq->cpu;
 
+       idle_enter_fair(this_rq);
+       /*
+        * We must set idle_stamp _before_ doing the balancing work below, so
+        * that we measure its duration as idle time.
+        */
        this_rq->idle_stamp = rq_clock(this_rq);
 
        if (this_rq->avg_idle < sysctl_sched_migration_cost)
-               return;
+               goto out;
 
        /*
         * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6407,15 +6697,22 @@ void idle_balance(int this_cpu, struct rq *this_rq)
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                        next_balance = sd->last_balance + interval;
-               if (pulled_task) {
-                       this_rq->idle_stamp = 0;
+               if (pulled_task)
                        break;
-               }
        }
        rcu_read_unlock();
 
        raw_spin_lock(&this_rq->lock);
 
+       /*
+        * While browsing the domains, we released the rq lock.
+        * A task could have be enqueued in the meantime
+        * A task could have been enqueued in the meantime.
+       if (this_rq->cfs.h_nr_running && !pulled_task) {
+               pulled_task = 1;
+               goto out;
+       }
+
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
                 * We are going idle. next_balance may be set based on
@@ -6426,6 +6723,20 @@ void idle_balance(int this_cpu, struct rq *this_rq)
 
        if (curr_cost > this_rq->max_idle_balance_cost)
                this_rq->max_idle_balance_cost = curr_cost;
+
+out:
+       /* Is there a task of a higher priority class? */
+       if (this_rq->nr_running != this_rq->cfs.h_nr_running &&
+           (this_rq->dl.dl_nr_running ||
+            (this_rq->rt.rt_nr_running && !rt_rq_throttled(&this_rq->rt))))
+               pulled_task = -1;
+
+       if (pulled_task) {
+               idle_exit_fair(this_rq);
+               this_rq->idle_stamp = 0;
+       }
+
+       return pulled_task;
 }
 
 /*
@@ -6496,6 +6807,11 @@ out_unlock:
        return 0;
 }
 
+static inline int on_null_domain(struct rq *rq)
+{
+       return unlikely(!rcu_dereference_sched(rq->sd));
+}
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * idle load balancing details
@@ -6550,8 +6866,13 @@ static void nohz_balancer_kick(void)
 static inline void nohz_balance_exit_idle(int cpu)
 {
        if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-               cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-               atomic_dec(&nohz.nr_cpus);
+               /*
+                * Completely isolated CPUs never add themselves to the
+                * idle_cpus_mask, so we must test before clearing.
+                */
+               if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+                       cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+                       atomic_dec(&nohz.nr_cpus);
+               }
                clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
        }
 }
@@ -6605,6 +6926,12 @@ void nohz_balance_enter_idle(int cpu)
        if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
                return;
 
+       /*
+        * If we're a completely isolated CPU, we don't play.
+        */
+       if (on_null_domain(cpu_rq(cpu)))
+               return;
+
        cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
        atomic_inc(&nohz.nr_cpus);
        set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
@@ -6867,11 +7194,6 @@ static void run_rebalance_domains(struct softirq_action *h)
        nohz_idle_balance(this_rq, idle);
 }
 
-static inline int on_null_domain(struct rq *rq)
-{
-       return !rcu_dereference_sched(rq->sd);
-}
-
 /*
  * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  */
@@ -7036,7 +7358,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
-       if (!p->se.on_rq)
+       struct sched_entity *se = &p->se;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       /*
+        * Since the real depth could have been changed (only the FAIR
+        * class maintains a depth value), reset depth properly.
+        */
+       se->depth = se->parent ? se->parent->depth + 1 : 0;
+#endif
+       if (!se->on_rq)
                return;
 
        /*
@@ -7084,7 +7414,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_move_group_fair(struct task_struct *p, int on_rq)
 {
+       struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq;
+
        /*
         * If the task was not on the rq at the time of this cgroup movement
         * it must have been asleep, sleeping tasks keep their ->vruntime
@@ -7110,23 +7442,24 @@ static void task_move_group_fair(struct task_struct *p, int on_rq)
         * To prevent boost or penalty in the new cfs_rq caused by delta
         * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
         */
-       if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
+       if (!on_rq && (!se->sum_exec_runtime || p->state == TASK_WAKING))
                on_rq = 1;
 
        if (!on_rq)
-               p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
+               se->vruntime -= cfs_rq_of(se)->min_vruntime;
        set_task_rq(p, task_cpu(p));
+       se->depth = se->parent ? se->parent->depth + 1 : 0;
        if (!on_rq) {
-               cfs_rq = cfs_rq_of(&p->se);
-               p->se.vruntime += cfs_rq->min_vruntime;
+               cfs_rq = cfs_rq_of(se);
+               se->vruntime += cfs_rq->min_vruntime;
 #ifdef CONFIG_SMP
                /*
                 * migrate_task_rq_fair() will have removed our previous
                 * contribution, but we must synchronize for ongoing future
                 * decay.
                 */
-               p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
-               cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
+               se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
+               cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
 #endif
        }
 }
@@ -7222,10 +7555,13 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
        if (!se)
                return;
 
-       if (!parent)
+       if (!parent) {
                se->cfs_rq = &rq->cfs;
-       else
+               se->depth = 0;
+       } else {
                se->cfs_rq = parent->my_q;
+               se->depth = parent->depth + 1;
+       }
 
        se->my_q = cfs_rq;
        /* guarantee group entities always have weight */
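A quick worked example of the depth bookkeeping introduced here (illustrative only, standalone types): nested group entities get depths 0, 1, 2, ..., which is what lets pick_next_task_fair() walk two entities up to their common cfs_rq.

/*
 * Sketch: depth assignment as done above
 * (se->depth = parent ? parent->depth + 1 : 0).
 *
 *   root cfs_rq    -> group entity A: depth 0
 *     A's cfs_rq   -> group entity B: depth 1
 *       B's cfs_rq -> task entity t : depth 2
 */
#include <stddef.h>

struct entity { const struct entity *parent; int depth; };

static void set_depth(struct entity *se, const struct entity *parent)
{
	se->parent = parent;
	se->depth = parent ? parent->depth + 1 : 0;
}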
similarity index 95%
rename from kernel/cpu/idle.c
rename to kernel/sched/idle.c
index 277f494c2a9ae695c9a61dd44e5a2bfe68d75b2c..b7976a1271788a86471928461af5e30676f828a4 100644 (file)
@@ -3,6 +3,7 @@
  */
 #include <linux/sched.h>
 #include <linux/cpu.h>
+#include <linux/cpuidle.h>
 #include <linux/tick.h>
 #include <linux/mm.h>
 #include <linux/stackprotector.h>
@@ -95,8 +96,10 @@ static void cpu_idle_loop(void)
                                if (!current_clr_polling_and_test()) {
                                        stop_critical_timings();
                                        rcu_idle_enter();
-                                       arch_cpu_idle();
-                                       WARN_ON_ONCE(irqs_disabled());
+                                       if (cpuidle_idle_call())
+                                               arch_cpu_idle();
+                                       if (WARN_ON_ONCE(irqs_disabled()))
+                                               local_irq_enable();
                                        rcu_idle_exit();
                                        start_critical_timings();
                                } else {
index 516c3d9ceea1455cee6c5a7b96db7f92a141f9af..879f2b75266a9933823a2056541af469cdc9c3dd 100644 (file)
@@ -13,18 +13,8 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
        return task_cpu(p); /* IDLE tasks are never migrated */
 }
-
-static void pre_schedule_idle(struct rq *rq, struct task_struct *prev)
-{
-       idle_exit_fair(rq);
-       rq_last_tick_reset(rq);
-}
-
-static void post_schedule_idle(struct rq *rq)
-{
-       idle_enter_fair(rq);
-}
 #endif /* CONFIG_SMP */
+
 /*
  * Idle tasks are unconditionally rescheduled:
  */
@@ -33,13 +23,12 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
        resched_task(rq->idle);
 }
 
-static struct task_struct *pick_next_task_idle(struct rq *rq)
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev)
 {
+       put_prev_task(rq, prev);
+
        schedstat_inc(rq, sched_goidle);
-#ifdef CONFIG_SMP
-       /* Trigger the post schedule to do an idle_enter for CFS */
-       rq->post_schedule = 1;
-#endif
        return rq->idle;
 }
 
@@ -58,6 +47,8 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
+       idle_exit_fair(rq);
+       rq_last_tick_reset(rq);
 }
 
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
@@ -101,8 +92,6 @@ const struct sched_class idle_sched_class = {
 
 #ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_idle,
-       .pre_schedule           = pre_schedule_idle,
-       .post_schedule          = post_schedule_idle,
 #endif
 
        .set_curr_task          = set_curr_task_idle,
index 1999021042c7010c6e5e86539b9c0b9a03519dcd..d8cdf1618551c80143e0f5fd38de556d089eb1f6 100644 (file)
@@ -229,6 +229,14 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
+static int pull_rt_task(struct rq *this_rq);
+
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+       /* Try to pull RT tasks here if we lower this rq's prio */
+       return rq->rt.highest_prio.curr > prev->prio;
+}
+
 static inline int rt_overloaded(struct rq *rq)
 {
        return atomic_read(&rq->rd->rto_count);
@@ -315,6 +323,15 @@ static inline int has_pushable_tasks(struct rq *rq)
        return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static inline void set_post_schedule(struct rq *rq)
+{
+       /*
+        * We detect this state here so that we can avoid taking the RQ
+        * lock again later if there is no need to push
+        */
+       rq->post_schedule = has_pushable_tasks(rq);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -359,6 +376,19 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
+{
+       return false;
+}
+
+static inline int pull_rt_task(struct rq *this_rq)
+{
+       return 0;
+}
+
+static inline void set_post_schedule(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -440,11 +470,6 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
                dequeue_rt_entity(rt_se);
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-
 static int rt_se_boosted(struct sched_rt_entity *rt_se)
 {
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
@@ -515,11 +540,6 @@ static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-       return rt_rq->rt_throttled;
-}
-
 static inline const struct cpumask *sched_rt_period_mask(void)
 {
        return cpu_online_mask;
@@ -1318,15 +1338,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 {
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
-       struct rt_rq *rt_rq;
-
-       rt_rq = &rq->rt;
-
-       if (!rt_rq->rt_nr_running)
-               return NULL;
-
-       if (rt_rq_throttled(rt_rq))
-               return NULL;
+       struct rt_rq *rt_rq  = &rq->rt;
 
        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
@@ -1340,21 +1352,45 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
        return p;
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq)
+static struct task_struct *
+pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 {
-       struct task_struct *p = _pick_next_task_rt(rq);
+       struct task_struct *p;
+       struct rt_rq *rt_rq = &rq->rt;
+
+       if (need_pull_rt_task(rq, prev)) {
+               pull_rt_task(rq);
+               /*
+                * pull_rt_task() can drop (and re-acquire) rq->lock; this
+                * means a dl task can slip in, in which case we need to
+                * re-start task selection.
+                */
+               if (unlikely(rq->dl.dl_nr_running))
+                       return RETRY_TASK;
+       }
+
+       /*
+        * We may dequeue prev's rt_rq in put_prev_task().
+        * So, we update time before rt_nr_running check.
+        */
+       if (prev->sched_class == &rt_sched_class)
+               update_curr_rt(rq);
+
+       if (!rt_rq->rt_nr_running)
+               return NULL;
+
+       if (rt_rq_throttled(rt_rq))
+               return NULL;
+
+       put_prev_task(rq, prev);
+
+       p = _pick_next_task_rt(rq);
 
        /* The running task is never eligible for pushing */
        if (p)
                dequeue_pushable_task(rq, p);
 
-#ifdef CONFIG_SMP
-       /*
-        * We detect this state here so that we can avoid taking the RQ
-        * lock again later if there is no need to push
-        */
-       rq->post_schedule = has_pushable_tasks(rq);
-#endif
+       set_post_schedule(rq);
 
        return p;
 }
@@ -1724,13 +1760,6 @@ skip:
        return ret;
 }
 
-static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
-{
-       /* Try to pull RT tasks here if we lower this rq's prio */
-       if (rq->rt.highest_prio.curr > prev->prio)
-               pull_rt_task(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
        push_rt_tasks(rq);
@@ -1833,7 +1862,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
                resched_task(rq->curr);
 }
 
-void init_sched_rt_class(void)
+void __init init_sched_rt_class(void)
 {
        unsigned int i;
 
@@ -2007,7 +2036,6 @@ const struct sched_class rt_sched_class = {
        .set_cpus_allowed       = set_cpus_allowed_rt,
        .rq_online              = rq_online_rt,
        .rq_offline             = rq_offline_rt,
-       .pre_schedule           = pre_schedule_rt,
        .post_schedule          = post_schedule_rt,
        .task_woken             = task_woken_rt,
        .switched_from          = switched_from_rt,
index f964add50f3863805f725e8fc13b447806f41022..c9007f28d3a222ca97b5fb98210b2ecc1e756b7a 100644 (file)
@@ -23,24 +23,6 @@ extern atomic_long_t calc_load_tasks;
 extern long calc_load_fold_active(struct rq *this_rq);
 extern void update_cpu_load_active(struct rq *this_rq);
 
-/*
- * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
- * and back.
- */
-#define NICE_TO_PRIO(nice)     (MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio)     ((prio) - MAX_RT_PRIO - 20)
-#define TASK_NICE(p)           PRIO_TO_NICE((p)->static_prio)
-
-/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p)           ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p)      USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO          (USER_PRIO(MAX_PRIO))
-
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
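For reference, a worked example of the mapping encoded by the macros removed above (they now live elsewhere); the concrete numbers assume the usual MAX_RT_PRIO of 100 and MAX_PRIO of 140, which this diff does not show, so treat them as an assumption.

#include <assert.h>

/* Sketch of the removed macros, with assumed limits. */
#define MAX_RT_PRIO_SK		100
#define NICE_TO_PRIO_SK(nice)	(MAX_RT_PRIO_SK + (nice) + 20)
#define PRIO_TO_NICE_SK(prio)	((prio) - MAX_RT_PRIO_SK - 20)

static void nice_prio_examples(void)
{
	assert(NICE_TO_PRIO_SK(-20) == 100);	/* highest CFS priority  */
	assert(NICE_TO_PRIO_SK(0)   == 120);	/* the default           */
	assert(NICE_TO_PRIO_SK(19)  == 139);	/* MAX_PRIO - 1          */
	assert(PRIO_TO_NICE_SK(120) == 0);	/* exact inverse         */
}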
@@ -441,6 +423,18 @@ struct rt_rq {
 #endif
 };
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+#else
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_throttled;
+}
+#endif
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */
@@ -558,11 +552,9 @@ struct rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /* list of leaf cfs_rq on this cpu: */
        struct list_head leaf_cfs_rq_list;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
 
-#ifdef CONFIG_RT_GROUP_SCHED
-       struct list_head leaf_rt_rq_list;
-#endif
+       struct sched_avg avg;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
        /*
         * This is part of a global counter where only the total sum
@@ -651,8 +643,6 @@ struct rq {
 #ifdef CONFIG_SMP
        struct llist_head wake_list;
 #endif
-
-       struct sched_avg avg;
 };
 
 static inline int cpu_of(struct rq *rq)
@@ -1112,6 +1102,8 @@ static const u32 prio_to_wmult[40] = {
 
 #define DEQUEUE_SLEEP          1
 
+#define RETRY_TASK             ((void *)-1UL)
+
 struct sched_class {
        const struct sched_class *next;
 
@@ -1122,14 +1114,22 @@ struct sched_class {
 
        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
-       struct task_struct * (*pick_next_task) (struct rq *rq);
+       /*
+        * The pick_next_task() method that returns the next task is
+        * responsible for calling put_prev_task() on the @prev task, or
+        * for doing something equivalent.
+        *
+        * May return RETRY_TASK when it finds a higher prio class has runnable
+        * tasks.
+        */
+       struct task_struct * (*pick_next_task) (struct rq *rq,
+                                               struct task_struct *prev);
        void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
        void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
 
-       void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
        void (*post_schedule) (struct rq *this_rq);
        void (*task_waking) (struct task_struct *task);
        void (*task_woken) (struct rq *this_rq, struct task_struct *task);
@@ -1159,6 +1159,11 @@ struct sched_class {
 #endif
 };
 
+static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
+{
+       prev->sched_class->put_prev_task(rq, prev);
+}
+
 #define sched_class_highest (&stop_sched_class)
 #define for_each_class(class) \
    for (class = sched_class_highest; class; class = class->next)
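To make the pick_next_task() contract described above concrete, here is a minimal caller-side sketch of how the core scheduler might drive it (the actual core.c changes are not part of this excerpt, so this is an assumption): walk the classes from highest to lowest, let each class's pick_next_task() do the put_prev_task() work, and restart the walk when RETRY_TASK is returned.

/* Sketch only: honouring the new pick_next_task() contract. */
static struct task_struct *pick_next_task_sketch(struct rq *rq,
						 struct task_struct *prev)
{
	const struct sched_class *class;
	struct task_struct *p;

again:
	for_each_class(class) {
		p = class->pick_next_task(rq, prev);
		if (p) {
			/* a higher-prio class became runnable meanwhile */
			if (unlikely(p == RETRY_TASK))
				goto again;
			return p;
		}
	}

	BUG();	/* should never get here: idle always has a task */
}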
@@ -1175,16 +1180,14 @@ extern const struct sched_class idle_sched_class;
 extern void update_group_power(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
-extern void idle_balance(int this_cpu, struct rq *this_rq);
 
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
 
-#else  /* CONFIG_SMP */
+#else
 
-static inline void idle_balance(int cpu, struct rq *rq)
-{
-}
+static inline void idle_enter_fair(struct rq *rq) { }
+static inline void idle_exit_fair(struct rq *rq) { }
 
 #endif
 
@@ -1213,16 +1216,6 @@ extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
 
-#ifdef CONFIG_PARAVIRT
-static inline u64 steal_ticks(u64 steal)
-{
-       if (unlikely(steal > NSEC_PER_SEC))
-               return div_u64(steal, TICK_NSEC);
-
-       return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
-}
-#endif
-
 static inline void inc_nr_running(struct rq *rq)
 {
        rq->nr_running++;
index fdb6bb0b33561af759e24d8806776dfc0823867c..d6ce65dde5412d4b4b9d8473caf92318ba7fcb24 100644 (file)
@@ -23,16 +23,19 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
        /* we're never preempted */
 }
 
-static struct task_struct *pick_next_task_stop(struct rq *rq)
+static struct task_struct *
+pick_next_task_stop(struct rq *rq, struct task_struct *prev)
 {
        struct task_struct *stop = rq->stop;
 
-       if (stop && stop->on_rq) {
-               stop->se.exec_start = rq_clock_task(rq);
-               return stop;
-       }
+       if (!stop || !stop->on_rq)
+               return NULL;
 
-       return NULL;
+       put_prev_task(rq, prev);
+
+       stop->se.exec_start = rq_clock_task(rq);
+
+       return stop;
 }
 
 static void
index 490fcbb1dc5b41727408d28e17ae145754c2c051..b50990a5bea0220df9034f0bcc71d92e452edc78 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/smp.h>
 #include <linux/smpboot.h>
 #include <linux/tick.h>
+#include <linux/irq.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
index c0a58be780a407a5bf350e852db1c9800c98dd34..adaeab6f7a870ae69537baebf4ca092a161d5013 100644 (file)
@@ -174,10 +174,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 
        /* normalize: avoid signed division (rounding problems) */
        error = -ESRCH;
-       if (niceval < -20)
-               niceval = -20;
-       if (niceval > 19)
-               niceval = 19;
+       if (niceval < MIN_NICE)
+               niceval = MIN_NICE;
+       if (niceval > MAX_NICE)
+               niceval = MAX_NICE;
 
        rcu_read_lock();
        read_lock(&tasklist_lock);
index 49e13e1f8fe6a5e481edb3026ae20360918986c3..7754ff16f3342c42d21509437c4c8f3c110694f6 100644 (file)
@@ -385,13 +385,6 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "numa_balancing_migrate_deferred",
-               .data           = &sysctl_numa_balancing_migrate_deferred,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
        {
                .procname       = "numa_balancing",
                .data           = NULL, /* filled in by handler */
index 3ce6e8c5f3fca86b436b288194a661cfe7b80829..f448513a45ed6d59ac5e7363fa5e43dafae34820 100644 (file)
@@ -124,7 +124,7 @@ config NO_HZ_FULL
 endchoice
 
 config NO_HZ_FULL_ALL
-       bool "Full dynticks system on all CPUs by default"
+       bool "Full dynticks system on all CPUs by default (except CPU 0)"
        depends on NO_HZ_FULL
        help
          If the user doesn't pass the nohz_full boot option to
index 9250130646f510796fb2811ad7fb11483abc0040..57a413fd0ebf557b947e8fe8b38e3c180f9bfedd 100644 (file)
@@ -3,7 +3,10 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)                += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS)              += tick-common.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)    += tick-broadcast.o
+ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
+ obj-y                                         += tick-broadcast.o
+ obj-$(CONFIG_TICK_ONESHOT)                    += tick-broadcast-hrtimer.o
+endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)              += sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)                     += tick-oneshot.o
 obj-$(CONFIG_TICK_ONESHOT)                     += tick-sched.o
index 086ad6043bcbde85bcb1755e7fa17979d696a46a..ad362c260ef40a7b744bef657aca8f4f5a4609e6 100644 (file)
@@ -439,6 +439,19 @@ void clockevents_config_and_register(struct clock_event_device *dev,
 }
 EXPORT_SYMBOL_GPL(clockevents_config_and_register);
 
+int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
+{
+       clockevents_config(dev, freq);
+
+       if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
+               return clockevents_program_event(dev, dev->next_event, false);
+
+       if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
+               dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev);
+
+       return 0;
+}
+
 /**
  * clockevents_update_freq - Update frequency and reprogram a clock event device.
  * @dev:       device to modify
@@ -446,17 +459,22 @@ EXPORT_SYMBOL_GPL(clockevents_config_and_register);
  *
  * Reconfigure and reprogram a clock event device in oneshot
  * mode. Must be called on the cpu for which the device delivers per
- * cpu timer events with interrupts disabled!  Returns 0 on success,
- * -ETIME when the event is in the past.
+ * cpu timer events. If called for the broadcast device the core takes
+ * care of serialization.
+ *
+ * Returns 0 on success, -ETIME when the event is in the past.
  */
 int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
-       clockevents_config(dev, freq);
-
-       if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
-               return 0;
+       unsigned long flags;
+       int ret;
 
-       return clockevents_program_event(dev, dev->next_event, false);
+       local_irq_save(flags);
+       ret = tick_broadcast_update_freq(dev, freq);
+       if (ret == -ENODEV)
+               ret = __clockevents_update_freq(dev, freq);
+       local_irq_restore(flags);
+       return ret;
 }
 
 /*
@@ -524,12 +542,13 @@ void clockevents_resume(void)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events
+ * Returns 0 on success, any other value on error
  */
-void clockevents_notify(unsigned long reason, void *arg)
+int clockevents_notify(unsigned long reason, void *arg)
 {
        struct clock_event_device *dev, *tmp;
        unsigned long flags;
-       int cpu;
+       int cpu, ret = 0;
 
        raw_spin_lock_irqsave(&clockevents_lock, flags);
 
@@ -542,7 +561,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 
        case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
        case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-               tick_broadcast_oneshot_control(reason);
+               ret = tick_broadcast_oneshot_control(reason);
                break;
 
        case CLOCK_EVT_NOTIFY_CPU_DYING:
@@ -585,6 +604,7 @@ void clockevents_notify(unsigned long reason, void *arg)
                break;
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 
index af8d1d4f3d55156eaae7936b1a34d8cd7141248f..419a52cecd20c4fa2cac2fab666b1be48a4e9f4b 100644 (file)
@@ -514,12 +514,13 @@ static void sync_cmos_clock(struct work_struct *work)
                next.tv_sec++;
                next.tv_nsec -= NSEC_PER_SEC;
        }
-       schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
+       queue_delayed_work(system_power_efficient_wq,
+                          &sync_cmos_work, timespec_to_jiffies(&next));
 }
 
 void ntp_notify_cmos_timer(void)
 {
-       schedule_delayed_work(&sync_cmos_work, 0);
+       queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0);
 }
 
 #else
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
new file mode 100644 (file)
index 0000000..eb682d5
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * linux/kernel/time/tick-broadcast-hrtimer.c
+ * This file emulates a local clock event device
+ * via a pseudo clock device.
+ */
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/profile.h>
+#include <linux/clockchips.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+
+#include "tick-internal.h"
+
+static struct hrtimer bctimer;
+
+static void bc_set_mode(enum clock_event_mode mode,
+                       struct clock_event_device *bc)
+{
+       switch (mode) {
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               /*
+                * Note, we cannot cancel the timer here as we might
+                * run into the following live lock scenario:
+                *
+                * cpu 0                cpu1
+                * lock(broadcast_lock);
+                *                      hrtimer_interrupt()
+                *                      bc_handler()
+                *                         tick_handle_oneshot_broadcast();
+                *                          lock(broadcast_lock);
+                * hrtimer_cancel()
+                *  wait_for_callback()
+                */
+               hrtimer_try_to_cancel(&bctimer);
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * This is called from the guts of the broadcast code when the cpu
+ * which is about to enter idle has the earliest broadcast timer event.
+ */
+static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
+{
+       /*
+        * We try to cancel the timer first. If the callback is in
+        * flight on some other cpu then we let it handle it. If we
+        * were able to cancel the timer nothing can rearm it as we
+        * own broadcast_lock.
+        *
+        * However we can also be called from the event handler of
+        * ce_broadcast_hrtimer itself when it expires. We cannot
+        * restart the timer because we are in the callback, but we
+        * can set the expiry time and let the callback return
+        * HRTIMER_RESTART.
+        */
+       if (hrtimer_try_to_cancel(&bctimer) >= 0) {
+               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+               /* Bind the "device" to the cpu */
+               bc->bound_on = smp_processor_id();
+       } else if (bc->bound_on == smp_processor_id()) {
+               hrtimer_set_expires(&bctimer, expires);
+       }
+       return 0;
+}
+
+static struct clock_event_device ce_broadcast_hrtimer = {
+       .set_mode               = bc_set_mode,
+       .set_next_ktime         = bc_set_next,
+       .features               = CLOCK_EVT_FEAT_ONESHOT |
+                                 CLOCK_EVT_FEAT_KTIME |
+                                 CLOCK_EVT_FEAT_HRTIMER,
+       .rating                 = 0,
+       .bound_on               = -1,
+       .min_delta_ns           = 1,
+       .max_delta_ns           = KTIME_MAX,
+       .min_delta_ticks        = 1,
+       .max_delta_ticks        = ULONG_MAX,
+       .mult                   = 1,
+       .shift                  = 0,
+       .cpumask                = cpu_all_mask,
+};
+
+static enum hrtimer_restart bc_handler(struct hrtimer *t)
+{
+       ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
+
+       if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX)
+               return HRTIMER_NORESTART;
+
+       return HRTIMER_RESTART;
+}
+
+void tick_setup_hrtimer_broadcast(void)
+{
+       hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       bctimer.function = bc_handler;
+       clockevents_register_device(&ce_broadcast_hrtimer);
+}
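A minimal usage sketch (an assumption, not shown in this merge): a platform whose per-cpu timers stop in deep idle but which lacks a wakeup-capable broadcast clockevent could register the pseudo device from its time init code. The function name below is illustrative.

/* Sketch only: hypothetical call site for the pseudo broadcast device. */
void __init my_platform_time_init(void)
{
	/* ... register clocksources and per-cpu clockevents ... */

	/* fall back to the hrtimer-based broadcast "device" */
	tick_setup_hrtimer_broadcast();
}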
index 98977a57ac72d2a221ed78731b62d67a2e1778fb..64c5990fd500b86e86e9d0a92d86fa7b7b45a0b1 100644 (file)
@@ -120,6 +120,19 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
        return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
+{
+       int ret = -ENODEV;
+
+       if (tick_is_broadcast_device(dev)) {
+               raw_spin_lock(&tick_broadcast_lock);
+               ret = __clockevents_update_freq(dev, freq);
+               raw_spin_unlock(&tick_broadcast_lock);
+       }
+       return ret;
+}
+
+
 static void err_broadcast(const struct cpumask *mask)
 {
        pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
@@ -272,12 +285,8 @@ static void tick_do_broadcast(struct cpumask *mask)
  */
 static void tick_do_periodic_broadcast(void)
 {
-       raw_spin_lock(&tick_broadcast_lock);
-
        cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
        tick_do_broadcast(tmpmask);
-
-       raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -287,13 +296,15 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
        ktime_t next;
 
+       raw_spin_lock(&tick_broadcast_lock);
+
        tick_do_periodic_broadcast();
 
        /*
         * The device is in periodic mode. No reprogramming necessary:
         */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
-               return;
+               goto unlock;
 
        /*
         * Setup the next period for devices, which do not have
@@ -306,9 +317,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
                next = ktime_add(next, tick_period);
 
                if (!clockevents_program_event(dev, next, false))
-                       return;
+                       goto unlock;
                tick_do_periodic_broadcast();
        }
+unlock:
+       raw_spin_unlock(&tick_broadcast_lock);
 }
 
 /*
@@ -630,24 +643,61 @@ again:
        raw_spin_unlock(&tick_broadcast_lock);
 }
 
+static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
+{
+       if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+               return 0;
+       if (bc->next_event.tv64 == KTIME_MAX)
+               return 0;
+       return bc->bound_on == cpu ? -EBUSY : 0;
+}
+
+static void broadcast_shutdown_local(struct clock_event_device *bc,
+                                    struct clock_event_device *dev)
+{
+       /*
+        * For hrtimer based broadcasting we cannot shutdown the cpu
+        * local device if our own event is the first one to expire or
+        * if we own the broadcast timer.
+        */
+       if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
+               if (broadcast_needs_cpu(bc, smp_processor_id()))
+                       return;
+               if (dev->next_event.tv64 < bc->next_event.tv64)
+                       return;
+       }
+       clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+}
+
+static void broadcast_move_bc(int deadcpu)
+{
+       struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+       if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+               return;
+       /* This moves the broadcast assignment to this cpu */
+       clockevents_program_event(bc, bc->next_event, 1);
+}
+
 /*
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
  */
-void tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(unsigned long reason)
 {
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        ktime_t now;
-       int cpu;
+       int cpu, ret = 0;
 
        /*
         * Periodic mode does not care about the enter/exit of power
         * states
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-               return;
+               return 0;
 
        /*
         * We are called with preemption disabled from the depth of the
@@ -658,7 +708,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
        dev = td->evtdev;
 
        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-               return;
+               return 0;
 
        bc = tick_broadcast_device.evtdev;
 
@@ -666,7 +716,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
-                       clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+                       broadcast_shutdown_local(bc, dev);
                        /*
                         * We only reprogram the broadcast timer if we
                         * did not mark ourself in the force mask and
@@ -679,6 +729,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
                            dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
                }
+               /*
+                * If the current CPU owns the hrtimer broadcast
+                * mechanism, it cannot go deep idle and we remove the
+                * CPU from the broadcast mask. We don't have to go
+                * through the EXIT path as the local timer is not
+                * shutdown.
+                */
+               ret = broadcast_needs_cpu(bc, cpu);
+               if (ret)
+                       cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -746,6 +806,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
        }
 out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+       return ret;
 }
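To show the effect of the new -EBUSY return, a hedged sketch of a possible consumer (not code from this diff): an idle driver that requested broadcast mode before entering a deep C-state can now detect that this CPU owns the hrtimer broadcast and pick a shallower state instead. The enter_shallow_state()/enter_deep_state() helpers are placeholders.

/* Sketch only: hypothetical cpuidle-side handling of the new return value. */
extern void enter_shallow_state(void);
extern void enter_deep_state(void);

static void idle_enter_sketch(void)
{
	int cpu = smp_processor_id();

	if (clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu)) {
		/* -EBUSY: this CPU owns the broadcast hrtimer, so the
		 * local tick must keep running; stay shallow. */
		enter_shallow_state();
		return;
	}

	enter_deep_state();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
}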
 
 /*
@@ -852,6 +913,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
+       broadcast_move_bc(cpu);
+
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
index 20b2fe37d1053a21b138dbb2a7856e574c2ea9aa..015661279b682fea4fcbc3a393d9e24760bf8b34 100644 (file)
@@ -98,18 +98,19 @@ static void tick_periodic(int cpu)
 void tick_handle_periodic(struct clock_event_device *dev)
 {
        int cpu = smp_processor_id();
-       ktime_t next;
+       ktime_t next = dev->next_event;
 
        tick_periodic(cpu);
 
        if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
                return;
-       /*
-        * Setup the next period for devices, which do not have
-        * periodic mode:
-        */
-       next = ktime_add(dev->next_event, tick_period);
        for (;;) {
+               /*
+                * Setup the next period for devices, which do not have
+                * periodic mode:
+                */
+               next = ktime_add(next, tick_period);
+
                if (!clockevents_program_event(dev, next, false))
                        return;
                /*
@@ -118,12 +119,11 @@ void tick_handle_periodic(struct clock_event_device *dev)
                 * to be sure we're using a real hardware clocksource.
                 * Otherwise we could get trapped in an infinite
                 * loop, as the tick_periodic() increments jiffies,
-                * when then will increment time, posibly causing
+                * which then will increment time, possibly causing
                 * the loop to trigger again and again.
                 */
                if (timekeeping_valid_for_hres())
                        tick_periodic(cpu);
-               next = ktime_add(next, tick_period);
        }
 }
 
index 8329669b51ec3d8a74897f979f8c6270a4db3f37..7ab92b19965a65934f2cd06de6f4fa34be74dce4 100644 (file)
@@ -46,7 +46,7 @@ extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
 extern void tick_resume_oneshot(void);
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
-extern void tick_broadcast_oneshot_control(unsigned long reason);
+extern int tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
@@ -58,7 +58,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
        BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
@@ -87,7 +87,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
        BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
@@ -111,6 +111,7 @@ extern int tick_resume_broadcast(void);
 extern void tick_broadcast_init(void);
 extern void
 tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
+int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq);
 
 #else /* !BROADCAST */
 
@@ -133,6 +134,8 @@ static inline void tick_shutdown_broadcast(unsigned int *cpup) { }
 static inline void tick_suspend_broadcast(void) { }
 static inline int tick_resume_broadcast(void) { return 0; }
 static inline void tick_broadcast_init(void) { }
+static inline int tick_broadcast_update_freq(struct clock_event_device *dev,
+                                            u32 freq) { return -ENODEV; }
 
 /*
  * Set the periodic handler in non broadcast mode
@@ -152,6 +155,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
        return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 }
 
+int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
+
 #endif
 
 extern void do_timer(unsigned long ticks);
index 0aa4ce81bc168e2432f8f76e1e62c0b41512d47b..5b40279ecd711d93074a33b0198dff6201c202e8 100644 (file)
@@ -1435,7 +1435,8 @@ void update_wall_time(void)
 out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
        if (clock_set)
-               clock_was_set();
+               /* Have to call the _delayed version, since we are in irq context */
+               clock_was_set_delayed();
 }
 
 /**
index 802433a4f5eb7a6b73fa0442d44bf7f196ad6f9b..4d54f97558df8ca830ed4b4f38e847e6d7a72878 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/seq_file.h>
 #include <linux/time.h>
 
+#include "timekeeping_internal.h"
+
 static unsigned int sleep_time_bin[32] = {0};
 
 static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
index accfd241b9e5d5c67040407dfa4f3d8603851de2..87bd529879c23bb12705fa0144cff354064f91dc 100644 (file)
@@ -52,7 +52,7 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/timer.h>
 
-u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 
 EXPORT_SYMBOL(jiffies_64);
 
@@ -81,6 +81,7 @@ struct tvec_base {
        unsigned long timer_jiffies;
        unsigned long next_timer;
        unsigned long active_timers;
+       unsigned long all_timers;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
@@ -337,6 +338,20 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 }
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
+/*
+ * If the list is empty, catch up ->timer_jiffies to the current time.
+ * The caller must hold the tvec_base lock.  Returns true if the list
+ * was empty and therefore ->timer_jiffies was updated.
+ */
+static bool catchup_timer_jiffies(struct tvec_base *base)
+{
+       if (!base->all_timers) {
+               base->timer_jiffies = jiffies;
+               return true;
+       }
+       return false;
+}
+
 static void
 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
@@ -383,15 +398,17 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 
 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
+       (void)catchup_timer_jiffies(base);
        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
        if (!tbase_get_deferrable(timer->base)) {
-               if (time_before(timer->expires, base->next_timer))
+               if (!base->active_timers++ ||
+                   time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
-               base->active_timers++;
        }
+       base->all_timers++;
 }
 
 #ifdef CONFIG_TIMER_STATS
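The two hunks above add a third counter, all_timers, alongside active_timers: it counts every queued timer, deferrable or not, so catchup_timer_jiffies() can tell when the wheel is completely empty and snap timer_jiffies forward instead of letting __run_timers() cascade through the whole idle gap one jiffy at a time. A minimal user-space model of that bookkeeping (a sketch of the idea only, not the kernel code):

#include <stdio.h>
#include <stdbool.h>

struct base_model {
        unsigned long timer_jiffies;    /* last jiffy the wheel has processed */
        unsigned long all_timers;       /* every queued timer, deferrable or not */
};

/* Mirrors catchup_timer_jiffies(): only an empty wheel may jump ahead. */
static bool catchup(struct base_model *b, unsigned long now)
{
        if (!b->all_timers) {
                b->timer_jiffies = now;
                return true;
        }
        return false;
}

int main(void)
{
        struct base_model b = { .timer_jiffies = 1000, .all_timers = 0 };
        unsigned long now = 1000000;    /* the CPU has been idle a long time */

        if (catchup(&b, now))
                printf("empty wheel: skipped %lu jiffies of cascading\n", now - 1000);

        b.all_timers = 1;               /* one timer was queued meanwhile */
        b.timer_jiffies = 1000;
        if (!catchup(&b, now))
                printf("timers pending: must walk %lu..%lu as before\n",
                       b.timer_jiffies, now);
        return 0;
}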
@@ -671,6 +688,8 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
        detach_timer(timer, true);
        if (!tbase_get_deferrable(timer->base))
                base->active_timers--;
+       base->all_timers--;
+       (void)catchup_timer_jiffies(base);
 }
 
 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
@@ -685,6 +704,8 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
+       base->all_timers--;
+       (void)catchup_timer_jiffies(base);
        return 1;
 }
 
@@ -739,12 +760,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
        debug_activate(timer, expires);
 
-       cpu = smp_processor_id();
-
-#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
-       if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
-               cpu = get_nohz_timer_target();
-#endif
+       cpu = get_nohz_timer_target(pinned);
        new_base = per_cpu(tvec_bases, cpu);
 
        if (base != new_base) {
@@ -939,8 +955,15 @@ void add_timer_on(struct timer_list *timer, int cpu)
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to stop its tick can not
         * evaluate the timer wheel.
+        *
+        * Spare the IPI for deferrable timers on idle targets though.
+        * The next busy ticks will take care of it. Except full dynticks
+        * require special care against races with idle_cpu(), let's deal
+        * with that later.
         */
-       wake_up_nohz_cpu(cpu);
+       if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu))
+               wake_up_nohz_cpu(cpu);
+
        spin_unlock_irqrestore(&base->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_timer_on);
@@ -1146,6 +1169,10 @@ static inline void __run_timers(struct tvec_base *base)
        struct timer_list *timer;
 
        spin_lock_irq(&base->lock);
+       if (catchup_timer_jiffies(base)) {
+               spin_unlock_irq(&base->lock);
+               return;
+       }
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
@@ -1160,7 +1187,7 @@ static inline void __run_timers(struct tvec_base *base)
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
-               list_replace_init(base->tv1.vec + index, &work_list);
+               list_replace_init(base->tv1.vec + index, head);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
@@ -1523,9 +1550,8 @@ static int init_timers_cpu(int cpu)
                        if (!base)
                                return -ENOMEM;
 
-                       /* Make sure that tvec_base is 2 byte aligned */
-                       if (tbase_get_deferrable(base)) {
-                               WARN_ON(1);
+                       /* Make sure tvec_base has TIMER_FLAG_MASK bits free */
+                       if (WARN_ON(base != tbase_get_base(base))) {
                                kfree(base);
                                return -ENOMEM;
                        }
@@ -1559,6 +1585,7 @@ static int init_timers_cpu(int cpu)
        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
        base->active_timers = 0;
+       base->all_timers = 0;
        return 0;
 }
 
@@ -1648,9 +1675,9 @@ void __init init_timers(void)
 
        err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                               (void *)(long)smp_processor_id());
-       init_timer_stats();
-
        BUG_ON(err != NOTIFY_OK);
+
+       init_timer_stats();
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
diff --git a/kernel/torture.c b/kernel/torture.c
new file mode 100644 (file)
index 0000000..acc9afc
--- /dev/null
@@ -0,0 +1,719 @@
+/*
+ * Common functions for in-kernel torture tests.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2014
+ *
+ * Author: Paul E. McKenney <paulmck@us.ibm.com>
+ *     Based on kernel/rcu/torture.c.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/trace_clock.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
+
+static char *torture_type;
+static bool verbose;
+
+/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
+#define FULLSTOP_DONTSTOP 0    /* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1    /* System shutdown with torture running. */
+#define FULLSTOP_RMMOD    2    /* Normal rmmod of torture. */
+static int fullstop = FULLSTOP_RMMOD;
+static DEFINE_MUTEX(fullstop_mutex);
+static int *torture_runnable;
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Variables for online-offline handling.  Only present if CPU hotplug
+ * is enabled; otherwise this code does nothing.
+ */
+
+static struct task_struct *onoff_task;
+static long onoff_holdoff;
+static long onoff_interval;
+static long n_offline_attempts;
+static long n_offline_successes;
+static unsigned long sum_offline;
+static int min_offline = -1;
+static int max_offline;
+static long n_online_attempts;
+static long n_online_successes;
+static unsigned long sum_online;
+static int min_online = -1;
+static int max_online;
+
+/*
+ * Execute random CPU-hotplug operations at the interval specified
+ * by the onoff_interval.
+ */
+static int
+torture_onoff(void *arg)
+{
+       int cpu;
+       unsigned long delta;
+       int maxcpu = -1;
+       DEFINE_TORTURE_RANDOM(rand);
+       int ret;
+       unsigned long starttime;
+
+       VERBOSE_TOROUT_STRING("torture_onoff task started");
+       for_each_online_cpu(cpu)
+               maxcpu = cpu;
+       WARN_ON(maxcpu < 0);
+       if (onoff_holdoff > 0) {
+               VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
+               schedule_timeout_interruptible(onoff_holdoff);
+               VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
+       }
+       while (!torture_must_stop()) {
+               cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
+               if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
+                       if (verbose)
+                               pr_alert("%s" TORTURE_FLAG
+                                        "torture_onoff task: offlining %d\n",
+                                        torture_type, cpu);
+                       starttime = jiffies;
+                       n_offline_attempts++;
+                       ret = cpu_down(cpu);
+                       if (ret) {
+                               if (verbose)
+                                       pr_alert("%s" TORTURE_FLAG
+                                                "torture_onoff task: offline %d failed: errno %d\n",
+                                                torture_type, cpu, ret);
+                       } else {
+                               if (verbose)
+                                       pr_alert("%s" TORTURE_FLAG
+                                                "torture_onoff task: offlined %d\n",
+                                                torture_type, cpu);
+                               n_offline_successes++;
+                               delta = jiffies - starttime;
+                               sum_offline += delta;
+                               if (min_offline < 0) {
+                                       min_offline = delta;
+                                       max_offline = delta;
+                               }
+                               if (min_offline > delta)
+                                       min_offline = delta;
+                               if (max_offline < delta)
+                                       max_offline = delta;
+                       }
+               } else if (cpu_is_hotpluggable(cpu)) {
+                       if (verbose)
+                               pr_alert("%s" TORTURE_FLAG
+                                        "torture_onoff task: onlining %d\n",
+                                        torture_type, cpu);
+                       starttime = jiffies;
+                       n_online_attempts++;
+                       ret = cpu_up(cpu);
+                       if (ret) {
+                               if (verbose)
+                                       pr_alert("%s" TORTURE_FLAG
+                                                "torture_onoff task: online %d failed: errno %d\n",
+                                                torture_type, cpu, ret);
+                       } else {
+                               if (verbose)
+                                       pr_alert("%s" TORTURE_FLAG
+                                                "torture_onoff task: onlined %d\n",
+                                                torture_type, cpu);
+                               n_online_successes++;
+                               delta = jiffies - starttime;
+                               sum_online += delta;
+                               if (min_online < 0) {
+                                       min_online = delta;
+                                       max_online = delta;
+                               }
+                               if (min_online > delta)
+                                       min_online = delta;
+                               if (max_online < delta)
+                                       max_online = delta;
+                       }
+               }
+               schedule_timeout_interruptible(onoff_interval);
+       }
+       torture_kthread_stopping("torture_onoff");
+       return 0;
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+/*
+ * Initiate online-offline handling.
+ */
+int torture_onoff_init(long ooholdoff, long oointerval)
+{
+       int ret = 0;
+
+#ifdef CONFIG_HOTPLUG_CPU
+       onoff_holdoff = ooholdoff;
+       onoff_interval = oointerval;
+       if (onoff_interval <= 0)
+               return 0;
+       ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+       return ret;
+}
+EXPORT_SYMBOL_GPL(torture_onoff_init);
+
+/*
+ * Clean up after online/offline testing.
+ */
+static void torture_onoff_cleanup(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       if (onoff_task == NULL)
+               return;
+       VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
+       kthread_stop(onoff_task);
+       onoff_task = NULL;
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+}
+EXPORT_SYMBOL_GPL(torture_onoff_cleanup);
+
+/*
+ * Print online/offline testing statistics.
+ */
+char *torture_onoff_stats(char *page)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       page += sprintf(page,
+                      "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
+                      n_online_successes, n_online_attempts,
+                      n_offline_successes, n_offline_attempts,
+                      min_online, max_online,
+                      min_offline, max_offline,
+                      sum_online, sum_offline, HZ);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+       return page;
+}
+EXPORT_SYMBOL_GPL(torture_onoff_stats);
+
+/*
+ * Were all the online/offline operations successful?
+ */
+bool torture_onoff_failures(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       return n_online_successes != n_online_attempts ||
+              n_offline_successes != n_offline_attempts;
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+       return false;
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
+}
+EXPORT_SYMBOL_GPL(torture_onoff_failures);
+
+#define TORTURE_RANDOM_MULT    39916801  /* prime */
+#define TORTURE_RANDOM_ADD     479001701 /* prime */
+#define TORTURE_RANDOM_REFRESH 10000
+
+/*
+ * Crude but fast random-number generator.  Uses a linear congruential
+ * generator, with occasional help from cpu_clock().
+ */
+unsigned long
+torture_random(struct torture_random_state *trsp)
+{
+       if (--trsp->trs_count < 0) {
+               trsp->trs_state += (unsigned long)local_clock();
+               trsp->trs_count = TORTURE_RANDOM_REFRESH;
+       }
+       trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
+               TORTURE_RANDOM_ADD;
+       return swahw32(trsp->trs_state);
+}
+EXPORT_SYMBOL_GPL(torture_random);
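torture_random() above is a plain linear congruential generator whose output is run through swahw32() (a swap of the two 16-bit halfwords of a 32-bit value), with local_clock() folded into the state every TORTURE_RANDOM_REFRESH calls. A standalone user-space model of the arithmetic, with the clock mixing left out, might look like this (sketch only):

#include <stdio.h>
#include <stdint.h>

#define TORTURE_RANDOM_MULT     39916801UL
#define TORTURE_RANDOM_ADD      479001701UL

/* Models swahw32(): swap the two 16-bit halfwords of a 32-bit value. */
static uint32_t swahw32_model(uint32_t x)
{
        return (x << 16) | (x >> 16);
}

int main(void)
{
        unsigned long state = 12345;    /* arbitrary seed */
        int i;

        for (i = 0; i < 5; i++) {
                state = state * TORTURE_RANDOM_MULT + TORTURE_RANDOM_ADD;
                printf("%u\n", swahw32_model((uint32_t)state));
        }
        return 0;
}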
+
+/*
+ * Variables for shuffling.  The idea is to ensure that each CPU stays
+ * idle for an extended period to test interactions with dyntick idle,
+ * as well as interactions with any per-CPU variables.
+ */
+struct shuffle_task {
+       struct list_head st_l;
+       struct task_struct *st_t;
+};
+
+static long shuffle_interval;  /* In jiffies. */
+static struct task_struct *shuffler_task;
+static cpumask_var_t shuffle_tmp_mask;
+static int shuffle_idle_cpu;   /* Force all torture tasks off this CPU */
+static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
+static DEFINE_MUTEX(shuffle_task_mutex);
+
+/*
+ * Register a task to be shuffled.  If there is no memory, just splat
+ * and don't bother registering.
+ */
+void torture_shuffle_task_register(struct task_struct *tp)
+{
+       struct shuffle_task *stp;
+
+       if (WARN_ON_ONCE(tp == NULL))
+               return;
+       stp = kmalloc(sizeof(*stp), GFP_KERNEL);
+       if (WARN_ON_ONCE(stp == NULL))
+               return;
+       stp->st_t = tp;
+       mutex_lock(&shuffle_task_mutex);
+       list_add(&stp->st_l, &shuffle_task_list);
+       mutex_unlock(&shuffle_task_mutex);
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_task_register);
+
+/*
+ * Unregister all tasks, for example, at the end of the torture run.
+ */
+static void torture_shuffle_task_unregister_all(void)
+{
+       struct shuffle_task *stp;
+       struct shuffle_task *p;
+
+       mutex_lock(&shuffle_task_mutex);
+       list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
+               list_del(&stp->st_l);
+               kfree(stp);
+       }
+       mutex_unlock(&shuffle_task_mutex);
+}
+
+/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
+ * A special case is when shuffle_idle_cpu = -1, in which case we allow
+ * the tasks to run on all CPUs.
+ */
+static void torture_shuffle_tasks(void)
+{
+       struct shuffle_task *stp;
+
+       cpumask_setall(shuffle_tmp_mask);
+       get_online_cpus();
+
+       /* No point in shuffling if there is only one online CPU (ex: UP) */
+       if (num_online_cpus() == 1) {
+               put_online_cpus();
+               return;
+       }
+
+       /* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
+       shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
+       if (shuffle_idle_cpu >= nr_cpu_ids)
+               shuffle_idle_cpu = -1;
+       if (shuffle_idle_cpu != -1) {
+               cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);
+               if (cpumask_empty(shuffle_tmp_mask)) {
+                       put_online_cpus();
+                       return;
+               }
+       }
+
+       mutex_lock(&shuffle_task_mutex);
+       list_for_each_entry(stp, &shuffle_task_list, st_l)
+               set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
+       mutex_unlock(&shuffle_task_mutex);
+
+       put_online_cpus();
+}
+
+/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+ * system to become idle at a time and cut off its timer ticks. This is meant
+ * to test the support for such tickless idle CPU in RCU.
+ */
+static int torture_shuffle(void *arg)
+{
+       VERBOSE_TOROUT_STRING("torture_shuffle task started");
+       do {
+               schedule_timeout_interruptible(shuffle_interval);
+               torture_shuffle_tasks();
+               torture_shutdown_absorb("torture_shuffle");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("torture_shuffle");
+       return 0;
+}
+
+/*
+ * Start the shuffler, with shuffint in jiffies.
+ */
+int torture_shuffle_init(long shuffint)
+{
+       shuffle_interval = shuffint;
+
+       shuffle_idle_cpu = -1;
+
+       if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
+               VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
+               return -ENOMEM;
+       }
+
+       /* Create the shuffler thread */
+       return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_init);
+
+/*
+ * Stop the shuffling.
+ */
+static void torture_shuffle_cleanup(void)
+{
+       torture_shuffle_task_unregister_all();
+       if (shuffler_task) {
+               VERBOSE_TOROUT_STRING("Stopping torture_shuffle task");
+               kthread_stop(shuffler_task);
+               free_cpumask_var(shuffle_tmp_mask);
+       }
+       shuffler_task = NULL;
+}
+EXPORT_SYMBOL_GPL(torture_shuffle_cleanup);
+
+/*
+ * Variables for auto-shutdown.  This allows "lights out" torture runs
+ * to be fully scripted.
+ */
+static int shutdown_secs;              /* desired test duration in seconds. */
+static struct task_struct *shutdown_task;
+static unsigned long shutdown_time;    /* jiffies to system shutdown. */
+static void (*torture_shutdown_hook)(void);
+
+/*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+void torture_shutdown_absorb(const char *title)
+{
+       while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+               pr_notice("torture thread %s parking due to system shutdown\n",
+                         title);
+               schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+       }
+}
+EXPORT_SYMBOL_GPL(torture_shutdown_absorb);
+
+/*
+ * Cause the torture test to shutdown the system after the test has
+ * run for the time specified by the shutdown_secs parameter.
+ */
+static int torture_shutdown(void *arg)
+{
+       long delta;
+       unsigned long jiffies_snap;
+
+       VERBOSE_TOROUT_STRING("torture_shutdown task started");
+       jiffies_snap = jiffies;
+       while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
+              !torture_must_stop()) {
+               delta = shutdown_time - jiffies_snap;
+               if (verbose)
+                       pr_alert("%s" TORTURE_FLAG
+                                "torture_shutdown task: %lu jiffies remaining\n",
+                                torture_type, delta);
+               schedule_timeout_interruptible(delta);
+               jiffies_snap = jiffies;
+       }
+       if (torture_must_stop()) {
+               torture_kthread_stopping("torture_shutdown");
+               return 0;
+       }
+
+       /* OK, shut down the system. */
+
+       VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
+       shutdown_task = NULL;   /* Avoid self-kill deadlock. */
+       if (torture_shutdown_hook)
+               torture_shutdown_hook();
+       else
+               VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
+       kernel_power_off();     /* Shut down the system. */
+       return 0;
+}
+
+/*
+ * Start up the shutdown task.
+ */
+int torture_shutdown_init(int ssecs, void (*cleanup)(void))
+{
+       int ret = 0;
+
+       shutdown_secs = ssecs;
+       torture_shutdown_hook = cleanup;
+       if (shutdown_secs > 0) {
+               shutdown_time = jiffies + shutdown_secs * HZ;
+               ret = torture_create_kthread(torture_shutdown, NULL,
+                                            shutdown_task);
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(torture_shutdown_init);
+
+/*
+ * Detect and respond to a system shutdown.
+ */
+static int torture_shutdown_notify(struct notifier_block *unused1,
+                                  unsigned long unused2, void *unused3)
+{
+       mutex_lock(&fullstop_mutex);
+       if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
+               VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
+               ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
+       } else {
+               pr_warn("Concurrent rmmod and shutdown illegal!\n");
+       }
+       mutex_unlock(&fullstop_mutex);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block torture_shutdown_nb = {
+       .notifier_call = torture_shutdown_notify,
+};
+
+/*
+ * Shut down the shutdown task.  Say what???  Heh!  This can happen if
+ * the torture module gets an rmmod before the shutdown time arrives.  ;-)
+ */
+static void torture_shutdown_cleanup(void)
+{
+       unregister_reboot_notifier(&torture_shutdown_nb);
+       if (shutdown_task != NULL) {
+               VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
+               kthread_stop(shutdown_task);
+       }
+       shutdown_task = NULL;
+}
+
+/*
+ * Variables for stuttering, which means to periodically pause and
+ * restart testing in order to catch bugs that appear when load is
+ * suddenly applied to or removed from the system.
+ */
+static struct task_struct *stutter_task;
+static int stutter_pause_test;
+static int stutter;
+
+/*
+ * Block until the stutter interval ends.  This must be called periodically
+ * by all running kthreads that need to be subject to stuttering.
+ */
+void stutter_wait(const char *title)
+{
+       while (ACCESS_ONCE(stutter_pause_test) ||
+              (torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
+               if (stutter_pause_test)
+                       schedule_timeout_interruptible(1);
+               else
+                       schedule_timeout_interruptible(round_jiffies_relative(HZ));
+               torture_shutdown_absorb(title);
+       }
+}
+EXPORT_SYMBOL_GPL(stutter_wait);
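Client kthreads opt into stuttering simply by calling stutter_wait() once per loop iteration. A hypothetical torture kthread (the name and workload below are invented for illustration; only the torture.h calls are real) would look roughly like:

#include <linux/kthread.h>
#include <linux/torture.h>

static int foo_torture_kthread(void *arg)
{
        VERBOSE_TOROUT_STRING("foo_torture_kthread started");
        do {
                /* exercise the primitive under test here */
                stutter_wait("foo_torture_kthread");
        } while (!torture_must_stop());
        torture_kthread_stopping("foo_torture_kthread");
        return 0;
}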
+
+/*
+ * Cause the torture test to "stutter", starting and stopping all
+ * threads periodically.
+ */
+static int torture_stutter(void *arg)
+{
+       VERBOSE_TOROUT_STRING("torture_stutter task started");
+       do {
+               if (!torture_must_stop()) {
+                       schedule_timeout_interruptible(stutter);
+                       ACCESS_ONCE(stutter_pause_test) = 1;
+               }
+               if (!torture_must_stop())
+                       schedule_timeout_interruptible(stutter);
+               ACCESS_ONCE(stutter_pause_test) = 0;
+               torture_shutdown_absorb("torture_stutter");
+       } while (!torture_must_stop());
+       torture_kthread_stopping("torture_stutter");
+       return 0;
+}
+
+/*
+ * Initialize and kick off the torture_stutter kthread.
+ */
+int torture_stutter_init(int s)
+{
+       int ret;
+
+       stutter = s;
+       ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(torture_stutter_init);
+
+/*
+ * Cleanup after the torture_stutter kthread.
+ */
+static void torture_stutter_cleanup(void)
+{
+       if (!stutter_task)
+               return;
+       VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
+       kthread_stop(stutter_task);
+       stutter_task = NULL;
+}
+
+/*
+ * Initialize torture module.  Please note that this is -not- invoked via
+ * the usual module_init() mechanism, but rather by an explicit call from
+ * the client torture module.  This call must be paired with a later
+ * torture_init_end().
+ *
+ * The runnable parameter points to a flag that controls whether or not
+ * the test is currently runnable.  If there is no such flag, pass in NULL.
+ */
+void __init torture_init_begin(char *ttype, bool v, int *runnable)
+{
+       mutex_lock(&fullstop_mutex);
+       torture_type = ttype;
+       verbose = v;
+       torture_runnable = runnable;
+       fullstop = FULLSTOP_DONTSTOP;
+
+}
+EXPORT_SYMBOL_GPL(torture_init_begin);
+
+/*
+ * Tell the torture module that initialization is complete.
+ */
+void __init torture_init_end(void)
+{
+       mutex_unlock(&fullstop_mutex);
+       register_reboot_notifier(&torture_shutdown_nb);
+}
+EXPORT_SYMBOL_GPL(torture_init_end);
+
+/*
+ * Clean up torture module.  Please note that this is -not- invoked via
+ * the usual module_exit() mechanism, but rather by an explicit call from
+ * the client torture module.  Returns true if a race with system shutdown
+ * is detected; otherwise, all kthreads started by functions in this file
+ * will be shut down.
+ *
+ * This must be called before the caller starts shutting down its own
+ * kthreads.
+ */
+bool torture_cleanup(void)
+{
+       mutex_lock(&fullstop_mutex);
+       if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+               pr_warn("Concurrent rmmod and shutdown illegal!\n");
+               mutex_unlock(&fullstop_mutex);
+               schedule_timeout_uninterruptible(10);
+               return true;
+       }
+       ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
+       mutex_unlock(&fullstop_mutex);
+       torture_shutdown_cleanup();
+       torture_shuffle_cleanup();
+       torture_stutter_cleanup();
+       torture_onoff_cleanup();
+       return false;
+}
+EXPORT_SYMBOL_GPL(torture_cleanup);
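Taken together, a client torture module is expected to bracket its setup with torture_init_begin()/torture_init_end() and undo everything through torture_cleanup(). A rough, hypothetical sketch of that wiring (the module name, parameter values, and cleanup hook are invented for illustration; the real clients are rcutorture and the locktorture module added elsewhere in this merge):

#include <linux/module.h>
#include <linux/torture.h>

static int foo_runnable = 1;
static bool foo_verbose;
static int foo_shutdown_secs;           /* 0 = no scripted auto-shutdown */

static void foo_torture_cleanup_hook(void)
{
        /* stop the client's own kthreads before the system powers off */
}

static int __init foo_torture_init(void)
{
        torture_init_begin("foo", foo_verbose, &foo_runnable);
        /* create the client's torture kthreads here */
        torture_onoff_init(5 * HZ, 3 * HZ);     /* holdoff and interval in jiffies */
        torture_shuffle_init(3 * HZ);           /* jiffies between shuffles */
        torture_stutter_init(5 * HZ);           /* stutter period in jiffies */
        torture_shutdown_init(foo_shutdown_secs, foo_torture_cleanup_hook);
        torture_init_end();
        return 0;
}

static void __exit foo_torture_exit(void)
{
        if (torture_cleanup())
                return;         /* racing with shutdown; kthreads already parked */
        /* stop the client's own kthreads here */
}

module_init(foo_torture_init);
module_exit(foo_torture_exit);
MODULE_LICENSE("GPL");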
+
+/*
+ * Is it time for the current torture test to stop?
+ */
+bool torture_must_stop(void)
+{
+       return torture_must_stop_irq() || kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(torture_must_stop);
+
+/*
+ * Is it time for the current torture test to stop?  This is the irq-safe
+ * version, hence no check for kthread_should_stop().
+ */
+bool torture_must_stop_irq(void)
+{
+       return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
+}
+EXPORT_SYMBOL_GPL(torture_must_stop_irq);
+
+/*
+ * Each kthread must wait for kthread_should_stop() before returning from
+ * its top-level function, otherwise segfaults ensue.  This function
+ * prints a "stopping" message and waits for kthread_should_stop(), and
+ * should be called from all torture kthreads immediately prior to
+ * returning.
+ */
+void torture_kthread_stopping(char *title)
+{
+       if (verbose)
+               VERBOSE_TOROUT_STRING(title);
+       while (!kthread_should_stop()) {
+               torture_shutdown_absorb(title);
+               schedule_timeout_uninterruptible(1);
+       }
+}
+EXPORT_SYMBOL_GPL(torture_kthread_stopping);
+
+/*
+ * Create a generic torture kthread that is immediately runnable.  If you
+ * need the kthread to be stopped so that you can do something to it before
+ * it starts, you will need to open-code your own.
+ */
+int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+                           char *f, struct task_struct **tp)
+{
+       int ret = 0;
+
+       VERBOSE_TOROUT_STRING(m);
+       *tp = kthread_run(fn, arg, s);
+       if (IS_ERR(*tp)) {
+               ret = PTR_ERR(*tp);
+               VERBOSE_TOROUT_ERRSTRING(f);
+               *tp = NULL;
+       }
+       torture_shuffle_task_register(*tp);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(_torture_create_kthread);
+
+/*
+ * Stop a generic kthread, emitting a message.
+ */
+void _torture_stop_kthread(char *m, struct task_struct **tp)
+{
+       if (*tp == NULL)
+               return;
+       VERBOSE_TOROUT_STRING(m);
+       kthread_stop(*tp);
+       *tp = NULL;
+}
+EXPORT_SYMBOL_GPL(_torture_stop_kthread);
index a5457d577b98313b1ca8b1670ad32a2140001498..0434ff1b808e92c539b70828185c8fa1c44c865a 100644 (file)
@@ -40,8 +40,8 @@ static int write_iteration = 50;
 module_param(write_iteration, uint, 0644);
 MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
 
-static int producer_nice = 19;
-static int consumer_nice = 19;
+static int producer_nice = MAX_NICE;
+static int consumer_nice = MAX_NICE;
 
 static int producer_fifo = -1;
 static int consumer_fifo = -1;
@@ -308,7 +308,7 @@ static void ring_buffer_producer(void)
 
        /* Let the user know that the test is running at low priority */
        if (producer_fifo < 0 && consumer_fifo < 0 &&
-           producer_nice == 19 && consumer_nice == 19)
+           producer_nice == MAX_NICE && consumer_nice == MAX_NICE)
                trace_printk("WARNING!!! This test is running at lowest priority.\n");
 
        trace_printk("Time:     %lld (usecs)\n", time);
index 815c878f409bd94e08777d1b9f83b1553f4a2e24..24c1f23825579df4f2bf46e2dca98e6af8fb4abd 100644 (file)
@@ -1600,15 +1600,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
+static struct ring_buffer *temp_buffer;
+
 struct ring_buffer_event *
 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
                          struct ftrace_event_file *ftrace_file,
                          int type, unsigned long len,
                          unsigned long flags, int pc)
 {
+       struct ring_buffer_event *entry;
+
        *current_rb = ftrace_file->tr->trace_buffer.buffer;
-       return trace_buffer_lock_reserve(*current_rb,
+       entry = trace_buffer_lock_reserve(*current_rb,
                                         type, len, flags, pc);
+       /*
+        * If tracing is off, but we have triggers enabled
+        * we still need to look at the event data. Use the temp_buffer
+        * to store the trace event for the trigger to use. It's recursion
+        * safe and will not be recorded anywhere.
+        */
+       if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
+               *current_rb = temp_buffer;
+               entry = trace_buffer_lock_reserve(*current_rb,
+                                                 type, len, flags, pc);
+       }
+       return entry;
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
 
@@ -6494,11 +6510,16 @@ __init static int tracer_alloc_buffers(void)
 
        raw_spin_lock_init(&global_trace.start_lock);
 
+       /* Used for event triggers */
+       temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
+       if (!temp_buffer)
+               goto out_free_cpumask;
+
        /* TODO: make the number of buffers hot pluggable with CPUS */
        if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
-               goto out_free_cpumask;
+               goto out_free_temp_buffer;
        }
 
        if (global_trace.buffer_disabled)
@@ -6540,6 +6561,8 @@ __init static int tracer_alloc_buffers(void)
 
        return 0;
 
+out_free_temp_buffer:
+       ring_buffer_free(temp_buffer);
 out_free_cpumask:
        free_percpu(global_trace.trace_buffer.data);
 #ifdef CONFIG_TRACER_MAX_TRACE
index e854f420e033eb65a2bca233bb8df2e42778faf7..c894614de14d8efdbbc145141d19f9a56af9a984 100644 (file)
@@ -31,9 +31,25 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
        }
 
        /* The ftrace function trace is allowed only for root. */
-       if (ftrace_event_is_function(tp_event) &&
-           perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
-               return -EPERM;
+       if (ftrace_event_is_function(tp_event)) {
+               if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               /*
+                * We don't allow user-space callchains for the function
+                * trace event, due to issues with page faults while tracing
+                * the page fault handler, and its overall tricky nature.
+                */
+               if (!p_event->attr.exclude_callchain_user)
+                       return -EINVAL;
+
+               /*
+                * Same reason to disable user stack dump as for user space
+                * callchains above.
+                */
+               if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
+                       return -EINVAL;
+       }
 
        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
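For what the new checks mean in practice: a perf user requesting the function trace event now has to leave user-space callchains and user-stack sampling out of the attribute. A hedged user-space sketch follows; the tracepoint id and the surrounding setup are assumptions (the id is normally read from tracefs), and the usual paranoid/capability checks still apply on top of this:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_function_trace_event(unsigned long long tracepoint_id)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = tracepoint_id;            /* id of ftrace:function, from tracefs */
        attr.sample_type = PERF_SAMPLE_RAW;     /* and *not* PERF_SAMPLE_STACK_USER */
        attr.exclude_callchain_user = 1;        /* required by the new check */

        /* pid 0, any cpu, no group, no flags: monitor the calling task */
        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}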
index 2aefbee93a6d574a0ea082632788a1d5c7791474..887ef88b0bc70e10463a37da502e1718385e35ca 100644 (file)
@@ -498,14 +498,14 @@ void trace_hardirqs_off(void)
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
-void trace_hardirqs_on_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-void trace_hardirqs_off_caller(unsigned long caller_addr)
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
index 193e977a10eaeb6a7f9ca5927d08f558d68df434..0ee63af30bd14a4ad7f4b8f846d19b100fd596b3 100644 (file)
@@ -516,6 +516,13 @@ void destroy_work_on_stack(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 
+void destroy_delayed_work_on_stack(struct delayed_work *work)
+{
+       destroy_timer_on_stack(&work->timer);
+       debug_object_free(&work->work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
+
 #else
 static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
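The new helper completes the on-stack API for delayed work: one call releases the debug objects of both the work item and its embedded timer. A hypothetical caller, assuming the existing *_ONSTACK initializers, might pair it like this (sketch only):

#include <linux/workqueue.h>

static void example_run_once(struct workqueue_struct *wq, work_func_t fn)
{
        struct delayed_work dwork;

        INIT_DELAYED_WORK_ONSTACK(&dwork, fn);
        queue_delayed_work(wq, &dwork, HZ / 10);
        flush_delayed_work(&dwork);             /* wait for fn to finish */
        destroy_delayed_work_on_stack(&dwork);  /* new helper added above */
}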
@@ -3225,7 +3232,7 @@ static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
                return -ENOMEM;
 
        if (sscanf(buf, "%d", &attrs->nice) == 1 &&
-           attrs->nice >= -20 && attrs->nice <= 19)
+           attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
                ret = apply_workqueue_attrs(wq, attrs);
        else
                ret = -EINVAL;
index a48abeac753f39ef520fbe1bc5243cb494c47621..dd7f8858188a6ac92ac19bb7ae032d62786ee612 100644 (file)
@@ -980,6 +980,21 @@ config DEBUG_LOCKING_API_SELFTESTS
          The following locking APIs are covered: spinlocks, rwlocks,
          mutexes and rwsems.
 
+config LOCK_TORTURE_TEST
+       tristate "torture tests for locking"
+       depends on DEBUG_KERNEL
+       select TORTURE_TEST
+       default n
+       help
+         This option provides a kernel module that runs torture tests
+         on kernel locking primitives.  The kernel module may be built
+         after the fact on the running kernel to be tested, if desired.
+
+         Say Y here if you want kernel locking-primitive torture tests
+         to be built into the kernel.
+         Say M if you want these torture tests to build as a module.
+         Say N if you are unsure.
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS
@@ -1141,9 +1156,14 @@ config SPARSE_RCU_POINTER
 
         Say N if you are unsure.
 
+config TORTURE_TEST
+       tristate
+       default n
+
 config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
+       select TORTURE_TEST
        default n
        help
          This option provides a kernel module that runs torture tests
index 1e5b2df442916de82ef497f844c068724f8cd555..61489677870007e273301209438ac4913562b140 100644 (file)
@@ -244,8 +244,19 @@ static void __prandom_reseed(bool late)
        static bool latch = false;
        static DEFINE_SPINLOCK(lock);
 
+       /* Asking for random bytes might result in bytes getting
+        * moved into the nonblocking pool and thus marking it
+        * as initialized. In this case we would double back into
+        * this function and attempt to do a late reseed.
+        * Ignore the pointless attempt to reseed again if we're
+        * already waiting for bytes when the nonblocking pool
+        * got initialized.
+        */
+
        /* only allow initial seeding (late == false) once */
-       spin_lock_irqsave(&lock, flags);
+       if (!spin_trylock_irqsave(&lock, flags))
+               return;
+
        if (latch && !late)
                goto out;
        latch = true;
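The comment describes a re-entrancy problem rather than plain contention: pulling bytes from the random pool can itself mark the nonblocking pool initialized and call straight back into the reseed path, so taking the spinlock unconditionally could deadlock on itself. A tiny user-space model of why trylock is the right tool here (sketch only, with a pthread mutex standing in for the spinlock):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void reseed(int late);

/* Stand-in for get_random_bytes(): pulling bytes may re-enter reseed(). */
static void request_entropy(void)
{
        reseed(1);
}

static void reseed(int late)
{
        if (pthread_mutex_trylock(&lock) != 0) {
                printf("%s reseed skipped: already reseeding\n",
                       late ? "late" : "initial");
                return;
        }
        if (!late)
                request_entropy();      /* can call straight back into reseed() */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        reseed(0);
        return 0;
}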
index e5878de4f1013ddbdd3db07d1fca2bcc3a692a7c..9b1f9062a202fc243bef05853e5b965e91f96182 100644 (file)
@@ -648,7 +648,7 @@ EXPORT_SYMBOL(memmove);
  * @count: The size of the area.
  */
 #undef memcmp
-int memcmp(const void *cs, const void *ct, size_t count)
+__visible int memcmp(const void *cs, const void *ct, size_t count)
 {
        const unsigned char *su1, *su2;
        int res = 0;
index ae3c8f3595d4ff522f0427b05ace2f7e041da8ad..4755c857694246ca6365a382931bcd431af9bda2 100644 (file)
@@ -1556,10 +1556,10 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 
 #ifdef CONFIG_COMPAT
 
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
-                                    compat_ulong_t __user *nmask,
-                                    compat_ulong_t maxnode,
-                                    compat_ulong_t addr, compat_ulong_t flags)
+COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+                      compat_ulong_t __user *, nmask,
+                      compat_ulong_t, maxnode,
+                      compat_ulong_t, addr, compat_ulong_t, flags)
 {
        long err;
        unsigned long __user *nm = NULL;
@@ -1586,8 +1586,8 @@ asmlinkage long compat_sys_get_mempolicy(int __user *policy,
        return err;
 }
 
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
-                                    compat_ulong_t maxnode)
+COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
+                      compat_ulong_t, maxnode)
 {
        long err = 0;
        unsigned long __user *nm = NULL;
@@ -1609,9 +1609,9 @@ asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
        return sys_set_mempolicy(mode, nm, nr_bits+1);
 }
 
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
-                            compat_ulong_t mode, compat_ulong_t __user *nmask,
-                            compat_ulong_t maxnode, compat_ulong_t flags)
+COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
+                      compat_ulong_t, mode, compat_ulong_t __user *, nmask,
+                      compat_ulong_t, maxnode, compat_ulong_t, flags)
 {
        long err = 0;
        unsigned long __user *nm = NULL;
@@ -2301,35 +2301,6 @@ static void sp_free(struct sp_node *n)
        kmem_cache_free(sn_cache, n);
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
-{
-       /* Never defer a private fault */
-       if (cpupid_match_pid(p, last_cpupid))
-               return false;
-
-       if (p->numa_migrate_deferred) {
-               p->numa_migrate_deferred--;
-               return true;
-       }
-       return false;
-}
-
-static inline void defer_numa_migrate(struct task_struct *p)
-{
-       p->numa_migrate_deferred = sysctl_numa_balancing_migrate_deferred;
-}
-#else
-static inline bool numa_migrate_deferred(struct task_struct *p, int last_cpupid)
-{
-       return false;
-}
-
-static inline void defer_numa_migrate(struct task_struct *p)
-{
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
 /**
  * mpol_misplaced - check whether current page node is valid in policy
  *
@@ -2403,52 +2374,9 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 
        /* Migrate the page towards the node whose CPU is referencing it */
        if (pol->flags & MPOL_F_MORON) {
-               int last_cpupid;
-               int this_cpupid;
-
                polnid = thisnid;
-               this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
-
-               /*
-                * Multi-stage node selection is used in conjunction
-                * with a periodic migration fault to build a temporal
-                * task<->page relation. By using a two-stage filter we
-                * remove short/unlikely relations.
-                *
-                * Using P(p) ~ n_p / n_t as per frequentist
-                * probability, we can equate a task's usage of a
-                * particular page (n_p) per total usage of this
-                * page (n_t) (in a given time-span) to a probability.
-                *
-                * Our periodic faults will sample this probability and
-                * getting the same result twice in a row, given these
-                * samples are fully independent, is then given by
-                * P(n)^2, provided our sample period is sufficiently
-                * short compared to the usage pattern.
-                *
-                * This quadric squishes small probabilities, making
-                * it less likely we act on an unlikely task<->page
-                * relation.
-                */
-               last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
-               if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid) {
 
-                       /* See sysctl_numa_balancing_migrate_deferred comment */
-                       if (!cpupid_match_pid(current, last_cpupid))
-                               defer_numa_migrate(current);
-
-                       goto out;
-               }
-
-               /*
-                * The quadratic filter above reduces extraneous migration
-                * of shared pages somewhat. This code reduces it even more,
-                * reducing the overhead of page migrations of shared pages.
-                * This makes workloads with shared pages rely more on
-                * "move task near its memory", and less on "move memory
-                * towards its task", which is exactly what we want.
-                */
-               if (numa_migrate_deferred(current, last_cpupid))
+               if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
                        goto out;
        }
 
index 8a8cd0265e523b54909e456881a00d497fa456d3..f802c2d216a7d28bf76c5c911d83d10d213fd474 100644 (file)
@@ -31,6 +31,9 @@ void use_mm(struct mm_struct *mm)
        tsk->mm = mm;
        switch_mm(active_mm, mm, tsk);
        task_unlock(tsk);
+#ifdef finish_arch_post_lock_switch
+       finish_arch_post_lock_switch();
+#endif
 
        if (active_mm != mm)
                mmdrop(active_mm);
index 036cfe07050f65eee962f51e6223321f9e01e132..63e24fb4387b6d305960f9e7ba8c0554e6818ca5 100644 (file)
@@ -102,10 +102,11 @@ struct pcpu_chunk {
        int                     free_size;      /* free bytes in the chunk */
        int                     contig_hint;    /* max contiguous size hint */
        void                    *base_addr;     /* base address of this chunk */
-       int                     map_used;       /* # of map entries used */
+       int                     map_used;       /* # of map entries used before the sentinel */
        int                     map_alloc;      /* # of map entries allocated */
        int                     *map;           /* allocation map */
        void                    *data;          /* chunk data */
+       int                     first_free;     /* no free below this */
        bool                    immutable;      /* no [de]population allowed */
        unsigned long           populated[];    /* populated bitmap */
 };
@@ -356,11 +357,11 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
        int new_alloc;
 
-       if (chunk->map_alloc >= chunk->map_used + 2)
+       if (chunk->map_alloc >= chunk->map_used + 3)
                return 0;
 
        new_alloc = PCPU_DFL_MAP_ALLOC;
-       while (new_alloc < chunk->map_used + 2)
+       while (new_alloc < chunk->map_used + 3)
                new_alloc *= 2;
 
        return new_alloc;
@@ -417,48 +418,6 @@ out_unlock:
        return 0;
 }
 
-/**
- * pcpu_split_block - split a map block
- * @chunk: chunk of interest
- * @i: index of map block to split
- * @head: head size in bytes (can be 0)
- * @tail: tail size in bytes (can be 0)
- *
- * Split the @i'th map block into two or three blocks.  If @head is
- * non-zero, @head bytes block is inserted before block @i moving it
- * to @i+1 and reducing its size by @head bytes.
- *
- * If @tail is non-zero, the target block, which can be @i or @i+1
- * depending on @head, is reduced by @tail bytes and @tail byte block
- * is inserted after the target block.
- *
- * @chunk->map must have enough free slots to accommodate the split.
- *
- * CONTEXT:
- * pcpu_lock.
- */
-static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
-                            int head, int tail)
-{
-       int nr_extra = !!head + !!tail;
-
-       BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
-
-       /* insert new subblocks */
-       memmove(&chunk->map[i + nr_extra], &chunk->map[i],
-               sizeof(chunk->map[0]) * (chunk->map_used - i));
-       chunk->map_used += nr_extra;
-
-       if (head) {
-               chunk->map[i + 1] = chunk->map[i] - head;
-               chunk->map[i++] = head;
-       }
-       if (tail) {
-               chunk->map[i++] -= tail;
-               chunk->map[i] = tail;
-       }
-}
-
 /**
  * pcpu_alloc_area - allocate area from a pcpu_chunk
  * @chunk: chunk of interest
@@ -483,19 +442,27 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
        int oslot = pcpu_chunk_slot(chunk);
        int max_contig = 0;
        int i, off;
+       bool seen_free = false;
+       int *p;
 
-       for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
-               bool is_last = i + 1 == chunk->map_used;
+       for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
                int head, tail;
+               int this_size;
+
+               off = *p;
+               if (off & 1)
+                       continue;
 
                /* extra for alignment requirement */
                head = ALIGN(off, align) - off;
-               BUG_ON(i == 0 && head != 0);
 
-               if (chunk->map[i] < 0)
-                       continue;
-               if (chunk->map[i] < head + size) {
-                       max_contig = max(chunk->map[i], max_contig);
+               this_size = (p[1] & ~1) - off;
+               if (this_size < head + size) {
+                       if (!seen_free) {
+                               chunk->first_free = i;
+                               seen_free = true;
+                       }
+                       max_contig = max(this_size, max_contig);
                        continue;
                }
 
@@ -505,44 +472,59 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
                 * than sizeof(int), which is very small but isn't too
                 * uncommon for percpu allocations.
                 */
-               if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
-                       if (chunk->map[i - 1] > 0)
-                               chunk->map[i - 1] += head;
-                       else {
-                               chunk->map[i - 1] -= head;
+               if (head && (head < sizeof(int) || !(p[-1] & 1))) {
+                       *p = off += head;
+                       if (p[-1] & 1)
                                chunk->free_size -= head;
-                       }
-                       chunk->map[i] -= head;
-                       off += head;
+                       else
+                               max_contig = max(*p - p[-1], max_contig);
+                       this_size -= head;
                        head = 0;
                }
 
                /* if tail is small, just keep it around */
-               tail = chunk->map[i] - head - size;
-               if (tail < sizeof(int))
+               tail = this_size - head - size;
+               if (tail < sizeof(int)) {
                        tail = 0;
+                       size = this_size - head;
+               }
 
                /* split if warranted */
                if (head || tail) {
-                       pcpu_split_block(chunk, i, head, tail);
+                       int nr_extra = !!head + !!tail;
+
+                       /* insert new subblocks */
+                       memmove(p + nr_extra + 1, p + 1,
+                               sizeof(chunk->map[0]) * (chunk->map_used - i));
+                       chunk->map_used += nr_extra;
+
                        if (head) {
-                               i++;
-                               off += head;
-                               max_contig = max(chunk->map[i - 1], max_contig);
+                               if (!seen_free) {
+                                       chunk->first_free = i;
+                                       seen_free = true;
+                               }
+                               *++p = off += head;
+                               ++i;
+                               max_contig = max(head, max_contig);
+                       }
+                       if (tail) {
+                               p[1] = off + size;
+                               max_contig = max(tail, max_contig);
                        }
-                       if (tail)
-                               max_contig = max(chunk->map[i + 1], max_contig);
                }
 
+               if (!seen_free)
+                       chunk->first_free = i + 1;
+
                /* update hint and mark allocated */
-               if (is_last)
+               if (i + 1 == chunk->map_used)
                        chunk->contig_hint = max_contig; /* fully scanned */
                else
                        chunk->contig_hint = max(chunk->contig_hint,
                                                 max_contig);
 
-               chunk->free_size -= chunk->map[i];
-               chunk->map[i] = -chunk->map[i];
+               chunk->free_size -= size;
+               *p |= 1;
 
                pcpu_chunk_relocate(chunk, oslot);
                return off;
@@ -570,34 +552,50 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 {
        int oslot = pcpu_chunk_slot(chunk);
-       int i, off;
-
-       for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
-               if (off == freeme)
-                       break;
+       int off = 0;
+       unsigned i, j;
+       int to_free = 0;
+       int *p;
+
+       freeme |= 1;    /* we are searching for <given offset, in use> pair */
+
+       i = 0;
+       j = chunk->map_used;
+       while (i != j) {
+               unsigned k = (i + j) / 2;
+               off = chunk->map[k];
+               if (off < freeme)
+                       i = k + 1;
+               else if (off > freeme)
+                       j = k;
+               else
+                       i = j = k;
+       }
        BUG_ON(off != freeme);
-       BUG_ON(chunk->map[i] > 0);
 
-       chunk->map[i] = -chunk->map[i];
-       chunk->free_size += chunk->map[i];
+       if (i < chunk->first_free)
+               chunk->first_free = i;
 
+       p = chunk->map + i;
+       *p = off &= ~1;
+       chunk->free_size += (p[1] & ~1) - off;
+
+       /* merge with next? */
+       if (!(p[1] & 1))
+               to_free++;
        /* merge with previous? */
-       if (i > 0 && chunk->map[i - 1] >= 0) {
-               chunk->map[i - 1] += chunk->map[i];
-               chunk->map_used--;
-               memmove(&chunk->map[i], &chunk->map[i + 1],
-                       (chunk->map_used - i) * sizeof(chunk->map[0]));
+       if (i > 0 && !(p[-1] & 1)) {
+               to_free++;
                i--;
+               p--;
        }
-       /* merge with next? */
-       if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
-               chunk->map[i] += chunk->map[i + 1];
-               chunk->map_used--;
-               memmove(&chunk->map[i + 1], &chunk->map[i + 2],
-                       (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
+       if (to_free) {
+               chunk->map_used -= to_free;
+               memmove(p + 1, p + 1 + to_free,
+                       (chunk->map_used - i) * sizeof(chunk->map[0]));
        }
 
-       chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
+       chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
        pcpu_chunk_relocate(chunk, oslot);
 }
 
@@ -617,7 +615,9 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
        }
 
        chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
-       chunk->map[chunk->map_used++] = pcpu_unit_size;
+       chunk->map[0] = 0;
+       chunk->map[1] = pcpu_unit_size | 1;
+       chunk->map_used = 1;
 
        INIT_LIST_HEAD(&chunk->list);
        chunk->free_size = pcpu_unit_size;
@@ -713,6 +713,16 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
        unsigned long flags;
        void __percpu *ptr;
 
+       /*
+        * We want the lowest bit of offset available for in-use/free
+        * indicator, so force >= 16bit alignment and make size even.
+        */
+       if (unlikely(align < 2))
+               align = 2;
+
+       if (unlikely(size & 1))
+               size++;
+
        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                WARN(true, "illegal size (%zu) or align (%zu) for "
                     "percpu allocation\n", size, align);
@@ -1343,9 +1353,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        }
        schunk->contig_hint = schunk->free_size;
 
-       schunk->map[schunk->map_used++] = -ai->static_size;
+       schunk->map[0] = 1;
+       schunk->map[1] = ai->static_size;
+       schunk->map_used = 1;
        if (schunk->free_size)
-               schunk->map[schunk->map_used++] = schunk->free_size;
+               schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
+       else
+               schunk->map[1] |= 1;
 
        /* init dynamic chunk if necessary */
        if (dyn_size) {
@@ -1358,8 +1372,10 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                bitmap_fill(dchunk->populated, pcpu_unit_pages);
 
                dchunk->contig_hint = dchunk->free_size = dyn_size;
-               dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
-               dchunk->map[dchunk->map_used++] = dchunk->free_size;
+               dchunk->map[0] = 1;
+               dchunk->map[1] = pcpu_reserved_chunk_limit;
+               dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
+               dchunk->map_used = 2;
        }
 
        /* link the first chunk in */
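Throughout these percpu hunks the per-chunk map changes meaning: instead of signed sizes (negative meaning allocated), each entry is now just a start offset with bit 0 set when the area is in use, and a sentinel entry marks the end of the unit. That is why pcpu_alloc() above forces even sizes and at least 2-byte alignment, why the first-chunk setup writes explicit offset/flag pairs, and why pcpu_free_area() can binary-search for the <offset, in-use> pair. A small user-space model of the encoding (illustration only, not the kernel allocator):

#include <stdio.h>

/*
 * map[] holds <start offset | in-use bit>; the last entry is the sentinel
 * (unit size, marked in use), so an area's size is the difference between
 * consecutive offsets with the low bit masked off.
 */
static int map[] = { 0 | 1, 128, 192 | 1, 512 | 1 };    /* 512 is the sentinel */
static int map_used = 3;        /* entries before the sentinel */

static int area_size(int i)
{
        return (map[i + 1] & ~1) - (map[i] & ~1);
}

/* Binary search for the <offset, in-use> pair, as pcpu_free_area() now does. */
static int find_allocated(int offset)
{
        int i = 0, j = map_used;
        int key = offset | 1;

        while (i != j) {
                int k = (i + j) / 2;

                if (map[k] < key)
                        i = k + 1;
                else if (map[k] > key)
                        j = k;
                else
                        return k;
        }
        return -1;
}

int main(void)
{
        int i;

        for (i = 0; i < map_used; i++)
                printf("area %d: offset %d, size %d, %s\n", i, map[i] & ~1,
                       area_size(i), (map[i] & 1) ? "in use" : "free");
        printf("freeing offset 192 -> map index %d\n", find_allocated(192));
        return 0;
}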
index fd26d0433509e2b5885c74249a9eb04828285180..3c5cf68566ec1375ff4d4deedc09db4c540e4b37 100644 (file)
@@ -456,25 +456,23 @@ free_iovecs:
        return rc;
 }
 
-asmlinkage ssize_t
-compat_sys_process_vm_readv(compat_pid_t pid,
-                           const struct compat_iovec __user *lvec,
-                           unsigned long liovcnt,
-                           const struct compat_iovec __user *rvec,
-                           unsigned long riovcnt,
-                           unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_readv, compat_pid_t, pid,
+                      const struct compat_iovec __user *, lvec,
+                      compat_ulong_t, liovcnt,
+                      const struct compat_iovec __user *, rvec,
+                      compat_ulong_t, riovcnt,
+                      compat_ulong_t, flags)
 {
        return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
                                    riovcnt, flags, 0);
 }
 
-asmlinkage ssize_t
-compat_sys_process_vm_writev(compat_pid_t pid,
-                            const struct compat_iovec __user *lvec,
-                            unsigned long liovcnt,
-                            const struct compat_iovec __user *rvec,
-                            unsigned long riovcnt,
-                            unsigned long flags)
+COMPAT_SYSCALL_DEFINE6(process_vm_writev, compat_pid_t, pid,
+                      const struct compat_iovec __user *, lvec,
+                      compat_ulong_t, liovcnt,
+                      const struct compat_iovec __user *, rvec,
+                      compat_ulong_t, riovcnt,
+                      compat_ulong_t, flags)
 {
        return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
                                    riovcnt, flags, 1);
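
These compat conversions all follow one shape: an open-coded asmlinkage entry point becomes COMPAT_SYSCALL_DEFINEn(name, type1, arg1, type2, arg2, ...), so the 32-bit argument handling is generated by the macro instead of being left to each definition. A schematic example of the form only; "frobnicate" and do_frobnicate() are made up for illustration and are not part of this series:

    #include <linux/compat.h>

    /* Hypothetical compat syscall, shown only to illustrate the macro form:
     * the syscall name, then alternating argument types and argument names.
     */
    COMPAT_SYSCALL_DEFINE3(frobnicate, int, fd,
                           compat_ulong_t, count,
                           const char __user *, buf)
    {
            /* forward to a (made-up) native implementation */
            return do_frobnicate(fd, count, buf);
    }
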
index 8fc049f9a5a6c5d511ac7a5ac0dd41c698ed99a3..11cf322f8133d7048d414c6f0eed227f6b12d3ac 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1165,6 +1165,16 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                }
                set_pte_at(mm, address, pte,
                           swp_entry_to_pte(make_hwpoison_entry(page)));
+       } else if (pte_unused(pteval)) {
+               /*
+                * The guest indicated that the page content is of no
+                * interest anymore. Simply discard the pte, vmscan
+                * interest anymore. Simply discard the pte; vmscan
+                */
+               if (PageAnon(page))
+                       dec_mm_counter(mm, MM_ANONPAGES);
+               else
+                       dec_mm_counter(mm, MM_FILEPAGES);
        } else if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };
                pte_t swp_pte;
index ec9909935fb60850ed3635e1636f568c8c88bbf0..175273f38cb1bd59f5aeb88cb8c815033475dfe8 100644 (file)
@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
 static void vlan_transfer_features(struct net_device *dev,
                                   struct net_device *vlandev)
 {
+       struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
+
        vlandev->gso_max_size = dev->gso_max_size;
 
-       if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+       if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
                vlandev->hard_header_len = dev->hard_header_len;
        else
                vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
index 4b65aa492fb6bc7908243e96768ae796a68776b2..27bfe2f8e2de71bee3f873106d88498765d36e93 100644 (file)
@@ -578,6 +578,9 @@ static int vlan_dev_init(struct net_device *dev)
 
        dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
        dev->gso_max_size = real_dev->gso_max_size;
+       if (dev->features & NETIF_F_VLAN_FEATURES)
+               netdev_warn(real_dev, "VLAN features are set incorrectly.  Q-in-Q configurations may not work correctly.\n");
+
 
        /* ipv6 shared card related stuff */
        dev->dev_id = real_dev->dev_id;
@@ -592,7 +595,8 @@ static int vlan_dev_init(struct net_device *dev)
 #endif
 
        dev->needed_headroom = real_dev->needed_headroom;
-       if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+       if (vlan_hw_offload_capable(real_dev->features,
+                                   vlan_dev_priv(dev)->vlan_proto)) {
                dev->header_ops      = &vlan_passthru_header_ops;
                dev->hard_header_len = real_dev->hard_header_len;
        } else {
index 63f0455c0bc3e21fea311a4d14d24a995c606d6f..8fe8b71b487add263a711d39e52f683675708dd6 100644 (file)
@@ -49,14 +49,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        brstats->tx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);
 
-       if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
-               goto out;
-
        BR_INPUT_SKB_CB(skb)->brdev = dev;
 
        skb_reset_mac_header(skb);
        skb_pull(skb, ETH_HLEN);
 
+       if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
+               goto out;
+
        if (is_broadcast_ether_addr(dest))
                br_flood_deliver(br, skb, false);
        else if (is_multicast_ether_addr(dest)) {
index 28d54462742278f388caf3f4a96e13b212b35688..d0cca3c65f0174ab8c6522b5f54ab2ad33b66dca 100644 (file)
@@ -29,6 +29,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
        struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
        struct net_bridge *br = netdev_priv(brdev);
        struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
+       struct net_port_vlans *pv;
 
        u64_stats_update_begin(&brstats->syncp);
        brstats->rx_packets++;
@@ -39,18 +40,18 @@ static int br_pass_frame_up(struct sk_buff *skb)
         * packet is allowed except in promisc mode when someone
         * may be running packet capture.
         */
+       pv = br_get_vlan_info(br);
        if (!(brdev->flags & IFF_PROMISC) &&
-           !br_allowed_egress(br, br_get_vlan_info(br), skb)) {
+           !br_allowed_egress(br, pv, skb)) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
 
-       skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
-       if (!skb)
-               return NET_RX_DROP;
-
        indev = skb->dev;
        skb->dev = brdev;
+       skb = br_handle_vlan(br, pv, skb);
+       if (!skb)
+               return NET_RX_DROP;
 
        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
                       netif_receive_skb);
index 8249ca764c79c5f2ddab51006ad445752b3ac137..f23c74b3a95327722916405cee92d5d61048c8a2 100644 (file)
@@ -119,22 +119,6 @@ static void __vlan_flush(struct net_port_vlans *v)
        kfree_rcu(v, rcu);
 }
 
-/* Strip the tag from the packet.  Will return skb with tci set 0.  */
-static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
-{
-       if (skb->protocol != htons(ETH_P_8021Q)) {
-               skb->vlan_tci = 0;
-               return skb;
-       }
-
-       skb->vlan_tci = 0;
-       skb = vlan_untag(skb);
-       if (skb)
-               skb->vlan_tci = 0;
-
-       return skb;
-}
-
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
                               const struct net_port_vlans *pv,
                               struct sk_buff *skb)
@@ -144,13 +128,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
        if (!br->vlan_enabled)
                goto out;
 
+       /* Vlan filter table must be configured at this point.  The
+        * only exception is when the bridge is set in promisc mode and
+        * the packet is destined for the bridge device.  In this case,
+        * pass the packet as is.
+        */
+       if (!pv) {
+               if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
+                       goto out;
+               } else {
+                       kfree_skb(skb);
+                       return NULL;
+               }
+       }
+
        /* At this point, we know that the frame was filtered and contains
         * a valid vlan id.  If the vlan id is set in the untagged bitmap,
         * send untagged; otherwise, send tagged.
         */
        br_vlan_get_tag(skb, &vid);
        if (test_bit(vid, pv->untagged_bitmap))
-               skb = br_vlan_untag(skb);
+               skb->vlan_tci = 0;
 
 out:
        return skb;
@@ -174,6 +172,18 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        if (!v)
                return false;
 
+       /* If vlan tx offload is disabled on bridge device and frame was
+        * sent from vlan device on the bridge device, it does not have
+        * HW accelerated vlan tag.
+        */
+       if (unlikely(!vlan_tx_tag_present(skb) &&
+                    (skb->protocol == htons(ETH_P_8021Q) ||
+                     skb->protocol == htons(ETH_P_8021AD)))) {
+               skb = vlan_untag(skb);
+               if (unlikely(!skb))
+                       return false;
+       }
+
        err = br_vlan_get_tag(skb, vid);
        if (!*vid) {
                u16 pvid = br_get_pvid(v);
index f50161fb812eace2eb659ae78bf12062c608b5b6..9a76eaf63184753d6020eb1f3a9ab51299b1ae02 100644 (file)
@@ -384,8 +384,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
        return sock_setsockopt(sock, level, optname, optval, optlen);
 }
 
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
-                               char __user *optval, unsigned int optlen)
+COMPAT_SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+                      char __user *, optval, unsigned int, optlen)
 {
        int err;
        struct socket *sock = sockfd_lookup(fd, &err);
@@ -504,8 +504,8 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
 }
 EXPORT_SYMBOL(compat_sock_get_timestampns);
 
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
-                               char __user *optval, int __user *optlen)
+COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+                      char __user *, optval, int __user *, optlen)
 {
        int err;
        struct socket *sock = sockfd_lookup(fd, &err);
@@ -735,15 +735,15 @@ static unsigned char nas[21] = {
 };
 #undef AL
 
-asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
        if (flags & MSG_CMSG_COMPAT)
                return -EINVAL;
        return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
-asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
-                                   unsigned int vlen, unsigned int flags)
+COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+                      unsigned int, vlen, unsigned int, flags)
 {
        if (flags & MSG_CMSG_COMPAT)
                return -EINVAL;
@@ -751,28 +751,28 @@ asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
                              flags | MSG_CMSG_COMPAT);
 }
 
-asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
        if (flags & MSG_CMSG_COMPAT)
                return -EINVAL;
        return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
-asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
+COMPAT_SYSCALL_DEFINE4(recv, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags)
 {
        return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
 }
 
-asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
-                                   unsigned int flags, struct sockaddr __user *addr,
-                                   int __user *addrlen)
+COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len,
+                      unsigned int, flags, struct sockaddr __user *, addr,
+                      int __user *, addrlen)
 {
        return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
 }
 
-asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
-                                   unsigned int vlen, unsigned int flags,
-                                   struct compat_timespec __user *timeout)
+COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+                      unsigned int, vlen, unsigned int, flags,
+                      struct compat_timespec __user *, timeout)
 {
        int datagrams;
        struct timespec ktspec;
@@ -795,7 +795,7 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
        return datagrams;
 }
 
-asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
+COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
 {
        int ret;
        u32 a[6];
index b1b0c8d4d7df31d34a575ab326dab71f1a315e51..45fa2f11f84dcc7f0efe12711c2d51526360a08d 100644 (file)
@@ -2286,7 +2286,7 @@ out:
 }
 EXPORT_SYMBOL(skb_checksum_help);
 
-__be16 skb_network_protocol(struct sk_buff *skb)
+__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
        __be16 type = skb->protocol;
        int vlan_depth = ETH_HLEN;
@@ -2313,6 +2313,8 @@ __be16 skb_network_protocol(struct sk_buff *skb)
                vlan_depth += VLAN_HLEN;
        }
 
+       *depth = vlan_depth;
+
        return type;
 }
 
@@ -2326,12 +2328,13 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 {
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_offload *ptype;
-       __be16 type = skb_network_protocol(skb);
+       int vlan_depth = skb->mac_len;
+       __be16 type = skb_network_protocol(skb, &vlan_depth);
 
        if (unlikely(!type))
                return ERR_PTR(-EINVAL);
 
-       __skb_pull(skb, skb->mac_len);
+       __skb_pull(skb, vlan_depth);
 
        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &offload_base, list) {
@@ -2498,8 +2501,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
                                            const struct net_device *dev,
                                            netdev_features_t features)
 {
+       int tmp;
+
        if (skb->ip_summed != CHECKSUM_NONE &&
-           !can_checksum_protocol(features, skb_network_protocol(skb))) {
+           !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
                features &= ~NETIF_F_ALL_CSUM;
        } else if (illegal_highdma(dev, skb)) {
                features &= ~NETIF_F_SG;
index 869c7afe3b070576464693e4d6ca00482c151f42..90b96a11b974d2697defcc9ccf4b14b65d704c49 100644 (file)
@@ -2127,25 +2127,31 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
  *
  *     The `hlen` as calculated by skb_zerocopy_headlen() specifies the
  *     headroom in the `to` buffer.
+ *
+ *     Return value:
+ *     0: everything is OK
+ *     -ENOMEM: couldn't orphan frags of @from due to lack of memory
+ *     -EFAULT: skb_copy_bits() found some problem with skb geometry
  */
-void
-skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+int
+skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
 {
        int i, j = 0;
        int plen = 0; /* length of skb->head fragment */
+       int ret;
        struct page *page;
        unsigned int offset;
 
        BUG_ON(!from->head_frag && !hlen);
 
        /* don't bother with small payloads */
-       if (len <= skb_tailroom(to)) {
-               skb_copy_bits(from, 0, skb_put(to, len), len);
-               return;
-       }
+       if (len <= skb_tailroom(to))
+               return skb_copy_bits(from, 0, skb_put(to, len), len);
 
        if (hlen) {
-               skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+               ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+               if (unlikely(ret))
+                       return ret;
                len -= hlen;
        } else {
                plen = min_t(int, skb_headlen(from), len);
@@ -2163,6 +2169,11 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
        to->len += len + plen;
        to->data_len += len + plen;
 
+       if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
+               skb_tx_error(from);
+               return -ENOMEM;
+       }
+
        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
                if (!len)
                        break;
@@ -2173,6 +2184,8 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
                j++;
        }
        skb_shinfo(to)->nr_frags = j;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(skb_zerocopy);
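
Since skb_zerocopy() can now fail, callers are expected to check the return value and signal the failure back on the source skb, as the netfilter and openvswitch hunks later in this commit do. A hedged sketch of that calling pattern; the wrapper function, its sizing and its names are illustrative only:

    /* Illustration of the expected error handling around the new
     * skb_zerocopy() return value; skb_zerocopy(), skb_tx_error(),
     * alloc_skb() and kfree_skb() are the real APIs, the rest is a
     * stand-in.
     */
    static struct sk_buff *copy_for_upcall(struct sk_buff *src, int len, int hlen)
    {
            struct sk_buff *dst;
            int err;

            dst = alloc_skb(len + hlen, GFP_ATOMIC);
            if (!dst) {
                    skb_tx_error(src);      /* zerocopy users must be told */
                    return NULL;
            }

            err = skb_zerocopy(dst, src, len, hlen);
            if (err) {                      /* -ENOMEM or -EFAULT, see above */
                    skb_tx_error(src);
                    kfree_skb(dst);
                    return NULL;
            }
            return dst;
    }
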
 
@@ -2866,8 +2879,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        int err = -ENOMEM;
        int i = 0;
        int pos;
+       int dummy;
 
-       proto = skb_network_protocol(head_skb);
+       proto = skb_network_protocol(head_skb, &dummy);
        if (unlikely(!proto))
                return ERR_PTR(-EINVAL);
 
index 1863422fb7d553340151b1402ee30d4be1fcde29..250be7421ab36c50ce00a25dcd3c659ca1c97f18 100644 (file)
@@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
        int i;
        bool csum_err = false;
 
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+       if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+               /* Looped back packet, drop it! */
+               if (rt_is_output_route(skb_rtable(skb)))
+                       goto drop;
+       }
+#endif
+
        if (parse_gre_header(skb, &tpi, &csum_err) < 0)
                goto drop;
 
index 78a89e61925d6ae27937098f906145d2d8c48f5d..a82a22d8f77fdca5f496e9d7cb45f40b70d194ca 100644 (file)
@@ -416,9 +416,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
-               /* Looped back packet, drop it! */
-               if (rt_is_output_route(skb_rtable(skb)))
-                       goto drop;
                tunnel->dev->stats.multicast++;
                skb->pkt_type = PACKET_BROADCAST;
        }
index 6f847dd56dbc7fb53a7f3dc9dfc8330e507ec0e9..8d69626f2206900dfbb336063034875743a3936e 100644 (file)
@@ -108,6 +108,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
        nf_reset(skb);
        secpath_reset(skb);
        skb_clear_hash_if_not_l4(skb);
+       skb_dst_drop(skb);
        skb->vlan_tci = 0;
        skb_set_queue_mapping(skb, 0);
        skb->pkt_type = PACKET_HOST;
index 3cf9765104978cbf8ed49d184f926f23130cd4fd..1e4eac779f51c81bf5472d13ed446fefb0827753 100644 (file)
@@ -2628,7 +2628,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 {
        __be32 dest, src;
        __u16 destp, srcp;
-       long delta = tw->tw_ttd - jiffies;
+       s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
index 344e972426df847a6f6b87414c37ef29e2ddd127..6c7fa0853fc74ef179b00de52d78aecee342e18b 100644 (file)
@@ -133,10 +133,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
 static DEFINE_SPINLOCK(addrconf_hash_lock);
 
-static void addrconf_verify(unsigned long);
+static void addrconf_verify(void);
+static void addrconf_verify_rtnl(void);
+static void addrconf_verify_work(struct work_struct *);
 
-static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0);
-static DEFINE_SPINLOCK(addrconf_verify_lock);
+static struct workqueue_struct *addrconf_wq;
+static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
 
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
@@ -151,7 +153,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
                                                  u32 flags, u32 noflags);
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
-static void addrconf_dad_timer(unsigned long data);
+static void addrconf_dad_work(struct work_struct *w);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
 static void addrconf_dad_run(struct inet6_dev *idev);
 static void addrconf_rs_timer(unsigned long data);
@@ -247,9 +249,9 @@ static void addrconf_del_rs_timer(struct inet6_dev *idev)
                __in6_dev_put(idev);
 }
 
-static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp)
+static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
 {
-       if (del_timer(&ifp->dad_timer))
+       if (cancel_delayed_work(&ifp->dad_work))
                __in6_ifa_put(ifp);
 }
 
@@ -261,12 +263,12 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
        mod_timer(&idev->rs_timer, jiffies + when);
 }
 
-static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
-                                  unsigned long when)
+static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
+                                  unsigned long delay)
 {
-       if (!timer_pending(&ifp->dad_timer))
+       if (!delayed_work_pending(&ifp->dad_work))
                in6_ifa_hold(ifp);
-       mod_timer(&ifp->dad_timer, jiffies + when);
+       mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
 }
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -751,8 +753,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
 
        in6_dev_put(ifp->idev);
 
-       if (del_timer(&ifp->dad_timer))
-               pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
+       if (cancel_delayed_work(&ifp->dad_work))
+               pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
+                         ifp);
 
        if (ifp->state != INET6_IFADDR_STATE_DEAD) {
                pr_warn("Freeing alive inet6 address %p\n", ifp);
@@ -849,8 +852,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
 
        spin_lock_init(&ifa->lock);
        spin_lock_init(&ifa->state_lock);
-       setup_timer(&ifa->dad_timer, addrconf_dad_timer,
-                   (unsigned long)ifa);
+       INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
        INIT_HLIST_NODE(&ifa->addr_lst);
        ifa->scope = scope;
        ifa->prefix_len = pfxlen;
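
The addrconf changes in this file are one instance of the usual timer to delayed-work conversion: setup_timer()/mod_timer()/del_timer() become INIT_DELAYED_WORK()/mod_delayed_work()/cancel_delayed_work() on a dedicated workqueue, so the handler runs in process context and may take the RTNL. A condensed sketch of that pattern with generic placeholder names, not the addrconf ones:

    #include <linux/workqueue.h>
    #include <linux/rtnetlink.h>

    struct my_obj {
            struct delayed_work dwork;
            /* ... */
    };

    static struct workqueue_struct *my_wq;

    static void my_handler(struct work_struct *w)
    {
            struct my_obj *obj = container_of(to_delayed_work(w),
                                              struct my_obj, dwork);

            rtnl_lock();            /* allowed now: process context */
            /* ... deferred work on obj ... */
            rtnl_unlock();
    }

    static int my_init(struct my_obj *obj)
    {
            my_wq = create_workqueue("my_wq");
            if (!my_wq)
                    return -ENOMEM;
            INIT_DELAYED_WORK(&obj->dwork, my_handler);
            mod_delayed_work(my_wq, &obj->dwork, msecs_to_jiffies(100));
            return 0;
    }

    static void my_exit(struct my_obj *obj)
    {
            cancel_delayed_work_sync(&obj->dwork);
            destroy_workqueue(my_wq);
    }
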
@@ -990,6 +992,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
        enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
        unsigned long expires;
 
+       ASSERT_RTNL();
+
        spin_lock_bh(&ifp->state_lock);
        state = ifp->state;
        ifp->state = INET6_IFADDR_STATE_DEAD;
@@ -1021,7 +1025,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
        write_unlock_bh(&ifp->idev->lock);
 
-       addrconf_del_dad_timer(ifp);
+       addrconf_del_dad_work(ifp);
 
        ipv6_ifa_notify(RTM_DELADDR, ifp);
 
@@ -1604,7 +1608,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
 {
        if (ifp->flags&IFA_F_PERMANENT) {
                spin_lock_bh(&ifp->lock);
-               addrconf_del_dad_timer(ifp);
+               addrconf_del_dad_work(ifp);
                ifp->flags |= IFA_F_TENTATIVE;
                if (dad_failed)
                        ifp->flags |= IFA_F_DADFAILED;
@@ -1625,20 +1629,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
                        spin_unlock_bh(&ifp->lock);
                }
                ipv6_del_addr(ifp);
-       } else
+       } else {
                ipv6_del_addr(ifp);
+       }
 }
 
 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
 {
        int err = -ENOENT;
 
-       spin_lock(&ifp->state_lock);
+       spin_lock_bh(&ifp->state_lock);
        if (ifp->state == INET6_IFADDR_STATE_DAD) {
                ifp->state = INET6_IFADDR_STATE_POSTDAD;
                err = 0;
        }
-       spin_unlock(&ifp->state_lock);
+       spin_unlock_bh(&ifp->state_lock);
 
        return err;
 }
@@ -1671,7 +1676,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
                }
        }
 
-       addrconf_dad_stop(ifp, 1);
+       spin_lock_bh(&ifp->state_lock);
+       /* transition from _POSTDAD to _ERRDAD */
+       ifp->state = INET6_IFADDR_STATE_ERRDAD;
+       spin_unlock_bh(&ifp->state_lock);
+
+       addrconf_mod_dad_work(ifp, 0);
 }
 
 /* Join to solicited addr multicast group. */
@@ -1680,6 +1690,8 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
 {
        struct in6_addr maddr;
 
+       ASSERT_RTNL();
+
        if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
 
@@ -1691,6 +1703,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 {
        struct in6_addr maddr;
 
+       ASSERT_RTNL();
+
        if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
                return;
 
@@ -1701,6 +1715,9 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
+
+       ASSERT_RTNL();
+
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1712,6 +1729,9 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
 {
        struct in6_addr addr;
+
+       ASSERT_RTNL();
+
        if (ifp->prefix_len >= 127) /* RFC 6164 */
                return;
        ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -2271,11 +2291,13 @@ ok:
                                return;
                        }
 
-                       ifp->flags |= IFA_F_MANAGETEMPADDR;
                        update_lft = 0;
                        create = 1;
+                       spin_lock_bh(&ifp->lock);
+                       ifp->flags |= IFA_F_MANAGETEMPADDR;
                        ifp->cstamp = jiffies;
                        ifp->tokenized = tokenized;
+                       spin_unlock_bh(&ifp->lock);
                        addrconf_dad_start(ifp);
                }
 
@@ -2326,7 +2348,7 @@ ok:
                                         create, now);
 
                        in6_ifa_put(ifp);
-                       addrconf_verify(0);
+                       addrconf_verify();
                }
        }
        inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
@@ -2475,7 +2497,7 @@ static int inet6_addr_add(struct net *net, int ifindex,
                        manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
                                         true, jiffies);
                in6_ifa_put(ifp);
-               addrconf_verify(0);
+               addrconf_verify_rtnl();
                return 0;
        }
 
@@ -3011,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
                hlist_for_each_entry_rcu(ifa, h, addr_lst) {
                        if (ifa->idev == idev) {
                                hlist_del_init_rcu(&ifa->addr_lst);
-                               addrconf_del_dad_timer(ifa);
+                               addrconf_del_dad_work(ifa);
                                goto restart;
                        }
                }
@@ -3049,7 +3071,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
        while (!list_empty(&idev->addr_list)) {
                ifa = list_first_entry(&idev->addr_list,
                                       struct inet6_ifaddr, if_list);
-               addrconf_del_dad_timer(ifa);
+               addrconf_del_dad_work(ifa);
 
                list_del(&ifa->if_list);
 
@@ -3148,10 +3170,10 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
                rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
 
        ifp->dad_probes = idev->cnf.dad_transmits;
-       addrconf_mod_dad_timer(ifp, rand_num);
+       addrconf_mod_dad_work(ifp, rand_num);
 }
 
-static void addrconf_dad_start(struct inet6_ifaddr *ifp)
+static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 {
        struct inet6_dev *idev = ifp->idev;
        struct net_device *dev = idev->dev;
@@ -3203,25 +3225,68 @@ out:
        read_unlock_bh(&idev->lock);
 }
 
-static void addrconf_dad_timer(unsigned long data)
+static void addrconf_dad_start(struct inet6_ifaddr *ifp)
 {
-       struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
+       bool begin_dad = false;
+
+       spin_lock_bh(&ifp->state_lock);
+       if (ifp->state != INET6_IFADDR_STATE_DEAD) {
+               ifp->state = INET6_IFADDR_STATE_PREDAD;
+               begin_dad = true;
+       }
+       spin_unlock_bh(&ifp->state_lock);
+
+       if (begin_dad)
+               addrconf_mod_dad_work(ifp, 0);
+}
+
+static void addrconf_dad_work(struct work_struct *w)
+{
+       struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
+                                               struct inet6_ifaddr,
+                                               dad_work);
        struct inet6_dev *idev = ifp->idev;
        struct in6_addr mcaddr;
 
+       enum {
+               DAD_PROCESS,
+               DAD_BEGIN,
+               DAD_ABORT,
+       } action = DAD_PROCESS;
+
+       rtnl_lock();
+
+       spin_lock_bh(&ifp->state_lock);
+       if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
+               action = DAD_BEGIN;
+               ifp->state = INET6_IFADDR_STATE_DAD;
+       } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
+               action = DAD_ABORT;
+               ifp->state = INET6_IFADDR_STATE_POSTDAD;
+       }
+       spin_unlock_bh(&ifp->state_lock);
+
+       if (action == DAD_BEGIN) {
+               addrconf_dad_begin(ifp);
+               goto out;
+       } else if (action == DAD_ABORT) {
+               addrconf_dad_stop(ifp, 1);
+               goto out;
+       }
+
        if (!ifp->dad_probes && addrconf_dad_end(ifp))
                goto out;
 
-       write_lock(&idev->lock);
+       write_lock_bh(&idev->lock);
        if (idev->dead || !(idev->if_flags & IF_READY)) {
-               write_unlock(&idev->lock);
+               write_unlock_bh(&idev->lock);
                goto out;
        }
 
        spin_lock(&ifp->lock);
        if (ifp->state == INET6_IFADDR_STATE_DEAD) {
                spin_unlock(&ifp->lock);
-               write_unlock(&idev->lock);
+               write_unlock_bh(&idev->lock);
                goto out;
        }
 
@@ -3232,7 +3297,7 @@ static void addrconf_dad_timer(unsigned long data)
 
                ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
                spin_unlock(&ifp->lock);
-               write_unlock(&idev->lock);
+               write_unlock_bh(&idev->lock);
 
                addrconf_dad_completed(ifp);
 
@@ -3240,16 +3305,17 @@ static void addrconf_dad_timer(unsigned long data)
        }
 
        ifp->dad_probes--;
-       addrconf_mod_dad_timer(ifp,
-                              NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
+       addrconf_mod_dad_work(ifp,
+                             NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
        spin_unlock(&ifp->lock);
-       write_unlock(&idev->lock);
+       write_unlock_bh(&idev->lock);
 
        /* send a neighbour solicitation for our addr */
        addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
        ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
 out:
        in6_ifa_put(ifp);
+       rtnl_unlock();
 }
 
 /* ifp->idev must be at least read locked */
@@ -3276,7 +3342,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
        struct in6_addr lladdr;
        bool send_rs, send_mld;
 
-       addrconf_del_dad_timer(ifp);
+       addrconf_del_dad_work(ifp);
 
        /*
         *      Configure the address for reception. Now it is valid.
@@ -3517,23 +3583,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
  *     Periodic address status verification
  */
 
-static void addrconf_verify(unsigned long foo)
+static void addrconf_verify_rtnl(void)
 {
        unsigned long now, next, next_sec, next_sched;
        struct inet6_ifaddr *ifp;
        int i;
 
+       ASSERT_RTNL();
+
        rcu_read_lock_bh();
-       spin_lock(&addrconf_verify_lock);
        now = jiffies;
        next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
 
-       del_timer(&addr_chk_timer);
+       cancel_delayed_work(&addr_chk_work);
 
        for (i = 0; i < IN6_ADDR_HSIZE; i++) {
 restart:
-               hlist_for_each_entry_rcu_bh(ifp,
-                                        &inet6_addr_lst[i], addr_lst) {
+               hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
                        unsigned long age;
 
                        /* When setting preferred_lft to a value not zero or
@@ -3628,13 +3694,22 @@ restart:
 
        ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
              now, next, next_sec, next_sched);
-
-       addr_chk_timer.expires = next_sched;
-       add_timer(&addr_chk_timer);
-       spin_unlock(&addrconf_verify_lock);
+       mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
        rcu_read_unlock_bh();
 }
 
+static void addrconf_verify_work(struct work_struct *w)
+{
+       rtnl_lock();
+       addrconf_verify_rtnl();
+       rtnl_unlock();
+}
+
+static void addrconf_verify(void)
+{
+       mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
+}
+
 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
                                     struct in6_addr **peer_pfx)
 {
@@ -3691,6 +3766,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
        bool was_managetempaddr;
        bool had_prefixroute;
 
+       ASSERT_RTNL();
+
        if (!valid_lft || (prefered_lft > valid_lft))
                return -EINVAL;
 
@@ -3756,7 +3833,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
                                 !was_managetempaddr, jiffies);
        }
 
-       addrconf_verify(0);
+       addrconf_verify_rtnl();
 
        return 0;
 }
@@ -4386,6 +4463,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
        bool update_rs = false;
        struct in6_addr ll_addr;
 
+       ASSERT_RTNL();
+
        if (token == NULL)
                return -EINVAL;
        if (ipv6_addr_any(token))
@@ -4434,7 +4513,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
        }
 
        write_unlock_bh(&idev->lock);
-       addrconf_verify(0);
+       addrconf_verify_rtnl();
        return 0;
 }
 
@@ -4636,6 +4715,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 {
        struct net *net = dev_net(ifp->idev->dev);
 
+       if (event)
+               ASSERT_RTNL();
+
        inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
 
        switch (event) {
@@ -5244,6 +5326,12 @@ int __init addrconf_init(void)
        if (err < 0)
                goto out_addrlabel;
 
+       addrconf_wq = create_workqueue("ipv6_addrconf");
+       if (!addrconf_wq) {
+               err = -ENOMEM;
+               goto out_nowq;
+       }
+
        /* The addrconf netdev notifier requires that loopback_dev
         * has its ipv6 private information allocated and set up
         * before it can bring up and give link-local addresses
@@ -5274,7 +5362,7 @@ int __init addrconf_init(void)
 
        register_netdevice_notifier(&ipv6_dev_notf);
 
-       addrconf_verify(0);
+       addrconf_verify();
 
        rtnl_af_register(&inet6_ops);
 
@@ -5302,6 +5390,8 @@ errout:
        rtnl_af_unregister(&inet6_ops);
        unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
+       destroy_workqueue(addrconf_wq);
+out_nowq:
        unregister_pernet_subsys(&addrconf_ops);
 out_addrlabel:
        ipv6_addr_label_cleanup();
@@ -5337,7 +5427,8 @@ void addrconf_cleanup(void)
        for (i = 0; i < IN6_ADDR_HSIZE; i++)
                WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
        spin_unlock_bh(&addrconf_hash_lock);
-
-       del_timer(&addr_chk_timer);
+       cancel_delayed_work(&addr_chk_work);
        rtnl_unlock();
+
+       destroy_workqueue(addrconf_wq);
 }
index 85d9d94c0a3c57706540ce9b5efc78309dd69ca0..c83827e7c3248ad493b576ca565f21b485aa0453 100644 (file)
@@ -2016,7 +2016,7 @@ static int __init l2tp_init(void)
        if (rc)
                goto out;
 
-       l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0);
+       l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
        if (!l2tp_wq) {
                pr_err("alloc_workqueue failed\n");
                rc = -ENOMEM;
index f072fe803510320f7210c31b1a08ccddea5ae5b6..108120f216b17671351cb492c86cb0ae928504f6 100644 (file)
@@ -354,13 +354,16 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
 
        skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
                                  GFP_ATOMIC);
-       if (!skb)
+       if (!skb) {
+               skb_tx_error(entskb);
                return NULL;
+       }
 
        nlh = nlmsg_put(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg), 0);
        if (!nlh) {
+               skb_tx_error(entskb);
                kfree_skb(skb);
                return NULL;
        }
@@ -488,13 +491,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = nla_attr_size(data_len);
 
-               skb_zerocopy(skb, entskb, data_len, hlen);
+               if (skb_zerocopy(skb, entskb, data_len, hlen))
+                       goto nla_put_failure;
        }
 
        nlh->nlmsg_len = skb->len;
        return skb;
 
 nla_put_failure:
+       skb_tx_error(entskb);
        kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
index 8601b320b443ba0a0dfdadb54f4fddf78a8308a7..270b77dfac304afea375d4a5999b88cca863da90 100644 (file)
@@ -464,7 +464,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        }
        nla->nla_len = nla_attr_size(skb->len);
 
-       skb_zerocopy(user_skb, skb, skb->len, hlen);
+       err = skb_zerocopy(user_skb, skb, skb->len, hlen);
+       if (err)
+               goto out;
 
        /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
        if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
@@ -478,6 +480,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
 out:
+       if (err)
+               skb_tx_error(skb);
        kfree_skb(nskb);
        return err;
 }
index dda451f4429ca4110d614c6b64b00b59908f0ffa..2998989e76db0a7ccb8e25ef11aa393180956593 100644 (file)
@@ -103,30 +103,24 @@ static void stats_read(struct flow_stats *stats,
 void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
                        unsigned long *used, __be16 *tcp_flags)
 {
-       int cpu, cur_cpu;
+       int cpu;
 
        *used = 0;
        *tcp_flags = 0;
        memset(ovs_stats, 0, sizeof(*ovs_stats));
 
+       local_bh_disable();
        if (!flow->stats.is_percpu) {
                stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
        } else {
-               cur_cpu = get_cpu();
                for_each_possible_cpu(cpu) {
                        struct flow_stats *stats;
 
-                       if (cpu == cur_cpu)
-                               local_bh_disable();
-
                        stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
                        stats_read(stats, ovs_stats, used, tcp_flags);
-
-                       if (cpu == cur_cpu)
-                               local_bh_enable();
                }
-               put_cpu();
        }
+       local_bh_enable();
 }
 
 static void stats_reset(struct flow_stats *stats)
@@ -141,25 +135,17 @@ static void stats_reset(struct flow_stats *stats)
 
 void ovs_flow_stats_clear(struct sw_flow *flow)
 {
-       int cpu, cur_cpu;
+       int cpu;
 
+       local_bh_disable();
        if (!flow->stats.is_percpu) {
                stats_reset(flow->stats.stat);
        } else {
-               cur_cpu = get_cpu();
-
                for_each_possible_cpu(cpu) {
-
-                       if (cpu == cur_cpu)
-                               local_bh_disable();
-
                        stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
-
-                       if (cpu == cur_cpu)
-                               local_bh_enable();
                }
-               put_cpu();
        }
+       local_bh_enable();
 }
 
 static int check_header(struct sk_buff *skb, int len)
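
For reference, the statistics paths above now run the whole per-CPU loop under a single local_bh_disable()/local_bh_enable() pair instead of toggling it only for the local CPU. A minimal sketch of that aggregation pattern; the struct and its fields are placeholders, only the per-CPU iteration helpers are real:

    #include <linux/percpu.h>
    #include <linux/bottom_half.h>

    struct pkt_stats {
            u64 packets;
            u64 bytes;
    };

    static void sum_stats(struct pkt_stats __percpu *pcpu, struct pkt_stats *out)
    {
            int cpu;

            out->packets = 0;
            out->bytes = 0;

            local_bh_disable();
            for_each_possible_cpu(cpu) {
                    const struct pkt_stats *s = per_cpu_ptr(pcpu, cpu);

                    out->packets += s->packets;
                    out->bytes += s->bytes;
            }
            local_bh_enable();
    }
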
index ce6ec6c2f4de9b1eaaf34973f874e39eff408a79..94404f19f9deebbb7c55b7a1660dd011510ba3fd 100644 (file)
@@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
 
        err = mutex_lock_interruptible(&u->readlock);
-       if (err) {
-               err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
+       if (unlikely(err)) {
+               /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
+                * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+                */
+               err = noblock ? -EAGAIN : -ERESTARTSYS;
                goto out;
        }
 
@@ -1913,6 +1916,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
        struct unix_sock *u = unix_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
        int copied = 0;
+       int noblock = flags & MSG_DONTWAIT;
        int check_creds = 0;
        int target;
        int err = 0;
@@ -1928,7 +1932,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                goto out;
 
        target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
-       timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+       timeo = sock_rcvtimeo(sk, noblock);
 
        /* Lock the socket to prevent queue disordering
         * while sleeps in memcpy_tomsg
@@ -1940,8 +1944,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
        }
 
        err = mutex_lock_interruptible(&u->readlock);
-       if (err) {
-               err = sock_intr_errno(timeo);
+       if (unlikely(err)) {
+               /* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
+                * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+                */
+               err = noblock ? -EAGAIN : -ERESTARTSYS;
                goto out;
        }
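
The two hunks above restore the documented non-blocking contract: a recv() or recvmsg() with MSG_DONTWAIT that cannot proceed should fail with EAGAIN rather than a timeout-derived error. A small user-space illustration of that contract (it only reads from an empty socketpair; it does not exercise the readlock race fixed here):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
            int sv[2];
            char buf[64];
            ssize_t n;

            if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) < 0) {
                    perror("socketpair");
                    return 1;
            }

            /* nothing was sent, so a non-blocking read must not block */
            n = recv(sv[0], buf, sizeof(buf), MSG_DONTWAIT);
            if (n < 0 && errno == EAGAIN)
                    printf("recv: EAGAIN, as expected for a non-blocking read\n");
            else
                    printf("recv returned %zd (%s)\n", n, strerror(errno));
            return 0;
    }
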
 
index 547e15daf03dc86dbfa72818122bc4c61929edb0..93a0da26582b863c64efd5ff226a8386bfb58064 100644 (file)
@@ -155,6 +155,15 @@ ld-option = $(call try-run,\
 # Important: no spaces around options
 ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
 
+# ld-version
+# Usage: $(call ld-version)
+# Note this is mainly for HJ Lu's 3 number binutil versions
+ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
+
+# ld-ifversion
+# Usage:  $(call ld-ifversion, -ge, 22252, y)
+ld-ifversion = $(shell [ $(call ld-version) $(1) $(2) ] && echo $(3))
+
 ######
 
 ###
index d5d859c8072964014971b9b1401b96f83a183e5d..9f0ee22b914f3c8e83ea58ab71278e98b8c03e89 100644 (file)
@@ -198,7 +198,7 @@ $(multi-objs-y:.o=.s)   : modname = $(modname-multi)
 $(multi-objs-y:.o=.lst) : modname = $(modname-multi)
 
 quiet_cmd_cc_s_c = CC $(quiet_modtag)  $@
-cmd_cc_s_c       = $(CC) $(c_flags) -fverbose-asm -S -o $@ $<
+cmd_cc_s_c       = $(CC) $(c_flags) $(DISABLE_LTO) -fverbose-asm -S -o $@ $<
 
 $(obj)/%.s: $(src)/%.c FORCE
        $(call if_changed_dep,cc_s_c)
diff --git a/scripts/gcc-ld b/scripts/gcc-ld
new file mode 100644 (file)
index 0000000..cadab9a
--- /dev/null
@@ -0,0 +1,29 @@
+#!/bin/sh
+# run gcc with ld options
+# used as a wrapper to execute link time optimizations
+# yes virginia, this is not pretty
+
+ARGS="-nostdlib"
+
+while [ "$1" != "" ] ; do
+       case "$1" in
+       -save-temps|-m32|-m64) N="$1" ;;
+       -r) N="$1" ;;
+       -[Wg]*) N="$1" ;;
+       -[olv]|-[Ofd]*|-nostdlib) N="$1" ;;
+       --end-group|--start-group)
+                N="-Wl,$1" ;;
+       -[RTFGhIezcbyYu]*|\
+--script|--defsym|-init|-Map|--oformat|-rpath|\
+-rpath-link|--sort-section|--section-start|-Tbss|-Tdata|-Ttext|\
+--version-script|--dynamic-list|--version-exports-symbol|--wrap|-m)
+               A="$1" ; shift ; N="-Wl,$A,$1" ;;
+       -[m]*) N="$1" ;;
+       -*) N="-Wl,$1" ;;
+       *)  N="$1" ;;
+       esac
+       ARGS="$ARGS $N"
+       shift
+done
+
+exec $CC $ARGS
diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
new file mode 100755 (executable)
index 0000000..198580d
--- /dev/null
@@ -0,0 +1,8 @@
+#!/usr/bin/awk -f
+# extract the linker version number from stdin and turn it into a single number
+       {
+       gsub(".*)", "");
+       split($1,a, ".");
+       print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5];
+       exit
+       }
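
As a worked example of the packing this script performs, take a hypothetical "ld --version" first line ending in "2.24.51.0.3": the five components are combined as 2*10000000 + 24*100000 + 51*10000 + 0*100 + 3 = 22910003, and that single number is what the ld-ifversion helper added to scripts/Kbuild.include above compares against.
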
index 99a45fdc1bbfa9a15e7bbd6e4f9ec14a8a8c2357..066355673930342a1b5e839950cbd1186ed6c24b 100644 (file)
@@ -623,7 +623,10 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 
        switch (sym->st_shndx) {
        case SHN_COMMON:
-               warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name);
+               if (!strncmp(symname, "__gnu_lto_", sizeof("__gnu_lto_")-1)) {
+                       /* Should warn here, but modpost runs before the linker */
+               } else
+                       warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name);
                break;
        case SHN_UNDEF:
                /* undefined symbol */
@@ -849,6 +852,7 @@ static const char *section_white_list[] =
        ".xt.lit",         /* xtensa */
        ".arcextmap*",                  /* arc */
        ".gnu.linkonce.arcext*",        /* arc : modules */
+       ".gnu.lto*",
        NULL
 };
 
@@ -1455,6 +1459,10 @@ static void check_section_mismatch(const char *modname, struct elf_info *elf,
                to = find_elf_symbol(elf, r->r_addend, sym);
                tosym = sym_name(elf, to);
 
+               if (!strncmp(fromsym, "reference___initcall",
+                               sizeof("reference___initcall")-1))
+                       return;
+
                /* check whitelist - we may ignore it */
                if (secref_whitelist(mismatch,
                                        fromsec, fromsym, tosec, tosym)) {
@@ -1693,6 +1701,19 @@ static void check_sec_ref(struct module *mod, const char *modname,
        }
 }
 
+static char *remove_dot(char *s)
+{
+       char *end;
+       int n = strcspn(s, ".");
+
+       if (n > 0 && s[n] != 0) {
+               strtoul(s + n + 1, &end, 10);
+               if  (end > s + n + 1 && (*end == '.' || *end == 0))
+                       s[n] = 0;
+       }
+       return s;
+}
+
 static void read_symbols(char *modname)
 {
        const char *symname;
@@ -1731,7 +1752,7 @@ static void read_symbols(char *modname)
        }
 
        for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
-               symname = info.strtab + sym->st_name;
+               symname = remove_dot(info.strtab + sym->st_name);
 
                handle_modversions(mod, &info, sym, symname);
                handle_moddevtable(mod, &info, sym, symname);
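
remove_dot() strips the trailing numeric suffixes (".1234", ".1234.5678") that an LTO toolchain can append to symbol names, while leaving names whose first dot is not followed by a number untouched. A stand-alone user-space copy of the same logic, for illustration only:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Mirrors the remove_dot() added above so it can be tried in user space. */
    static char *remove_dot(char *s)
    {
            char *end;
            int n = strcspn(s, ".");

            if (n > 0 && s[n] != 0) {
                    strtoul(s + n + 1, &end, 10);
                    if (end > s + n + 1 && (*end == '.' || *end == 0))
                            s[n] = 0;
            }
            return s;
    }

    int main(void)
    {
            char a[] = "jiffies.1234";
            char b[] = "jiffies.1234.56789";
            char c[] = "jiffies.lto_priv";  /* no numeric suffix: unchanged */

            /* prints: jiffies jiffies jiffies.lto_priv */
            printf("%s %s %s\n", remove_dot(a), remove_dot(b), remove_dot(c));
            return 0;
    }
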
index 51207e4d5f8bcae0add675e31cf579f28763d1d4..168b43dc0a59b6be4edea0fc5768fb2d63cfa440 100644 (file)
@@ -127,7 +127,7 @@ struct elf_info {
        Elf_Section  export_gpl_sec;
        Elf_Section  export_unused_gpl_sec;
        Elf_Section  export_gpl_future_sec;
-       const char   *strtab;
+       char         *strtab;
        char         *modinfo;
        unsigned int modinfo_len;
 
index bbd32c729dbb4e019d1461116b84c25107e35ab8..347896548ad3159a152186a4c1a27cdf92f1f4ad 100644 (file)
@@ -65,8 +65,8 @@ no_payload:
  * taking a 32-bit syscall are zero.  If you can, you should call sys_keyctl()
  * directly.
  */
-asmlinkage long compat_sys_keyctl(u32 option,
-                                 u32 arg2, u32 arg3, u32 arg4, u32 arg5)
+COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
+                      u32, arg2, u32, arg3, u32, arg4, u32, arg5)
 {
        switch (option) {
        case KEYCTL_GET_KEYRING_ID:
diff --git a/tools/include/linux/hash.h b/tools/include/linux/hash.h
new file mode 100644 (file)
index 0000000..d026c65
--- /dev/null
@@ -0,0 +1,5 @@
+#include "../../../include/linux/hash.h"
+
+#ifndef _TOOLS_LINUX_HASH_H
+#define _TOOLS_LINUX_HASH_H
+#endif
index ed2f51e11b80f36d63601c7abb1ea32db13566ff..ce00f7ee6455248d92dba7016b2770c824e897f9 100644 (file)
@@ -9,8 +9,10 @@ LIB_H=
 LIB_OBJS=
 
 LIB_H += fs/debugfs.h
+LIB_H += fs/fs.h
 
 LIB_OBJS += $(OUTPUT)fs/debugfs.o
+LIB_OBJS += $(OUTPUT)fs/fs.o
 
 LIBFILE = libapikfs.a
 
similarity index 91%
rename from tools/perf/util/fs.c
rename to tools/lib/api/fs/fs.c
index f5be1f26e724d303ff4a98051b8cf933e1ec1cb4..5b5eb788996e1de2f105eeedffdc1bb0cb72caba 100644 (file)
@@ -1,8 +1,13 @@
+/* TODO merge/factor in debugfs.c here */
 
-/* TODO merge/factor into tools/lib/lk/debugfs.c */
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/vfs.h>
 
-#include "util.h"
-#include "util/fs.h"
+#include "debugfs.h"
+#include "fs.h"
 
 static const char * const sysfs__fs_known_mountpoints[] = {
        "/sys",
similarity index 50%
rename from tools/perf/util/include/linux/magic.h
rename to tools/lib/api/fs/fs.h
index 07d63cf3e0f6f5d0860c7aa8a8531b49367fd468..cb7049551f335d6a7dbcb108933ba8e9d16e5e59 100644 (file)
@@ -1,9 +1,5 @@
-#ifndef _PERF_LINUX_MAGIC_H_
-#define _PERF_LINUX_MAGIC_H_
-
-#ifndef DEBUGFS_MAGIC
-#define DEBUGFS_MAGIC          0x64626720
-#endif
+#ifndef __API_FS__
+#define __API_FS__
 
 #ifndef SYSFS_MAGIC
 #define SYSFS_MAGIC            0x62656572
@@ -13,4 +9,6 @@
 #define PROC_SUPER_MAGIC       0x9fa0
 #endif
 
-#endif
+const char *sysfs__mountpoint(void);
+const char *procfs__mountpoint(void);
+#endif /* __API_FS__ */
index 888d51137fbe61e0dc04546b36a46486a9d9a555..1d78a4064da48218b90b15f8495c3626cf614b2c 100644 (file)
@@ -18,6 +18,10 @@ from it, into perf.data. Perf record options are accepted and are passed through
 "perf mem -t <TYPE> report" displays the result. It invokes perf report with the
 right set of options to display a memory access profile.
 
+Note that on Intel systems the memory latency reported is the use-latency,
+not the pure load (or store) latency. Use-latency includes any pipeline
+queueing delays in addition to the memory subsystem latency.
+
 OPTIONS
 -------
 <command>...::
index b715cb71592b0299d473eb7b40df77433485d28f..1513935c399b67c6e2158b9041af548edf3e610c 100644 (file)
@@ -136,6 +136,8 @@ Each probe argument follows below syntax.
 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
 'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
 
+On x86 systems, %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
+
 LINE SYNTAX
 -----------
 Line range is described by following syntax.
index f41572d0dd76125b9ebc4b03fd0beb832192b39d..c0c87c87b60f83d92356e43488c83e6c074cb6e8 100644 (file)
@@ -6,6 +6,7 @@ tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/include/asm/bug.h
 tools/include/linux/compiler.h
+tools/include/linux/hash.h
 include/linux/const.h
 include/linux/perf_event.h
 include/linux/rbtree.h
index 7257e7e9e38a5e625f2fa562836ff3793d0275c4..50d875d970c4331f82dccfa35b6ebd980b1e3ea4 100644 (file)
@@ -7,6 +7,8 @@ include config/utilities.mak
 
 # Define V to have a more verbose compile.
 #
+# Define VF to have a more verbose feature check output.
+#
 # Define O to save output files in a separate directory.
 #
 # Define ARCH as name of target architecture if you want cross-builds.
@@ -55,6 +57,9 @@ include config/utilities.mak
 # Define NO_LIBAUDIT if you do not want libaudit support
 #
 # Define NO_LIBBIONIC if you do not want bionic support
+#
+# Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
+# for dwarf backtrace post unwind.
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(shell pwd)))
@@ -208,7 +213,7 @@ LIB_H += ../../include/uapi/linux/perf_event.h
 LIB_H += ../../include/linux/rbtree.h
 LIB_H += ../../include/linux/list.h
 LIB_H += ../../include/uapi/linux/const.h
-LIB_H += ../../include/linux/hash.h
+LIB_H += ../include/linux/hash.h
 LIB_H += ../../include/linux/stringify.h
 LIB_H += util/include/linux/bitmap.h
 LIB_H += util/include/linux/bitops.h
@@ -218,9 +223,7 @@ LIB_H += util/include/linux/ctype.h
 LIB_H += util/include/linux/kernel.h
 LIB_H += util/include/linux/list.h
 LIB_H += util/include/linux/export.h
-LIB_H += util/include/linux/magic.h
 LIB_H += util/include/linux/poison.h
-LIB_H += util/include/linux/prefetch.h
 LIB_H += util/include/linux/rbtree.h
 LIB_H += util/include/linux/rbtree_augmented.h
 LIB_H += util/include/linux/string.h
@@ -244,7 +247,6 @@ LIB_H += util/cache.h
 LIB_H += util/callchain.h
 LIB_H += util/build-id.h
 LIB_H += util/debug.h
-LIB_H += util/fs.h
 LIB_H += util/pmu.h
 LIB_H += util/event.h
 LIB_H += util/evsel.h
@@ -306,7 +308,6 @@ LIB_OBJS += $(OUTPUT)util/annotate.o
 LIB_OBJS += $(OUTPUT)util/build-id.o
 LIB_OBJS += $(OUTPUT)util/config.o
 LIB_OBJS += $(OUTPUT)util/ctype.o
-LIB_OBJS += $(OUTPUT)util/fs.o
 LIB_OBJS += $(OUTPUT)util/pmu.o
 LIB_OBJS += $(OUTPUT)util/environment.o
 LIB_OBJS += $(OUTPUT)util/event.o
@@ -408,6 +409,11 @@ endif
 LIB_OBJS += $(OUTPUT)tests/code-reading.o
 LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
 LIB_OBJS += $(OUTPUT)tests/parse-no-sample-id-all.o
+ifndef NO_DWARF_UNWIND
+ifeq ($(ARCH),x86)
+LIB_OBJS += $(OUTPUT)tests/dwarf-unwind.o
+endif
+endif
 
 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
 BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
@@ -420,6 +426,9 @@ BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
 endif
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
+BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
+BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
+BUILTIN_OBJS += $(OUTPUT)bench/futex-requeue.o
 
 BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
 BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o
@@ -475,8 +484,13 @@ ifndef NO_DWARF
 endif # NO_DWARF
 endif # NO_LIBELF
 
+ifndef NO_LIBDW_DWARF_UNWIND
+  LIB_OBJS += $(OUTPUT)util/unwind-libdw.o
+  LIB_H += util/unwind-libdw.h
+endif
+
 ifndef NO_LIBUNWIND
-  LIB_OBJS += $(OUTPUT)util/unwind.o
+  LIB_OBJS += $(OUTPUT)util/unwind-libunwind.o
 endif
 LIB_OBJS += $(OUTPUT)tests/keep-tracking.o
 
@@ -533,6 +547,7 @@ ifeq ($(NO_PERF_REGS),0)
   ifeq ($(ARCH),x86)
     LIB_H += arch/x86/include/perf_regs.h
   endif
+  LIB_OBJS += $(OUTPUT)util/perf_regs.o
 endif
 
 ifndef NO_LIBNUMA
@@ -655,6 +670,9 @@ $(OUTPUT)tests/python-use.o: tests/python-use.c $(OUTPUT)PERF-CFLAGS
                -DPYTHON='"$(PYTHON_WORD)"' \
                $<
 
+$(OUTPUT)tests/dwarf-unwind.o: tests/dwarf-unwind.c
+       $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -fno-optimize-sibling-calls $<
+
 $(OUTPUT)util/config.o: util/config.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
@@ -707,9 +725,15 @@ $(patsubst perf-%,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
 # we depend the various files onto their directories.
 DIRECTORY_DEPS = $(LIB_OBJS) $(BUILTIN_OBJS) $(GTK_OBJS)
 DIRECTORY_DEPS += $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h
-$(DIRECTORY_DEPS): | $(sort $(dir $(DIRECTORY_DEPS)))
+# no need to add flex objects, because they depend on bison ones
+DIRECTORY_DEPS += $(OUTPUT)util/parse-events-bison.c
+DIRECTORY_DEPS += $(OUTPUT)util/pmu-bison.c
+
+OUTPUT_DIRECTORIES := $(sort $(dir $(DIRECTORY_DEPS)))
+
+$(DIRECTORY_DEPS): | $(OUTPUT_DIRECTORIES)
 # In the second step, we make a rule to actually create these directories
-$(sort $(dir $(DIRECTORY_DEPS))):
+$(OUTPUT_DIRECTORIES):
        $(QUIET_MKDIR)$(MKDIR) -p $@ 2>/dev/null
 
 $(LIB_FILE): $(LIB_OBJS)
@@ -886,7 +910,7 @@ config-clean:
 clean: $(LIBTRACEEVENT)-clean $(LIBAPIKFS)-clean config-clean
        $(call QUIET_CLEAN, core-objs)  $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(GTK_OBJS)
        $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf
-       $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
+       $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS $(OUTPUT)PERF-FEATURES $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
        $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
        $(python-clean)
 
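
Note on the tests/dwarf-unwind.o rule above: the object is built with -fno-optimize-sibling-calls so that every call in the test's chain leaves a real frame and return address for the DWARF unwinder to find. A minimal sketch of the kind of tail call the flag preserves (hypothetical functions, not code from this patch):

  static int leaf(int x)
  {
          return x + 1;
  }

  static int caller(int x)
  {
          /* With sibling-call optimization this call can be emitted as a
           * plain jump, so caller() never shows up in an unwound callchain;
           * -fno-optimize-sibling-calls keeps the frame around.
           */
          return leaf(x);
  }
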
index fe9b61e322a557b063ecc78eccb9a87c8de73dda..67e9b3d38e89209dec2810a9ca229127fd49f17c 100644 (file)
@@ -3,5 +3,5 @@ PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
 ifndef NO_LIBUNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o
 endif
similarity index 95%
rename from tools/perf/arch/arm/util/unwind.c
rename to tools/perf/arch/arm/util/unwind-libunwind.c
index da3dc950550c977c524f7685498ce976608cbd11..729ed69a66649cad6cecea5693c191cc91fc4e16 100644 (file)
@@ -4,7 +4,7 @@
 #include "perf_regs.h"
 #include "../../util/unwind.h"
 
-int unwind__arch_reg_id(int regnum)
+int libunwind__arch_reg_id(int regnum)
 {
        switch (regnum) {
        case UNW_ARM_R0:
index 8801fe02f206a93a97d7f7cc8a56094dffa1d05c..1641542e3636693abe587a9aab99d74ccb5b6bb9 100644 (file)
@@ -3,7 +3,14 @@ PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
 ifndef NO_LIBUNWIND
-LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libunwind.o
+endif
+ifndef NO_LIBDW_DWARF_UNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind-libdw.o
+endif
+ifndef NO_DWARF_UNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/regs_load.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/tests/dwarf-unwind.o
 endif
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/tsc.o
index e84ca76aae779ef484df1633b7cff3e35b327ba8..fc819ca34a7eeffd564cace9b9d0baea71ef36ed 100644 (file)
@@ -5,14 +5,20 @@
 #include "../../util/types.h"
 #include <asm/perf_regs.h>
 
+void perf_regs_load(u64 *regs);
+
 #ifndef HAVE_ARCH_X86_64_SUPPORT
 #define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1)
+#define PERF_REGS_MAX PERF_REG_X86_32_MAX
+#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
 #else
 #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
                       (1ULL << PERF_REG_X86_ES) | \
                       (1ULL << PERF_REG_X86_FS) | \
                       (1ULL << PERF_REG_X86_GS))
 #define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT)
+#define PERF_REGS_MAX PERF_REG_X86_64_MAX
+#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
 #endif
 #define PERF_REG_IP PERF_REG_X86_IP
 #define PERF_REG_SP PERF_REG_X86_SP
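
The new PERF_REGS_MAX and PERF_SAMPLE_REGS_ABI defines sit next to the existing PERF_REGS_MASK; together they describe how many registers the architecture samples, under which ABI, and which ones. A minimal sketch of how such a mask is typically fed to the kernel when requesting user-register and user-stack samples (standard perf_event_open() attribute fields; request_user_regs() is a hypothetical helper, not code from this commit):

  #include <string.h>
  #include <linux/perf_event.h>
  /* PERF_REGS_MASK is assumed to come from the arch perf_regs.h shown above. */

  static void request_user_regs(struct perf_event_attr *attr)
  {
          memset(attr, 0, sizeof(*attr));
          attr->size = sizeof(*attr);
          attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER |
                              PERF_SAMPLE_STACK_USER;
          attr->sample_regs_user  = PERF_REGS_MASK; /* which registers to dump */
          attr->sample_stack_user = 8192;           /* bytes of user stack     */
  }
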
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
new file mode 100644 (file)
index 0000000..b602ad9
--- /dev/null
@@ -0,0 +1,59 @@
+#include <string.h>
+#include "perf_regs.h"
+#include "thread.h"
+#include "map.h"
+#include "event.h"
+#include "tests/tests.h"
+
+#define STACK_SIZE 8192
+
+static int sample_ustack(struct perf_sample *sample,
+                        struct thread *thread, u64 *regs)
+{
+       struct stack_dump *stack = &sample->user_stack;
+       struct map *map;
+       unsigned long sp;
+       u64 stack_size, *buf;
+
+       buf = malloc(STACK_SIZE);
+       if (!buf) {
+               pr_debug("failed to allocate sample uregs data\n");
+               return -1;
+       }
+
+       sp = (unsigned long) regs[PERF_REG_X86_SP];
+
+       map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp);
+       if (!map) {
+               pr_debug("failed to get stack map\n");
+               return -1;
+       }
+
+       stack_size = map->end - sp;
+       stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
+
+       memcpy(buf, (void *) sp, stack_size);
+       stack->data = (char *) buf;
+       stack->size = stack_size;
+       return 0;
+}
+
+int test__arch_unwind_sample(struct perf_sample *sample,
+                            struct thread *thread)
+{
+       struct regs_dump *regs = &sample->user_regs;
+       u64 *buf;
+
+       buf = malloc(sizeof(u64) * PERF_REGS_MAX);
+       if (!buf) {
+               pr_debug("failed to allocate sample uregs data\n");
+               return -1;
+       }
+
+       perf_regs_load(buf);
+       regs->abi  = PERF_SAMPLE_REGS_ABI;
+       regs->regs = buf;
+       regs->mask = PERF_REGS_MASK;
+
+       return sample_ustack(sample, thread, buf);
+}
diff --git a/tools/perf/arch/x86/tests/regs_load.S b/tools/perf/arch/x86/tests/regs_load.S
new file mode 100644 (file)
index 0000000..99167bf
--- /dev/null
@@ -0,0 +1,92 @@
+
+#include <linux/linkage.h>
+
+#define AX      0
+#define BX      1 * 8
+#define CX      2 * 8
+#define DX      3 * 8
+#define SI      4 * 8
+#define DI      5 * 8
+#define BP      6 * 8
+#define SP      7 * 8
+#define IP      8 * 8
+#define FLAGS   9 * 8
+#define CS     10 * 8
+#define SS     11 * 8
+#define DS     12 * 8
+#define ES     13 * 8
+#define FS     14 * 8
+#define GS     15 * 8
+#define R8     16 * 8
+#define R9     17 * 8
+#define R10    18 * 8
+#define R11    19 * 8
+#define R12    20 * 8
+#define R13    21 * 8
+#define R14    22 * 8
+#define R15    23 * 8
+
+.text
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+ENTRY(perf_regs_load)
+       movq %rax, AX(%rdi)
+       movq %rbx, BX(%rdi)
+       movq %rcx, CX(%rdi)
+       movq %rdx, DX(%rdi)
+       movq %rsi, SI(%rdi)
+       movq %rdi, DI(%rdi)
+       movq %rbp, BP(%rdi)
+
+       leaq 8(%rsp), %rax /* exclude this call.  */
+       movq %rax, SP(%rdi)
+
+       movq 0(%rsp), %rax
+       movq %rax, IP(%rdi)
+
+       movq $0, FLAGS(%rdi)
+       movq $0, CS(%rdi)
+       movq $0, SS(%rdi)
+       movq $0, DS(%rdi)
+       movq $0, ES(%rdi)
+       movq $0, FS(%rdi)
+       movq $0, GS(%rdi)
+
+       movq %r8,  R8(%rdi)
+       movq %r9,  R9(%rdi)
+       movq %r10, R10(%rdi)
+       movq %r11, R11(%rdi)
+       movq %r12, R12(%rdi)
+       movq %r13, R13(%rdi)
+       movq %r14, R14(%rdi)
+       movq %r15, R15(%rdi)
+       ret
+ENDPROC(perf_regs_load)
+#else
+ENTRY(perf_regs_load)
+       push %edi
+       movl 8(%esp), %edi
+       movl %eax, AX(%edi)
+       movl %ebx, BX(%edi)
+       movl %ecx, CX(%edi)
+       movl %edx, DX(%edi)
+       movl %esi, SI(%edi)
+       pop %eax
+       movl %eax, DI(%edi)
+       movl %ebp, BP(%edi)
+
+       leal 4(%esp), %eax /* exclude this call.  */
+       movl %eax, SP(%edi)
+
+       movl 0(%esp), %eax
+       movl %eax, IP(%edi)
+
+       movl $0, FLAGS(%edi)
+       movl $0, CS(%edi)
+       movl $0, SS(%edi)
+       movl $0, DS(%edi)
+       movl $0, ES(%edi)
+       movl $0, FS(%edi)
+       movl $0, GS(%edi)
+       ret
+ENDPROC(perf_regs_load)
+#endif
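
The offsets defined at the top of this file line up with the PERF_REG_X86_* indices, so perf_regs_load() fills a plain u64 array that can be indexed with those constants -- exactly what the dwarf-unwind test above does. A minimal standalone sketch of a caller (assumes the x86 perf_regs.h shown earlier; illustrative only):

  #include <stdio.h>
  #include <inttypes.h>
  #include "perf_regs.h"   /* u64, PERF_REGS_MAX, PERF_REG_X86_*, perf_regs_load() */

  int main(void)
  {
          u64 regs[PERF_REGS_MAX];

          perf_regs_load(regs);   /* snapshot of the caller's register file */
          printf("ip=%#" PRIx64 " sp=%#" PRIx64 "\n",
                 regs[PERF_REG_X86_IP], regs[PERF_REG_X86_SP]);
          return 0;
  }
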
diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c
new file mode 100644 (file)
index 0000000..c4b7217
--- /dev/null
@@ -0,0 +1,51 @@
+#include <elfutils/libdwfl.h>
+#include "../../util/unwind-libdw.h"
+#include "../../util/perf_regs.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+       struct unwind_info *ui = arg;
+       struct regs_dump *user_regs = &ui->sample->user_regs;
+       Dwarf_Word dwarf_regs[17];
+       unsigned nregs;
+
+#define REG(r) ({                                              \
+       Dwarf_Word val = 0;                                     \
+       perf_reg_value(&val, user_regs, PERF_REG_X86_##r);      \
+       val;                                                    \
+})
+
+       if (user_regs->abi == PERF_SAMPLE_REGS_ABI_32) {
+               dwarf_regs[0] = REG(AX);
+               dwarf_regs[1] = REG(CX);
+               dwarf_regs[2] = REG(DX);
+               dwarf_regs[3] = REG(BX);
+               dwarf_regs[4] = REG(SP);
+               dwarf_regs[5] = REG(BP);
+               dwarf_regs[6] = REG(SI);
+               dwarf_regs[7] = REG(DI);
+               dwarf_regs[8] = REG(IP);
+               nregs = 9;
+       } else {
+               dwarf_regs[0]  = REG(AX);
+               dwarf_regs[1]  = REG(DX);
+               dwarf_regs[2]  = REG(CX);
+               dwarf_regs[3]  = REG(BX);
+               dwarf_regs[4]  = REG(SI);
+               dwarf_regs[5]  = REG(DI);
+               dwarf_regs[6]  = REG(BP);
+               dwarf_regs[7]  = REG(SP);
+               dwarf_regs[8]  = REG(R8);
+               dwarf_regs[9]  = REG(R9);
+               dwarf_regs[10] = REG(R10);
+               dwarf_regs[11] = REG(R11);
+               dwarf_regs[12] = REG(R12);
+               dwarf_regs[13] = REG(R13);
+               dwarf_regs[14] = REG(R14);
+               dwarf_regs[15] = REG(R15);
+               dwarf_regs[16] = REG(IP);
+               nregs = 17;
+       }
+
+       return dwfl_thread_state_registers(thread, 0, nregs, dwarf_regs);
+}
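
The index into dwarf_regs[] is the DWARF register number for the sampled ABI, which is why the two branches order the registers differently. For reference, the numbering assumed above (summarized from the i386 and x86-64 psABI documents, not part of the patch):

  /*
   * i386  : 0=eax 1=ecx 2=edx 3=ebx 4=esp 5=ebp 6=esi 7=edi 8=eip
   * x86-64: 0=rax 1=rdx 2=rcx 3=rbx 4=rsi 5=rdi 6=rbp 7=rsp
   *         8..15=r8..r15 16=return address (rip)
   */
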
similarity index 95%
rename from tools/perf/arch/x86/util/unwind.c
rename to tools/perf/arch/x86/util/unwind-libunwind.c
index 456a88cf5b374bf3319ea2310e88f2cd179c9b28..3261f68c6a7c0c8834679bf24a543a5afe15df8b 100644 (file)
@@ -5,7 +5,7 @@
 #include "../../util/unwind.h"
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
-int unwind__arch_reg_id(int regnum)
+int libunwind__arch_reg_id(int regnum)
 {
        int id;
 
@@ -69,7 +69,7 @@ int unwind__arch_reg_id(int regnum)
        return id;
 }
 #else
-int unwind__arch_reg_id(int regnum)
+int libunwind__arch_reg_id(int regnum)
 {
        int id;
 
index 0fdc85269c4dc4c2bd72220526879a07e76679be..eba46709b279d42244f1ed85265c390e2778b500 100644 (file)
@@ -31,6 +31,9 @@ extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
 extern int bench_mem_memcpy(int argc, const char **argv,
                            const char *prefix __maybe_unused);
 extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
+extern int bench_futex_hash(int argc, const char **argv, const char *prefix);
+extern int bench_futex_wake(int argc, const char **argv, const char *prefix);
+extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
 
 #define BENCH_FORMAT_DEFAULT_STR       "default"
 #define BENCH_FORMAT_DEFAULT           0
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
new file mode 100644 (file)
index 0000000..a84206e
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
+ *
+ * futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
+ *
+ * This program is particularly useful for measuring the kernel's futex hash
+ * table/function implementation. In order for it to make sense, use with as
+ * many threads and futexes as possible.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+static unsigned int nthreads = 0;
+static unsigned int nsecs    = 10;
+/* amount of futexes per thread */
+static unsigned int nfutexes = 1024;
+static bool fshared = false, done = false, silent = false;
+
+struct timeval start, end, runtime;
+static pthread_mutex_t thread_lock;
+static unsigned int threads_starting;
+static struct stats throughput_stats;
+static pthread_cond_t thread_parent, thread_worker;
+
+struct worker {
+       int tid;
+       u_int32_t *futex;
+       pthread_t thread;
+       unsigned long ops;
+};
+
+static const struct option options[] = {
+       OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
+       OPT_UINTEGER('r', "runtime", &nsecs,    "Specify runtime (in seconds)"),
+       OPT_UINTEGER('f', "futexes", &nfutexes, "Specify amount of futexes per threads"),
+       OPT_BOOLEAN( 's', "silent",  &silent,   "Silent mode: do not display data/details"),
+       OPT_BOOLEAN( 'S', "shared",  &fshared,  "Use shared futexes instead of private ones"),
+       OPT_END()
+};
+
+static const char * const bench_futex_hash_usage[] = {
+       "perf bench futex hash <options>",
+       NULL
+};
+
+static void *workerfn(void *arg)
+{
+       int ret;
+       unsigned int i;
+       struct worker *w = (struct worker *) arg;
+
+       pthread_mutex_lock(&thread_lock);
+       threads_starting--;
+       if (!threads_starting)
+               pthread_cond_signal(&thread_parent);
+       pthread_cond_wait(&thread_worker, &thread_lock);
+       pthread_mutex_unlock(&thread_lock);
+
+       do {
+               for (i = 0; i < nfutexes; i++, w->ops++) {
+                       /*
+                        * We want the futex calls to fail in order to stress
+                        * the hashing of uaddr and not measure other steps,
+                        * such as internal waitqueue handling, thus enlarging
+                        * the critical region protected by hb->lock.
+                        */
+                       ret = futex_wait(&w->futex[i], 1234, NULL,
+                                        fshared ? 0 : FUTEX_PRIVATE_FLAG);
+                       if (!silent &&
+                           (!ret || errno != EAGAIN || errno != EWOULDBLOCK))
+                               warn("Non-expected futex return call");
+               }
+       }  while (!done);
+
+       return NULL;
+}
+
+static void toggle_done(int sig __maybe_unused,
+                       siginfo_t *info __maybe_unused,
+                       void *uc __maybe_unused)
+{
+       /* inform all threads that we're done for the day */
+       done = true;
+       gettimeofday(&end, NULL);
+       timersub(&end, &start, &runtime);
+}
+
+static void print_summary(void)
+{
+       unsigned long avg = avg_stats(&throughput_stats);
+       double stddev = stddev_stats(&throughput_stats);
+
+       printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+              !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+              (int) runtime.tv_sec);
+}
+
+int bench_futex_hash(int argc, const char **argv,
+                    const char *prefix __maybe_unused)
+{
+       int ret = 0;
+       cpu_set_t cpu;
+       struct sigaction act;
+       unsigned int i, ncpus;
+       pthread_attr_t thread_attr;
+       struct worker *worker = NULL;
+
+       argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
+       if (argc) {
+               usage_with_options(bench_futex_hash_usage, options);
+               exit(EXIT_FAILURE);
+       }
+
+       ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+       sigfillset(&act.sa_mask);
+       act.sa_sigaction = toggle_done;
+       sigaction(SIGINT, &act, NULL);
+
+       if (!nthreads) /* default to the number of CPUs */
+               nthreads = ncpus;
+
+       worker = calloc(nthreads, sizeof(*worker));
+       if (!worker)
+               goto errmem;
+
+       printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
+              getpid(), nthreads, nfutexes, fshared ? "shared":"private", nsecs);
+
+       init_stats(&throughput_stats);
+       pthread_mutex_init(&thread_lock, NULL);
+       pthread_cond_init(&thread_parent, NULL);
+       pthread_cond_init(&thread_worker, NULL);
+
+       threads_starting = nthreads;
+       pthread_attr_init(&thread_attr);
+       gettimeofday(&start, NULL);
+       for (i = 0; i < nthreads; i++) {
+               worker[i].tid = i;
+               worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
+               if (!worker[i].futex)
+                       goto errmem;
+
+               CPU_ZERO(&cpu);
+               CPU_SET(i % ncpus, &cpu);
+
+               ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
+               if (ret)
+                       err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+               ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
+                                    (void *)(struct worker *) &worker[i]);
+               if (ret)
+                       err(EXIT_FAILURE, "pthread_create");
+
+       }
+       pthread_attr_destroy(&thread_attr);
+
+       pthread_mutex_lock(&thread_lock);
+       while (threads_starting)
+               pthread_cond_wait(&thread_parent, &thread_lock);
+       pthread_cond_broadcast(&thread_worker);
+       pthread_mutex_unlock(&thread_lock);
+
+       sleep(nsecs);
+       toggle_done(0, NULL, NULL);
+
+       for (i = 0; i < nthreads; i++) {
+               ret = pthread_join(worker[i].thread, NULL);
+               if (ret)
+                       err(EXIT_FAILURE, "pthread_join");
+       }
+
+       /* cleanup & report results */
+       pthread_cond_destroy(&thread_parent);
+       pthread_cond_destroy(&thread_worker);
+       pthread_mutex_destroy(&thread_lock);
+
+       for (i = 0; i < nthreads; i++) {
+               unsigned long t = worker[i].ops/runtime.tv_sec;
+               update_stats(&throughput_stats, t);
+               if (!silent) {
+                       if (nfutexes == 1)
+                               printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
+                                      worker[i].tid, &worker[i].futex[0], t);
+                       else
+                               printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
+                                      worker[i].tid, &worker[i].futex[0],
+                                      &worker[i].futex[nfutexes-1], t);
+               }
+
+               free(worker[i].futex);
+       }
+
+       print_summary();
+
+       free(worker);
+       return ret;
+errmem:
+       err(EXIT_FAILURE, "calloc");
+}
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
new file mode 100644 (file)
index 0000000..a162558
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
+ *
+ * futex-requeue: Block a bunch of threads on futex1 and requeue them
+ *                on futex2, N at a time.
+ *
+ * This program is particularly useful to measure the latency of nthread
+ * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+static u_int32_t futex1 = 0, futex2 = 0;
+
+/*
+ * How many tasks to requeue at a time.
+ * Default to 1 in order to make the kernel work more.
+ */
+static unsigned int nrequeue = 1;
+
+/*
+ * There can be significant variance from run to run;
+ * the more repeats, the more accurate the overall average
+ * and the better the estimate of the futex latency.
+ */
+static unsigned int repeat = 10;
+
+static pthread_t *worker;
+static bool done = 0, silent = 0;
+static pthread_mutex_t thread_lock;
+static pthread_cond_t thread_parent, thread_worker;
+static struct stats requeuetime_stats, requeued_stats;
+static unsigned int ncpus, threads_starting, nthreads = 0;
+
+static const struct option options[] = {
+       OPT_UINTEGER('t', "threads",  &nthreads, "Specify amount of threads"),
+       OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
+       OPT_UINTEGER('r', "repeat",   &repeat,   "Specify amount of times to repeat the run"),
+       OPT_BOOLEAN( 's', "silent",   &silent,   "Silent mode: do not display data/details"),
+       OPT_END()
+};
+
+static const char * const bench_futex_requeue_usage[] = {
+       "perf bench futex requeue <options>",
+       NULL
+};
+
+static void print_summary(void)
+{
+       double requeuetime_avg = avg_stats(&requeuetime_stats);
+       double requeuetime_stddev = stddev_stats(&requeuetime_stats);
+       unsigned int requeued_avg = avg_stats(&requeued_stats);
+
+       printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n",
+              requeued_avg,
+              nthreads,
+              requeuetime_avg/1e3,
+              rel_stddev_stats(requeuetime_stddev, requeuetime_avg));
+}
+
+static void *workerfn(void *arg __maybe_unused)
+{
+       pthread_mutex_lock(&thread_lock);
+       threads_starting--;
+       if (!threads_starting)
+               pthread_cond_signal(&thread_parent);
+       pthread_cond_wait(&thread_worker, &thread_lock);
+       pthread_mutex_unlock(&thread_lock);
+
+       futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
+       return NULL;
+}
+
+static void block_threads(pthread_t *w,
+                         pthread_attr_t thread_attr)
+{
+       cpu_set_t cpu;
+       unsigned int i;
+
+       threads_starting = nthreads;
+
+       /* create and block all threads */
+       for (i = 0; i < nthreads; i++) {
+               CPU_ZERO(&cpu);
+               CPU_SET(i % ncpus, &cpu);
+
+               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+                       err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+               if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+                       err(EXIT_FAILURE, "pthread_create");
+       }
+}
+
+static void toggle_done(int sig __maybe_unused,
+                       siginfo_t *info __maybe_unused,
+                       void *uc __maybe_unused)
+{
+       done = true;
+}
+
+int bench_futex_requeue(int argc, const char **argv,
+                       const char *prefix __maybe_unused)
+{
+       int ret = 0;
+       unsigned int i, j;
+       struct sigaction act;
+       pthread_attr_t thread_attr;
+
+       argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
+       if (argc)
+               goto err;
+
+       ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+       sigfillset(&act.sa_mask);
+       act.sa_sigaction = toggle_done;
+       sigaction(SIGINT, &act, NULL);
+
+       if (!nthreads)
+               nthreads = ncpus;
+
+       worker = calloc(nthreads, sizeof(*worker));
+       if (!worker)
+               err(EXIT_FAILURE, "calloc");
+
+       printf("Run summary [PID %d]: Requeuing %d threads (from %p to %p), "
+              "%d at a time.\n\n",
+              getpid(), nthreads, &futex1, &futex2, nrequeue);
+
+       init_stats(&requeued_stats);
+       init_stats(&requeuetime_stats);
+       pthread_attr_init(&thread_attr);
+       pthread_mutex_init(&thread_lock, NULL);
+       pthread_cond_init(&thread_parent, NULL);
+       pthread_cond_init(&thread_worker, NULL);
+
+       for (j = 0; j < repeat && !done; j++) {
+               unsigned int nrequeued = 0;
+               struct timeval start, end, runtime;
+
+               /* create, launch & block all threads */
+               block_threads(worker, thread_attr);
+
+               /* make sure all threads are already blocked */
+               pthread_mutex_lock(&thread_lock);
+               while (threads_starting)
+                       pthread_cond_wait(&thread_parent, &thread_lock);
+               pthread_cond_broadcast(&thread_worker);
+               pthread_mutex_unlock(&thread_lock);
+
+               usleep(100000);
+
+               /* Ok, all threads are patiently blocked, start requeueing */
+               gettimeofday(&start, NULL);
+               for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue)
+                       /*
+                        * Do not wakeup any tasks blocked on futex1, allowing
+                        * us to really measure futex_wait functionality.
+                        */
+                       futex_cmp_requeue(&futex1, 0, &futex2, 0, nrequeue,
+                                         FUTEX_PRIVATE_FLAG);
+               gettimeofday(&end, NULL);
+               timersub(&end, &start, &runtime);
+
+               update_stats(&requeued_stats, nrequeued);
+               update_stats(&requeuetime_stats, runtime.tv_usec);
+
+               if (!silent) {
+                       printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n",
+                              j + 1, nrequeued, nthreads, runtime.tv_usec/1e3);
+               }
+
+               /* everybody should be blocked on futex2, wake'em up */
+               nrequeued = futex_wake(&futex2, nthreads, FUTEX_PRIVATE_FLAG);
+               if (nthreads != nrequeued)
+                       warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
+
+               for (i = 0; i < nthreads; i++) {
+                       ret = pthread_join(worker[i], NULL);
+                       if (ret)
+                               err(EXIT_FAILURE, "pthread_join");
+               }
+
+       }
+
+       /* cleanup & report results */
+       pthread_cond_destroy(&thread_parent);
+       pthread_cond_destroy(&thread_worker);
+       pthread_mutex_destroy(&thread_lock);
+       pthread_attr_destroy(&thread_attr);
+
+       print_summary();
+
+       free(worker);
+       return ret;
+err:
+       usage_with_options(bench_futex_requeue_usage, options);
+       exit(EXIT_FAILURE);
+}
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
new file mode 100644 (file)
index 0000000..d096169
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2013  Davidlohr Bueso <davidlohr@hp.com>
+ *
+ * futex-wake: Block a bunch of threads on a futex and wake'em up, N at a time.
+ *
+ * This program is particularly useful to measure the latency of nthread wakeups
+ * in non-error situations: all waiters are queued and all wake calls wake up
+ * one or more tasks, and thus the waitqueue is never empty.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+/* all threads will block on the same futex */
+static u_int32_t futex1 = 0;
+
+/*
+ * How many wakeups to do at a time.
+ * Default to 1 in order to make the kernel work more.
+ */
+static unsigned int nwakes = 1;
+
+/*
+ * There can be significant variance from run to run;
+ * the more repeats, the more accurate the overall average
+ * and the better the estimate of the futex latency.
+ */
+static unsigned int repeat = 10;
+
+pthread_t *worker;
+static bool done = 0, silent = 0;
+static pthread_mutex_t thread_lock;
+static pthread_cond_t thread_parent, thread_worker;
+static struct stats waketime_stats, wakeup_stats;
+static unsigned int ncpus, threads_starting, nthreads = 0;
+
+static const struct option options[] = {
+       OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
+       OPT_UINTEGER('w', "nwakes",  &nwakes,   "Specify amount of threads to wake at once"),
+       OPT_UINTEGER('r', "repeat",  &repeat,   "Specify amount of times to repeat the run"),
+       OPT_BOOLEAN( 's', "silent",  &silent,   "Silent mode: do not display data/details"),
+       OPT_END()
+};
+
+static const char * const bench_futex_wake_usage[] = {
+       "perf bench futex wake <options>",
+       NULL
+};
+
+static void *workerfn(void *arg __maybe_unused)
+{
+       pthread_mutex_lock(&thread_lock);
+       threads_starting--;
+       if (!threads_starting)
+               pthread_cond_signal(&thread_parent);
+       pthread_cond_wait(&thread_worker, &thread_lock);
+       pthread_mutex_unlock(&thread_lock);
+
+       futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
+       return NULL;
+}
+
+static void print_summary(void)
+{
+       double waketime_avg = avg_stats(&waketime_stats);
+       double waketime_stddev = stddev_stats(&waketime_stats);
+       unsigned int wakeup_avg = avg_stats(&wakeup_stats);
+
+       printf("Wokeup %d of %d threads in %.4f ms (+-%.2f%%)\n",
+              wakeup_avg,
+              nthreads,
+              waketime_avg/1e3,
+              rel_stddev_stats(waketime_stddev, waketime_avg));
+}
+
+static void block_threads(pthread_t *w,
+                         pthread_attr_t thread_attr)
+{
+       cpu_set_t cpu;
+       unsigned int i;
+
+       threads_starting = nthreads;
+
+       /* create and block all threads */
+       for (i = 0; i < nthreads; i++) {
+               CPU_ZERO(&cpu);
+               CPU_SET(i % ncpus, &cpu);
+
+               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+                       err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+               if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+                       err(EXIT_FAILURE, "pthread_create");
+       }
+}
+
+static void toggle_done(int sig __maybe_unused,
+                       siginfo_t *info __maybe_unused,
+                       void *uc __maybe_unused)
+{
+       done = true;
+}
+
+int bench_futex_wake(int argc, const char **argv,
+                    const char *prefix __maybe_unused)
+{
+       int ret = 0;
+       unsigned int i, j;
+       struct sigaction act;
+       pthread_attr_t thread_attr;
+
+       argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
+       if (argc) {
+               usage_with_options(bench_futex_wake_usage, options);
+               exit(EXIT_FAILURE);
+       }
+
+       ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+       sigfillset(&act.sa_mask);
+       act.sa_sigaction = toggle_done;
+       sigaction(SIGINT, &act, NULL);
+
+       if (!nthreads)
+               nthreads = ncpus;
+
+       worker = calloc(nthreads, sizeof(*worker));
+       if (!worker)
+               err(EXIT_FAILURE, "calloc");
+
+       printf("Run summary [PID %d]: blocking on %d threads (at futex %p), "
+              "waking up %d at a time.\n\n",
+              getpid(), nthreads, &futex1, nwakes);
+
+       init_stats(&wakeup_stats);
+       init_stats(&waketime_stats);
+       pthread_attr_init(&thread_attr);
+       pthread_mutex_init(&thread_lock, NULL);
+       pthread_cond_init(&thread_parent, NULL);
+       pthread_cond_init(&thread_worker, NULL);
+
+       for (j = 0; j < repeat && !done; j++) {
+               unsigned int nwoken = 0;
+               struct timeval start, end, runtime;
+
+               /* create, launch & block all threads */
+               block_threads(worker, thread_attr);
+
+               /* make sure all threads are already blocked */
+               pthread_mutex_lock(&thread_lock);
+               while (threads_starting)
+                       pthread_cond_wait(&thread_parent, &thread_lock);
+               pthread_cond_broadcast(&thread_worker);
+               pthread_mutex_unlock(&thread_lock);
+
+               usleep(100000);
+
+               /* Ok, all threads are patiently blocked, start waking folks up */
+               gettimeofday(&start, NULL);
+               while (nwoken != nthreads)
+                       nwoken += futex_wake(&futex1, nwakes, FUTEX_PRIVATE_FLAG);
+               gettimeofday(&end, NULL);
+               timersub(&end, &start, &runtime);
+
+               update_stats(&wakeup_stats, nwoken);
+               update_stats(&waketime_stats, runtime.tv_usec);
+
+               if (!silent) {
+                       printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n",
+                              j + 1, nwoken, nthreads, runtime.tv_usec/1e3);
+               }
+
+               for (i = 0; i < nthreads; i++) {
+                       ret = pthread_join(worker[i], NULL);
+                       if (ret)
+                               err(EXIT_FAILURE, "pthread_join");
+               }
+
+       }
+
+       /* cleanup & report results */
+       pthread_cond_destroy(&thread_parent);
+       pthread_cond_destroy(&thread_worker);
+       pthread_mutex_destroy(&thread_lock);
+       pthread_attr_destroy(&thread_attr);
+
+       print_summary();
+
+       free(worker);
+       return ret;
+}
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
new file mode 100644 (file)
index 0000000..71f2844
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Glibc independent futex library for testing kernel functionality.
+ * Shamelessly stolen from Darren Hart <dvhltc@us.ibm.com>
+ *    http://git.kernel.org/cgit/linux/kernel/git/dvhart/futextest.git/
+ */
+
+#ifndef _FUTEX_H
+#define _FUTEX_H
+
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <linux/futex.h>
+
+/**
+ * futex() - SYS_futex syscall wrapper
+ * @uaddr:     address of first futex
+ * @op:                futex op code
+ * @val:       typically expected value of uaddr, but varies by op
+ * @timeout:   typically an absolute struct timespec (except where noted
+ *             otherwise). Overloaded by some ops
+ * @uaddr2:    address of second futex for some ops
+ * @val3:      varies by op
+ * @opflags:   flags to be bitwise OR'd with op, such as FUTEX_PRIVATE_FLAG
+ *
+ * futex() is used by all the following futex op wrappers. It can also be
+ * used for misuse and abuse testing. Generally, the specific op wrappers
+ * should be used instead. It is a macro instead of a static inline function as
+ * some of the types are overloaded (timeout is used for nr_requeue, for
+ * example).
+ *
+ * These argument descriptions are the defaults for all
+ * like-named arguments in the following wrappers except where noted below.
+ */
+#define futex(uaddr, op, val, timeout, uaddr2, val3, opflags) \
+       syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3)
+
+/**
+ * futex_wait() - block on uaddr with optional timeout
+ * @timeout:   relative timeout
+ */
+static inline int
+futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout, int opflags)
+{
+       return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags);
+}
+
+/**
+ * futex_wake() - wake one or more tasks blocked on uaddr
+ * @nr_wake:   wake up to this many tasks
+ */
+static inline int
+futex_wake(u_int32_t *uaddr, int nr_wake, int opflags)
+{
+       return futex(uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0, opflags);
+}
+
+/**
+ * futex_cmp_requeue() - requeue tasks from uaddr to uaddr2
+ * @nr_wake:   wake up to this many tasks
+ * @nr_requeue:        requeue up to this many tasks
+ */
+static inline int
+futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wake,
+                int nr_requeue, int opflags)
+{
+       return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
+                val, opflags);
+}
+
+#endif /* _FUTEX_H */
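
A minimal sketch of how these wrappers pair up in the benchmarks below -- one task blocks with futex_wait(), another releases it with futex_wake() (waiter()/waker() are illustrative only, error handling omitted):

  #include "futex.h"

  static u_int32_t f;

  static void waiter(void)
  {
          /* Blocks while f is still 0; returns with EAGAIN if it changed. */
          futex_wait(&f, 0, NULL, FUTEX_PRIVATE_FLAG);
  }

  static void waker(void)
  {
          /* Wake at most one task blocked on &f. */
          futex_wake(&f, 1, FUTEX_PRIVATE_FLAG);
  }
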
index 8a987d2527803ff71f10314152162b29d61b486a..1e6e77710545afc472eae5c0f5e5a63f5ec432d1 100644 (file)
@@ -12,6 +12,7 @@
  *  sched ... scheduler and IPC performance
  *  mem   ... memory access performance
  *  numa  ... NUMA scheduling and MM performance
+ *  futex ... Futex performance
  */
 #include "perf.h"
 #include "util/util.h"
@@ -54,6 +55,14 @@ static struct bench mem_benchmarks[] = {
        { NULL,         NULL,                                           NULL                    }
 };
 
+static struct bench futex_benchmarks[] = {
+       { "hash",       "Benchmark for futex hash table",               bench_futex_hash        },
+       { "wake",       "Benchmark for futex wake calls",               bench_futex_wake        },
+       { "requeue",    "Benchmark for futex requeue calls",            bench_futex_requeue     },
+       { "all",        "Test all futex benchmarks",                    NULL                    },
+       { NULL,         NULL,                                           NULL                    }
+};
+
 struct collection {
        const char      *name;
        const char      *summary;
@@ -61,11 +70,12 @@ struct collection {
 };
 
 static struct collection collections[] = {
-       { "sched",      "Scheduler and IPC benchmarks",         sched_benchmarks        },
+       { "sched",      "Scheduler and IPC benchmarks",                 sched_benchmarks        },
        { "mem",        "Memory access benchmarks",                     mem_benchmarks          },
 #ifdef HAVE_LIBNUMA_SUPPORT
        { "numa",       "NUMA scheduling and MM benchmarks",            numa_benchmarks         },
 #endif
+       {"futex",       "Futex stressing benchmarks",                   futex_benchmarks        },
        { "all",        "All benchmarks",                               NULL                    },
        { NULL,         NULL,                                           NULL                    }
 };
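
With the futex collection registered above, the new benchmarks are reached through the usual perf bench syntax. Per the usage strings and option tables in the benchmark files above, typical invocations would look like the following (the option values are examples only):

  perf bench futex hash -t 4 -f 1024 -r 10
  perf bench futex wake -w 1 -r 10
  perf bench futex requeue -q 1 -r 10
  perf bench futex all
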
index a77e31246c00ba1841a0b8447d97a04302f2ea3b..204fffe225320f3aa710d9e50da63de25c9d56a2 100644 (file)
@@ -952,8 +952,8 @@ static int hpp__entry_global(struct perf_hpp_fmt *_fmt, struct perf_hpp *hpp,
                                 dfmt->header_width, buf);
 }
 
-static int hpp__header(struct perf_hpp_fmt *fmt,
-                      struct perf_hpp *hpp)
+static int hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+                      struct perf_evsel *evsel __maybe_unused)
 {
        struct diff_hpp_fmt *dfmt =
                container_of(fmt, struct diff_hpp_fmt, fmt);
@@ -963,7 +963,8 @@ static int hpp__header(struct perf_hpp_fmt *fmt,
 }
 
 static int hpp__width(struct perf_hpp_fmt *fmt,
-                     struct perf_hpp *hpp __maybe_unused)
+                     struct perf_hpp *hpp __maybe_unused,
+                     struct perf_evsel *evsel __maybe_unused)
 {
        struct diff_hpp_fmt *dfmt =
                container_of(fmt, struct diff_hpp_fmt, fmt);
index b3466018bbd7b514811dd6a5e2bad10ed275eb5b..3a7387551369c97ab38b249ced193cc386c2cabe 100644 (file)
@@ -312,7 +312,6 @@ found:
        sample_sw.period = sample->period;
        sample_sw.time   = sample->time;
        perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
-                                     evsel->attr.sample_regs_user,
                                      evsel->attr.read_format, &sample_sw,
                                      false);
        build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
index a7350519c63f528cfb627d736867742b57b1a2e8..21c164b8f9db2a70a9467e1341fbb6a49a3536a1 100644 (file)
@@ -1691,17 +1691,15 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
                OPT_END()
        };
 
-
-       const char * const kvm_usage[] = {
-               "perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
-               NULL
-       };
+       const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
+                                               "buildid-list", "stat", NULL };
+       const char *kvm_usage[] = { NULL, NULL };
 
        perf_host  = 0;
        perf_guest = 1;
 
-       argc = parse_options(argc, argv, kvm_options, kvm_usage,
-                       PARSE_OPT_STOP_AT_NON_OPTION);
+       argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
+                                       PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc)
                usage_with_options(kvm_usage, kvm_options);
 
index 78948882e3de72c9a521f1d3c6d3bff29f2e493d..cdcd4eb3a57df5e48a77a3ba914251c896e0bfc1 100644 (file)
@@ -268,9 +268,9 @@ static int opt_set_filter(const struct option *opt __maybe_unused,
        return 0;
 }
 
-static void init_params(void)
+static int init_params(void)
 {
-       line_range__init(&params.line_range);
+       return line_range__init(&params.line_range);
 }
 
 static void cleanup_params(void)
@@ -515,9 +515,11 @@ int cmd_probe(int argc, const char **argv, const char *prefix)
 {
        int ret;
 
-       init_params();
-       ret = __cmd_probe(argc, argv, prefix);
-       cleanup_params();
+       ret = init_params();
+       if (!ret) {
+               ret = __cmd_probe(argc, argv, prefix);
+               cleanup_params();
+       }
 
        return ret;
 }
index af47531b82ecda73a9233b7c9aef27c70754666a..eb524f91bffe5d9098d582e07050734a1c3e5343 100644 (file)
@@ -649,7 +649,7 @@ error:
        return ret;
 }
 
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
 static int get_stack_size(char *str, unsigned long *_size)
 {
        char *endptr;
@@ -675,7 +675,7 @@ static int get_stack_size(char *str, unsigned long *_size)
               max_size, str);
        return -1;
 }
-#endif /* HAVE_LIBUNWIND_SUPPORT */
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
 
 int record_parse_callchain(const char *arg, struct record_opts *opts)
 {
@@ -704,7 +704,7 @@ int record_parse_callchain(const char *arg, struct record_opts *opts)
                                       "needed for -g fp\n");
                        break;
 
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
                /* Dwarf style */
                } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
                        const unsigned long default_stack_dump_size = 8192;
@@ -720,7 +720,7 @@ int record_parse_callchain(const char *arg, struct record_opts *opts)
                                ret = get_stack_size(tok, &size);
                                opts->stack_dump_size = size;
                        }
-#endif /* HAVE_LIBUNWIND_SUPPORT */
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
                } else {
                        pr_err("callchain: Unknown --call-graph option "
                               "value: %s\n", arg);
@@ -735,7 +735,9 @@ int record_parse_callchain(const char *arg, struct record_opts *opts)
 
 static void callchain_debug(struct record_opts *opts)
 {
-       pr_debug("callchain: type %d\n", opts->call_graph);
+       static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };
+
+       pr_debug("callchain: type %s\n", str[opts->call_graph]);
 
        if (opts->call_graph == CALLCHAIN_DWARF)
                pr_debug("callchain: stack dump size %d\n",
@@ -749,6 +751,8 @@ int record_parse_callchain_opt(const struct option *opt,
        struct record_opts *opts = opt->value;
        int ret;
 
+       opts->call_graph_enabled = !unset;
+
        /* --no-call-graph */
        if (unset) {
                opts->call_graph = CALLCHAIN_NONE;
@@ -769,6 +773,8 @@ int record_callchain_opt(const struct option *opt,
 {
        struct record_opts *opts = opt->value;
 
+       opts->call_graph_enabled = !unset;
+
        if (opts->call_graph == CALLCHAIN_NONE)
                opts->call_graph = CALLCHAIN_FP;
 
@@ -776,6 +782,16 @@ int record_callchain_opt(const struct option *opt,
        return 0;
 }
 
+static int perf_record_config(const char *var, const char *value, void *cb)
+{
+       struct record *rec = cb;
+
+       if (!strcmp(var, "record.call-graph"))
+               return record_parse_callchain(value, &rec->opts);
+
+       return perf_default_config(var, value, cb);
+}
+
 static const char * const record_usage[] = {
        "perf record [<options>] [<command>]",
        "perf record [<options>] -- <command> [<options>]",
@@ -807,7 +823,7 @@ static struct record record = {
 
 #define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
 
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
 const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
 #else
 const char record_callchain_help[] = CALLCHAIN_HELP "fp";
@@ -907,6 +923,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
        if (rec->evlist == NULL)
                return -ENOMEM;
 
+       perf_config(perf_record_config, rec);
+
        argc = parse_options(argc, argv, record_options, record_usage,
                            PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc && target__none(&rec->opts.target))
index 02f985f3a396916ff60e4fcd8123bc109fa0dab4..c8f21137dfd8ad7f80472acf020eab9e3f492bb6 100644 (file)
@@ -75,13 +75,10 @@ static int report__config(const char *var, const char *value, void *cb)
        return perf_default_config(var, value, cb);
 }
 
-static int report__add_mem_hist_entry(struct perf_tool *tool, struct addr_location *al,
-                                     struct perf_sample *sample, struct perf_evsel *evsel,
-                                     union perf_event *event)
+static int report__add_mem_hist_entry(struct report *rep, struct addr_location *al,
+                                     struct perf_sample *sample, struct perf_evsel *evsel)
 {
-       struct report *rep = container_of(tool, struct report, tool);
        struct symbol *parent = NULL;
-       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct hist_entry *he;
        struct mem_info *mi, *mx;
        uint64_t cost;
@@ -90,7 +87,7 @@ static int report__add_mem_hist_entry(struct perf_tool *tool, struct addr_locati
        if (err)
                return err;
 
-       mi = machine__resolve_mem(al->machine, al->thread, sample, cpumode);
+       mi = sample__resolve_mem(sample, al);
        if (!mi)
                return -ENOMEM;
 
@@ -131,10 +128,9 @@ out:
        return err;
 }
 
-static int report__add_branch_hist_entry(struct perf_tool *tool, struct addr_location *al,
+static int report__add_branch_hist_entry(struct report *rep, struct addr_location *al,
                                         struct perf_sample *sample, struct perf_evsel *evsel)
 {
-       struct report *rep = container_of(tool, struct report, tool);
        struct symbol *parent = NULL;
        unsigned i;
        struct hist_entry *he;
@@ -144,8 +140,7 @@ static int report__add_branch_hist_entry(struct perf_tool *tool, struct addr_loc
        if (err)
                return err;
 
-       bi = machine__resolve_bstack(al->machine, al->thread,
-                                    sample->branch_stack);
+       bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;
 
@@ -190,10 +185,9 @@ out:
        return err;
 }
 
-static int report__add_hist_entry(struct perf_tool *tool, struct perf_evsel *evsel,
+static int report__add_hist_entry(struct report *rep, struct perf_evsel *evsel,
                                  struct addr_location *al, struct perf_sample *sample)
 {
-       struct report *rep = container_of(tool, struct report, tool);
        struct symbol *parent = NULL;
        struct hist_entry *he;
        int err = sample__resolve_callchain(sample, &parent, evsel, al, rep->max_stack);
@@ -237,25 +231,25 @@ static int process_sample_event(struct perf_tool *tool,
                return -1;
        }
 
-       if (al.filtered || (rep->hide_unresolved && al.sym == NULL))
+       if (rep->hide_unresolved && al.sym == NULL)
                return 0;
 
        if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
                return 0;
 
        if (sort__mode == SORT_MODE__BRANCH) {
-               ret = report__add_branch_hist_entry(tool, &al, sample, evsel);
+               ret = report__add_branch_hist_entry(rep, &al, sample, evsel);
                if (ret < 0)
                        pr_debug("problem adding lbr entry, skipping event\n");
        } else if (rep->mem_mode == 1) {
-               ret = report__add_mem_hist_entry(tool, &al, sample, evsel, event);
+               ret = report__add_mem_hist_entry(rep, &al, sample, evsel);
                if (ret < 0)
                        pr_debug("problem adding mem entry, skipping event\n");
        } else {
                if (al.map != NULL)
                        al.map->dso->hit = 1;
 
-               ret = report__add_hist_entry(tool, evsel, &al, sample);
+               ret = report__add_hist_entry(rep, evsel, &al, sample);
                if (ret < 0)
                        pr_debug("problem incrementing symbol period, skipping event\n");
        }
@@ -934,7 +928,7 @@ repeat:
         * so don't allocate extra space that won't be used in the stdio
         * implementation.
         */
-       if (use_browser == 1 && sort__has_sym) {
+       if (ui__has_annotation()) {
                symbol_conf.priv_size = sizeof(struct annotation);
                machines__set_symbol_filter(&session->machines,
                                            symbol__annotate_init);
index 6a76a07b67890c253fbb1e227e9aaa980b499779..9ac0a495c954e5793ddfe9e0e42ef4ee995bec56 100644 (file)
@@ -1124,7 +1124,7 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
 
        avg = work_list->total_lat / work_list->nb_atoms;
 
-       printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
+       printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
              (double)work_list->total_runtime / 1e6,
                 work_list->nb_atoms, (double)avg / 1e6,
                 (double)work_list->max_lat / 1e6,
@@ -1527,9 +1527,9 @@ static int perf_sched__lat(struct perf_sched *sched)
 
        perf_sched__sort_lat(sched);
 
-       printf("\n ---------------------------------------------------------------------------------------------------------------\n");
-       printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
-       printf(" ---------------------------------------------------------------------------------------------------------------\n");
+       printf("\n -----------------------------------------------------------------------------------------------------------------\n");
+       printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
+       printf(" -----------------------------------------------------------------------------------------------------------------\n");
 
        next = rb_first(&sched->sorted_atom_root);
 
@@ -1541,7 +1541,7 @@ static int perf_sched__lat(struct perf_sched *sched)
                next = rb_next(next);
        }
 
-       printf(" -----------------------------------------------------------------------------------------\n");
+       printf(" -----------------------------------------------------------------------------------------------------------------\n");
        printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
                (double)sched->all_runtime / 1e6, sched->all_count);
 
index 25526d6eae59f65a4a0405c9186d93b9cd5d3d59..74db2568b867b7996726674edd20539a7820dcc9 100644 (file)
@@ -494,7 +494,7 @@ static const char *cat_backtrace(union perf_event *event,
                        continue;
                }
 
-               tal.filtered = false;
+               tal.filtered = 0;
                thread__find_addr_location(al.thread, machine, cpumode,
                                           MAP__FUNCTION, ip, &tal);
 
@@ -1238,7 +1238,7 @@ static int timechart__record(struct timechart *tchart, int argc, const char **ar
        for (i = 0; i < old_power_args_nr; i++)
                *p++ = strdup(old_power_args[i]);
 
-       for (j = 1; j < (unsigned int)argc; j++)
+       for (j = 0; j < (unsigned int)argc; j++)
                *p++ = argv[j];
 
        return cmd_record(rec_argc, rec_argv, NULL);
index 5f989a7d8bc2166e30d6bcbef710015f019ab252..65aaa5bbf7ec75830b9b58afae6c6d66d352a9f7 100644 (file)
@@ -993,6 +993,16 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
        return record_parse_callchain_opt(opt, arg, unset);
 }
 
+static int perf_top_config(const char *var, const char *value, void *cb)
+{
+       struct perf_top *top = cb;
+
+       if (!strcmp(var, "top.call-graph"))
+               return record_parse_callchain(value, &top->record_opts);
+
+       return perf_default_config(var, value, cb);
+}
+
 static int
 parse_percent_limit(const struct option *opt, const char *arg,
                    int unset __maybe_unused)
@@ -1117,6 +1127,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
        if (top.evlist == NULL)
                return -ENOMEM;
 
+       perf_config(perf_top_config, &top);
+
        argc = parse_options(argc, argv, options, top_usage, 0);
        if (argc)
                usage_with_options(top_usage, options);
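
The perf_record_config() and perf_top_config() hooks added above read the user's perfconfig via perf_config(), so the call-graph mode can now be made persistent instead of being passed on every run. A hypothetical ~/.perfconfig snippet matching the variables they check ("record.call-graph" and "top.call-graph"; the dwarf stack-dump size is optional):

  [record]
          call-graph = dwarf,8192
  [top]
          call-graph = fp
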
index 0331ea2701a380c536facf82b8306729f23d9de3..c23418225c2c806086012b982faf7769f64efd98 100644 (file)
@@ -59,6 +59,18 @@ ifeq ($(NO_PERF_REGS),0)
   CFLAGS += -DHAVE_PERF_REGS_SUPPORT
 endif
 
+ifndef NO_LIBELF
+  # for linking with debug library, run like:
+  # make DEBUG=1 LIBDW_DIR=/opt/libdw/
+  ifdef LIBDW_DIR
+    LIBDW_CFLAGS  := -I$(LIBDW_DIR)/include
+    LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
+
+    FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS)
+    FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw
+  endif
+endif
+
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
 
@@ -147,7 +159,35 @@ CORE_FEATURE_TESTS =                       \
        libunwind                       \
        on-exit                         \
        stackprotector-all              \
-       timerfd
+       timerfd                         \
+       libdw-dwarf-unwind
+
+LIB_FEATURE_TESTS =                    \
+       dwarf                           \
+       glibc                           \
+       gtk2                            \
+       libaudit                        \
+       libbfd                          \
+       libelf                          \
+       libnuma                         \
+       libperl                         \
+       libpython                       \
+       libslang                        \
+       libunwind                       \
+       libdw-dwarf-unwind
+
+VF_FEATURE_TESTS =                     \
+       backtrace                       \
+       fortify-source                  \
+       gtk2-infobar                    \
+       libelf-getphdrnum               \
+       libelf-mmap                     \
+       libpython-version               \
+       on-exit                         \
+       stackprotector-all              \
+       timerfd                         \
+       libunwind-debug-frame           \
+       bionic
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
@@ -160,17 +200,6 @@ endef
 
 $(foreach feat,$(CORE_FEATURE_TESTS),$(call set_test_all_flags,$(feat)))
 
-#
-# So here we detect whether test-all was rebuilt, to be able
-# to skip the print-out of the long features list if the file
-# existed before and after it was built:
-#
-ifeq ($(wildcard $(OUTPUT)config/feature-checks/test-all.bin),)
-  test-all-failed := 1
-else
-  test-all-failed := 0
-endif
-
 #
 # Special fast-path for the 'all features are available' case:
 #
@@ -180,15 +209,6 @@ $(call feature_check,all,$(MSG))
 # Just in case the build freshly failed, make sure we print the
 # feature matrix:
 #
-ifeq ($(feature-all), 0)
-  test-all-failed := 1
-endif
-
-ifeq ($(test-all-failed),1)
-  $(info )
-  $(info Auto-detecting system features:)
-endif
-
 ifeq ($(feature-all), 1)
   #
   # test-all.c passed - just set all the core feature flags to 1:
@@ -199,27 +219,6 @@ else
   $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_check,$(feat)))
 endif
 
-#
-# Print the result of the feature test:
-#
-feature_print = $(eval $(feature_print_code)) $(info $(MSG))
-
-define feature_print_code
-  ifeq ($(feature-$(1)), 1)
-    MSG = $(shell printf '...%30s: [ \033[32mon\033[m  ]' $(1))
-  else
-    MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
-  endif
-endef
-
-#
-# Only print out our features if we rebuilt the testcases or if a test failed:
-#
-ifeq ($(test-all-failed), 1)
-  $(foreach feat,$(CORE_FEATURE_TESTS),$(call feature_print,$(feat)))
-  $(info )
-endif
-
 ifeq ($(feature-stackprotector-all), 1)
   CFLAGS += -fstack-protector-all
 endif
@@ -264,6 +263,7 @@ ifdef NO_LIBELF
   NO_DWARF := 1
   NO_DEMANGLE := 1
   NO_LIBUNWIND := 1
+  NO_LIBDW_DWARF_UNWIND := 1
 else
   ifeq ($(feature-libelf), 0)
     ifeq ($(feature-glibc), 1)
@@ -282,13 +282,12 @@ else
       msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
     endif
   else
-    # for linking with debug library, run like:
-    # make DEBUG=1 LIBDW_DIR=/opt/libdw/
-    ifdef LIBDW_DIR
-      LIBDW_CFLAGS  := -I$(LIBDW_DIR)/include
-      LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
+    ifndef NO_LIBDW_DWARF_UNWIND
+      ifneq ($(feature-libdw-dwarf-unwind),1)
+        NO_LIBDW_DWARF_UNWIND := 1
+        msg := $(warning No libdw DWARF unwind found, Please install elfutils-devel/libdw-dev >= 0.158 and/or set LIBDW_DIR);
+      endif
     endif
-
     ifneq ($(feature-dwarf), 1)
       msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
       NO_DWARF := 1
@@ -324,25 +323,51 @@ endif # NO_LIBELF
 
 ifndef NO_LIBUNWIND
   ifneq ($(feature-libunwind), 1)
-    msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
+    msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR);
     NO_LIBUNWIND := 1
+  endif
+endif
+
+dwarf-post-unwind := 1
+dwarf-post-unwind-text := BUG
+
+# setup DWARF post unwinder
+ifdef NO_LIBUNWIND
+  ifdef NO_LIBDW_DWARF_UNWIND
+    msg := $(warning Disabling post unwind, no support found.);
+    dwarf-post-unwind := 0
   else
-    ifeq ($(ARCH),arm)
-      $(call feature_check,libunwind-debug-frame)
-      ifneq ($(feature-libunwind-debug-frame), 1)
-        msg := $(warning No debug_frame support found in libunwind);
-        CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
-      endif
-    else
-      # non-ARM has no dwarf_find_debug_frame() function:
+    dwarf-post-unwind-text := libdw
+  endif
+else
+  dwarf-post-unwind-text := libunwind
+  # Enable libunwind support by default.
+  ifndef NO_LIBDW_DWARF_UNWIND
+    NO_LIBDW_DWARF_UNWIND := 1
+  endif
+endif
+
+ifeq ($(dwarf-post-unwind),1)
+  CFLAGS += -DHAVE_DWARF_UNWIND_SUPPORT
+else
+  NO_DWARF_UNWIND := 1
+endif
+
+ifndef NO_LIBUNWIND
+  ifeq ($(ARCH),arm)
+    $(call feature_check,libunwind-debug-frame)
+    ifneq ($(feature-libunwind-debug-frame), 1)
+      msg := $(warning No debug_frame support found in libunwind);
       CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
     endif
-
-    CFLAGS += -DHAVE_LIBUNWIND_SUPPORT
-    EXTLIBS += $(LIBUNWIND_LIBS)
-    CFLAGS += $(LIBUNWIND_CFLAGS)
-    LDFLAGS += $(LIBUNWIND_LDFLAGS)
-  endif # ifneq ($(feature-libunwind), 1)
+  else
+    # non-ARM has no dwarf_find_debug_frame() function:
+    CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+  endif
+  CFLAGS  += -DHAVE_LIBUNWIND_SUPPORT
+  EXTLIBS += $(LIBUNWIND_LIBS)
+  CFLAGS  += $(LIBUNWIND_CFLAGS)
+  LDFLAGS += $(LIBUNWIND_LDFLAGS)
 endif
 
 ifndef NO_LIBAUDIT
@@ -602,3 +627,84 @@ ifdef DESTDIR
 plugindir=$(libdir)/traceevent/plugins
 plugindir_SQ= $(subst ','\'',$(plugindir))
 endif
+
+#
+# Print the result of the feature test:
+#
+feature_print_status = $(eval $(feature_print_status_code)) $(info $(MSG))
+
+define feature_print_status_code
+  ifeq ($(feature-$(1)), 1)
+    MSG = $(shell printf '...%30s: [ \033[32mon\033[m  ]' $(1))
+  else
+    MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
+  endif
+endef
+
+feature_print_var = $(eval $(feature_print_var_code)) $(info $(MSG))
+define feature_print_var_code
+    MSG = $(shell printf '...%30s: %s' $(1) $($(1)))
+endef
+
+feature_print_text = $(eval $(feature_print_text_code)) $(info $(MSG))
+define feature_print_text_code
+    MSG = $(shell printf '...%30s: %s' $(1) $(2))
+endef
+
+PERF_FEATURES := $(foreach feat,$(LIB_FEATURE_TESTS),feature-$(feat)($(feature-$(feat))))
+PERF_FEATURES_FILE := $(shell touch $(OUTPUT)PERF-FEATURES; cat $(OUTPUT)PERF-FEATURES)
+
+ifeq ($(dwarf-post-unwind),1)
+  PERF_FEATURES += dwarf-post-unwind($(dwarf-post-unwind-text))
+endif
+
+# The $(display_lib) variable controls the default detection message
+# output. It's set if:
+# - the detected features differ from the features stored from the
+#   last build (in the PERF-FEATURES file)
+# - one of the $(LIB_FEATURE_TESTS) is not detected
+# - VF is enabled
+
+ifneq ("$(PERF_FEATURES)","$(PERF_FEATURES_FILE)")
+  $(shell echo "$(PERF_FEATURES)" > $(OUTPUT)PERF-FEATURES)
+  display_lib := 1
+endif
+
+feature_check = $(eval $(feature_check_code))
+define feature_check_code
+  ifneq ($(feature-$(1)), 1)
+    display_lib := 1
+  endif
+endef
+
+$(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_check,$(feat)))
+
+ifeq ($(VF),1)
+  display_lib := 1
+  display_vf := 1
+endif
+
+ifeq ($(display_lib),1)
+  $(info )
+  $(info Auto-detecting system features:)
+  $(foreach feat,$(LIB_FEATURE_TESTS),$(call feature_print_status,$(feat),))
+
+  ifeq ($(dwarf-post-unwind),1)
+    $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
+  endif
+endif
+
+ifeq ($(display_vf),1)
+  $(foreach feat,$(VF_FEATURE_TESTS),$(call feature_print_status,$(feat),))
+  $(info )
+  $(call feature_print_var,prefix)
+  $(call feature_print_var,bindir)
+  $(call feature_print_var,libdir)
+  $(call feature_print_var,sysconfdir)
+  $(call feature_print_var,LIBUNWIND_DIR)
+  $(call feature_print_var,LIBDW_DIR)
+endif
+
+ifeq ($(display_lib),1)
+  $(info )
+endif
index 523b7bc1055321051d62562f1f490165ea2872b5..2da103c53f892762b0ad634e1d60669a7c4b5b8a 100644 (file)
@@ -26,7 +26,8 @@ FILES=                                        \
        test-libunwind-debug-frame.bin  \
        test-on-exit.bin                \
        test-stackprotector-all.bin     \
-       test-timerfd.bin
+       test-timerfd.bin                \
+       test-libdw-dwarf-unwind.bin
 
 CC := $(CROSS_COMPILE)gcc -MD
 PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -141,6 +142,9 @@ test-backtrace.bin:
 test-timerfd.bin:
        $(BUILD)
 
+test-libdw-dwarf-unwind.bin:
+       $(BUILD)
+
 -include *.d
 
 ###############################
index 9b8a544155bbdf2a628f21f400dda8e65f9d51a7..fc37eb3ca17b9121c60ae47133b540896700756c 100644 (file)
 # include "test-stackprotector-all.c"
 #undef main
 
+#define main main_test_libdw_dwarf_unwind
+# include "test-libdw-dwarf-unwind.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -111,6 +115,7 @@ int main(int argc, char *argv[])
        main_test_libnuma();
        main_test_timerfd();
        main_test_stackprotector_all();
+       main_test_libdw_dwarf_unwind();
 
        return 0;
 }
diff --git a/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c b/tools/perf/config/feature-checks/test-libdw-dwarf-unwind.c
new file mode 100644 (file)
index 0000000..f676a3f
--- /dev/null
@@ -0,0 +1,13 @@
+
+#include <elfutils/libdwfl.h>
+
+int main(void)
+{
+       /*
+        * This function is guarded via: __nonnull_attribute__ (1, 2).
+        * Passing '1' as the value of both arguments. This code is never
+        * executed, only compiled.
+        */
+       dwfl_thread_getframes((void *) 1, (void *) 1, NULL);
+       return 0;
+}
index 63a0e6f04a01191fb6e90f7e220280edfa2d4cea..a28dca2582aa63f38fb2997235ab1818a9a45e42 100644 (file)
@@ -18,7 +18,7 @@ underlying hardware counters.
 Performance counters are accessed via special file descriptors.
 There's one file descriptor per virtual counter used.
 
-The special file descriptor is opened via the perf_event_open()
+The special file descriptor is opened via the sys_perf_event_open()
 system call:
 
    int sys_perf_event_open(struct perf_event_attr *hw_event_uptr,
@@ -82,7 +82,7 @@ machine-specific.
 If 'raw_type' is 0, then the 'type' field says what kind of counter
 this is, with the following encoding:
 
-enum perf_event_types {
+enum perf_type_id {
        PERF_TYPE_HARDWARE              = 0,
        PERF_TYPE_SOFTWARE              = 1,
        PERF_TYPE_TRACEPOINT            = 2,
@@ -95,7 +95,7 @@ specified by 'event_id':
  * Generalized performance counter event types, used by the hw_event.event_id
  * parameter of the sys_perf_event_open() syscall:
  */
-enum hw_event_ids {
+enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
@@ -129,7 +129,7 @@ software events, selected by 'event_id':
  * physical and sw events of the kernel (and allow the profiling of them as
  * well):
  */
-enum sw_event_ids {
+enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK         = 0,
        PERF_COUNT_SW_TASK_CLOCK        = 1,
        PERF_COUNT_SW_PAGE_FAULTS       = 2,
@@ -230,7 +230,7 @@ these events are recorded in the ring-buffer (see below).
 The 'comm' bit allows tracking of process comm data on process creation.
 This too is recorded in the ring-buffer (see below).
 
-The 'pid' parameter to the perf_event_open() system call allows the
+The 'pid' parameter to the sys_perf_event_open() system call allows the
 counter to be specific to a task:
 
  pid == 0: if the pid parameter is zero, the counter is attached to the
@@ -260,7 +260,7 @@ The 'flags' parameter is currently unused and must be zero.
 
 The 'group_fd' parameter allows counter "groups" to be set up.  A
 counter group has one counter which is the group "leader".  The leader
-is created first, with group_fd = -1 in the perf_event_open call
+is created first, with group_fd = -1 in the sys_perf_event_open call
 that creates it.  The rest of the group members are created
 subsequently, with group_fd giving the fd of the group leader.
 (A single counter on its own is created with group_fd = -1 and is
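For reference, a minimal user-space sketch of the sys_perf_event_open() call described above: one hardware counter for the current task on any CPU, created as its own group leader with group_fd = -1. The specific attr fields chosen here (instructions, user space only) are illustrative, not part of the patch:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;           /* generalized hw event */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS; /* event_id */
	attr.exclude_kernel = 1;                  /* count user space only */

	/* pid == 0, cpu == -1: this task on any CPU; group_fd == -1: group leader */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run some work, then read the virtual counter via its fd ... */
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %lld\n", count);

	close(fd);
	return 0;
}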
index 496e2abb54824e3fbcc937e3cfc267ea3bfa82b0..ae3a57694b6bda9937146c25e291ab9a80929ce7 100644 (file)
@@ -123,7 +123,7 @@ __perf_main ()
                __perfcomp_colon "$evts" "$cur"
        # List subcommands for 'perf kvm'
        elif [[ $prev == "kvm" ]]; then
-               subcmds="top record report diff buildid-list stat"
+               subcmds=$($cmd $prev --list-cmds)
                __perfcomp_colon "$subcmds" "$cur"
        # List long option names
        elif [[ $cur == --* ]];  then
index e84fa26bc1bec472e65696b1649656bb4cae3af4..e18a8b5e69531cca4e1fbf889098d779ff14cf4d 100644 (file)
@@ -12,6 +12,9 @@
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 336
 #endif
+#ifndef __NR_futex
+# define __NR_futex 240
+#endif
 #endif
 
 #if defined(__x86_64__)
@@ -23,6 +26,9 @@
 #ifndef __NR_perf_event_open
 # define __NR_perf_event_open 298
 #endif
+#ifndef __NR_futex
+# define __NR_futex 202
+#endif
 #endif
 
 #ifdef __powerpc__
@@ -251,12 +257,14 @@ void pthread__unblock_sigwinch(void);
 enum perf_call_graph_mode {
        CALLCHAIN_NONE,
        CALLCHAIN_FP,
-       CALLCHAIN_DWARF
+       CALLCHAIN_DWARF,
+       CALLCHAIN_MAX
 };
 
 struct record_opts {
        struct target target;
        int          call_graph;
+       bool         call_graph_enabled;
        bool         group;
        bool         inherit_stat;
        bool         no_buffering;
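The __NR_futex fallbacks added above match the i386 (240) and x86_64 (202) syscall numbers, so the raw syscall can be issued even where libc headers lack the define. A minimal, hedged sketch of such a call (a FUTEX_WAKE on a word with no waiters simply returns 0); this is illustration only, not code from the patch:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static int futex_word;

int main(void)
{
	/* futex(uaddr, op, val, timeout, uaddr2, val3): wake up to 1 waiter */
	long woken = syscall(__NR_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);

	printf("woken: %ld\n", woken);
	return 0;
}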
index 1e67437fb4cae5c770087432d248591b05baa033..b11bf8a08430b1c37ea4ac0f70ba2f418986508d 100644 (file)
@@ -115,6 +115,14 @@ static struct test {
                .desc = "Test parsing with no sample_id_all bit set",
                .func = test__parse_no_sample_id_all,
        },
+#if defined(__x86_64__) || defined(__i386__)
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+       {
+               .desc = "Test dwarf unwind",
+               .func = test__dwarf_unwind,
+       },
+#endif
+#endif
        {
                .func = NULL,
        },
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
new file mode 100644 (file)
index 0000000..c059ee8
--- /dev/null
@@ -0,0 +1,144 @@
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "tests.h"
+#include "debug.h"
+#include "machine.h"
+#include "event.h"
+#include "unwind.h"
+#include "perf_regs.h"
+#include "map.h"
+#include "thread.h"
+
+static int mmap_handler(struct perf_tool *tool __maybe_unused,
+                       union perf_event *event,
+                       struct perf_sample *sample __maybe_unused,
+                       struct machine *machine)
+{
+       return machine__process_mmap_event(machine, event, NULL);
+}
+
+static int init_live_machine(struct machine *machine)
+{
+       union perf_event event;
+       pid_t pid = getpid();
+
+       return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
+                                                 mmap_handler, machine, true);
+}
+
+#define MAX_STACK 6
+
+static int unwind_entry(struct unwind_entry *entry, void *arg)
+{
+       unsigned long *cnt = (unsigned long *) arg;
+       char *symbol = entry->sym ? entry->sym->name : NULL;
+       static const char *funcs[MAX_STACK] = {
+               "test__arch_unwind_sample",
+               "unwind_thread",
+               "krava_3",
+               "krava_2",
+               "krava_1",
+               "test__dwarf_unwind"
+       };
+
+       if (*cnt >= MAX_STACK) {
+               pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
+               return -1;
+       }
+
+       if (!symbol) {
+               pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
+                        entry->ip);
+               return -1;
+       }
+
+       pr_debug("got: %s 0x%" PRIx64 "\n", symbol, entry->ip);
+       return strcmp((const char *) symbol, funcs[(*cnt)++]);
+}
+
+__attribute__ ((noinline))
+static int unwind_thread(struct thread *thread, struct machine *machine)
+{
+       struct perf_sample sample;
+       unsigned long cnt = 0;
+       int err = -1;
+
+       memset(&sample, 0, sizeof(sample));
+
+       if (test__arch_unwind_sample(&sample, thread)) {
+               pr_debug("failed to get unwind sample\n");
+               goto out;
+       }
+
+       err = unwind__get_entries(unwind_entry, &cnt, machine, thread,
+                                 &sample, MAX_STACK);
+       if (err)
+               pr_debug("unwind failed\n");
+       else if (cnt != MAX_STACK) {
+               pr_debug("got wrong number of stack entries %lu != %d\n",
+                        cnt, MAX_STACK);
+               err = -1;
+       }
+
+ out:
+       free(sample.user_stack.data);
+       free(sample.user_regs.regs);
+       return err;
+}
+
+__attribute__ ((noinline))
+static int krava_3(struct thread *thread, struct machine *machine)
+{
+       return unwind_thread(thread, machine);
+}
+
+__attribute__ ((noinline))
+static int krava_2(struct thread *thread, struct machine *machine)
+{
+       return krava_3(thread, machine);
+}
+
+__attribute__ ((noinline))
+static int krava_1(struct thread *thread, struct machine *machine)
+{
+       return krava_2(thread, machine);
+}
+
+int test__dwarf_unwind(void)
+{
+       struct machines machines;
+       struct machine *machine;
+       struct thread *thread;
+       int err = -1;
+
+       machines__init(&machines);
+
+       machine = machines__find(&machines, HOST_KERNEL_ID);
+       if (!machine) {
+               pr_err("Could not get machine\n");
+               return -1;
+       }
+
+       if (init_live_machine(machine)) {
+               pr_err("Could not init machine\n");
+               goto out;
+       }
+
+       if (verbose > 1)
+               machine__fprintf(machine, stderr);
+
+       thread = machine__find_thread(machine, getpid(), getpid());
+       if (!thread) {
+               pr_err("Could not get thread\n");
+               goto out;
+       }
+
+       err = krava_1(thread, machine);
+
+ out:
+       machine__delete_threads(machine);
+       machine__exit(machine);
+       machines__exit(&machines);
+       return err;
+}
index 2b6519e0e36f09bdbdeac4865fed851f82a3a1ac..7ccbc7b6ae775e0c2793e77706a0acfaf86f5a27 100644 (file)
@@ -101,6 +101,7 @@ static struct machine *setup_fake_machine(struct machines *machines)
                        .mmap = {
                                .header = { .misc = PERF_RECORD_MISC_USER, },
                                .pid = fake_mmap_info[i].pid,
+                               .tid = fake_mmap_info[i].pid,
                                .start = fake_mmap_info[i].start,
                                .len = 0x1000ULL,
                                .pgoff = 0ULL,
index 00544b8b644b3bf28c97e392955bfcdb36a509fb..5daeae1cb4c01b3a87f4c54ee4018199c12aeeb6 100644 (file)
@@ -27,6 +27,7 @@ make_no_ui          := NO_NEWT=1 NO_SLANG=1 NO_GTK2=1
 make_no_demangle    := NO_DEMANGLE=1
 make_no_libelf      := NO_LIBELF=1
 make_no_libunwind   := NO_LIBUNWIND=1
+make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1
 make_no_backtrace   := NO_BACKTRACE=1
 make_no_libnuma     := NO_LIBNUMA=1
 make_no_libaudit    := NO_LIBAUDIT=1
@@ -35,8 +36,9 @@ make_tags           := tags
 make_cscope         := cscope
 make_help           := help
 make_doc            := doc
-make_perf_o         := perf.o
-make_util_map_o     := util/map.o
+make_perf_o           := perf.o
+make_util_map_o       := util/map.o
+make_util_pmu_bison_o := util/pmu-bison.o
 make_install        := install
 make_install_bin    := install-bin
 make_install_doc    := install-doc
@@ -49,6 +51,7 @@ make_install_pdf    := install-pdf
 make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
 make_minimal        += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
 make_minimal        += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
+make_minimal        += NO_LIBDW_DWARF_UNWIND=1
 
 # $(run) contains all available tests
 run := make_pure
@@ -65,6 +68,7 @@ run += make_no_ui
 run += make_no_demangle
 run += make_no_libelf
 run += make_no_libunwind
+run += make_no_libdw_dwarf_unwind
 run += make_no_backtrace
 run += make_no_libnuma
 run += make_no_libaudit
@@ -73,6 +77,7 @@ run += make_help
 run += make_doc
 run += make_perf_o
 run += make_util_map_o
+run += make_util_pmu_bison_o
 run += make_install
 run += make_install_bin
 # FIXME 'install-*' commented out till they're fixed
@@ -113,8 +118,9 @@ test_make_doc_O  := $(test_ok)
 
 test_make_python_perf_so := test -f $(PERF)/python/perf.so
 
-test_make_perf_o     := test -f $(PERF)/perf.o
-test_make_util_map_o := test -f $(PERF)/util/map.o
+test_make_perf_o           := test -f $(PERF)/perf.o
+test_make_util_map_o       := test -f $(PERF)/util/map.o
+test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o
 
 define test_dest_files
   for file in $(1); do                         \
@@ -167,13 +173,10 @@ test_make_install_info_O := $(test_ok)
 test_make_install_pdf    := $(test_ok)
 test_make_install_pdf_O  := $(test_ok)
 
-# Kbuild tests only
-#test_make_python_perf_so_O := test -f $$TMP/tools/perf/python/perf.so
-#test_make_perf_o_O         := test -f $$TMP/tools/perf/perf.o
-#test_make_util_map_o_O     := test -f $$TMP/tools/perf/util/map.o
-
-test_make_perf_o_O     := true
-test_make_util_map_o_O := true
+test_make_python_perf_so_O    := test -f $$TMP_O/python/perf.so
+test_make_perf_o_O            := test -f $$TMP_O/perf.o
+test_make_util_map_o_O        := test -f $$TMP_O/util/map.o
+test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o
 
 test_default = test -x $(PERF)/perf
 test = $(if $(test_$1),$(test_$1),$(test_default))
index 4db0ae617d7080cd7c97467a15fbe36d9bcedf30..8605ff5572aeeb402ae8b9c118e5f87439bc70f7 100644 (file)
@@ -2,7 +2,7 @@
 #include "parse-events.h"
 #include "evsel.h"
 #include "evlist.h"
-#include "fs.h"
+#include <api/fs/fs.h>
 #include <api/fs/debugfs.h>
 #include "tests.h"
 #include <linux/hw_breakpoint.h>
index 1b677202638d087b94903f3f395d9fab8ae15bc7..0014d3c8c21cd47d2868cac66bc65253913e888e 100644 (file)
@@ -22,8 +22,8 @@
 } while (0)
 
 static bool samples_same(const struct perf_sample *s1,
-                        const struct perf_sample *s2, u64 type, u64 regs_user,
-                        u64 read_format)
+                        const struct perf_sample *s2,
+                        u64 type, u64 read_format)
 {
        size_t i;
 
@@ -95,8 +95,9 @@ static bool samples_same(const struct perf_sample *s1,
        }
 
        if (type & PERF_SAMPLE_REGS_USER) {
-               size_t sz = hweight_long(regs_user) * sizeof(u64);
+               size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
 
+               COMP(user_regs.mask);
                COMP(user_regs.abi);
                if (s1->user_regs.abi &&
                    (!s1->user_regs.regs || !s2->user_regs.regs ||
@@ -174,6 +175,7 @@ static int do_test(u64 sample_type, u64 sample_regs_user, u64 read_format)
                .branch_stack   = &branch_stack.branch_stack,
                .user_regs      = {
                        .abi    = PERF_SAMPLE_REGS_ABI_64,
+                       .mask   = sample_regs_user,
                        .regs   = user_regs,
                },
                .user_stack     = {
@@ -201,8 +203,7 @@ static int do_test(u64 sample_type, u64 sample_regs_user, u64 read_format)
                sample.read.one.id    = 99;
        }
 
-       sz = perf_event__sample_event_size(&sample, sample_type,
-                                          sample_regs_user, read_format);
+       sz = perf_event__sample_event_size(&sample, sample_type, read_format);
        bufsz = sz + 4096; /* Add a bit for overrun checking */
        event = malloc(bufsz);
        if (!event) {
@@ -215,8 +216,7 @@ static int do_test(u64 sample_type, u64 sample_regs_user, u64 read_format)
        event->header.misc = 0;
        event->header.size = sz;
 
-       err = perf_event__synthesize_sample(event, sample_type,
-                                           sample_regs_user, read_format,
+       err = perf_event__synthesize_sample(event, sample_type, read_format,
                                            &sample, false);
        if (err) {
                pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
@@ -244,8 +244,7 @@ static int do_test(u64 sample_type, u64 sample_regs_user, u64 read_format)
                goto out_free;
        }
 
-       if (!samples_same(&sample, &sample_out, sample_type,
-                         sample_regs_user, read_format)) {
+       if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
                pr_debug("parsing failed for sample_type %#"PRIx64"\n",
                         sample_type);
                goto out_free;
index e0ac713857ba5708fee69a8b1000cd81d9ae0a76..a24795ca002db3a25742ee324fca6bc6d74e8e14 100644 (file)
@@ -40,5 +40,14 @@ int test__code_reading(void);
 int test__sample_parsing(void);
 int test__keep_tracking(void);
 int test__parse_no_sample_id_all(void);
+int test__dwarf_unwind(void);
 
+#if defined(__x86_64__) || defined(__i386__)
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+struct thread;
+struct perf_sample;
+int test__arch_unwind_sample(struct perf_sample *sample,
+                            struct thread *thread);
+#endif
+#endif
 #endif /* TESTS_H */
index b720b92eba6eb95cc7dd9bd4a57693f0a79d91b9..7ec871af3f6f8c5cc4796f40c14c801aa3ba3943 100644 (file)
@@ -587,95 +587,52 @@ struct hpp_arg {
        bool current_entry;
 };
 
-static int __hpp__color_callchain(struct hpp_arg *arg)
+static int __hpp__overhead_callback(struct perf_hpp *hpp, bool front)
 {
-       if (!symbol_conf.use_callchain)
-               return 0;
-
-       slsmg_printf("%c ", arg->folded_sign);
-       return 2;
-}
-
-static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
-                           u64 (*get_field)(struct hist_entry *),
-                           int (*callchain_cb)(struct hpp_arg *))
-{
-       int ret = 0;
-       double percent = 0.0;
-       struct hists *hists = he->hists;
        struct hpp_arg *arg = hpp->ptr;
 
-       if (hists->stats.total_period)
-               percent = 100.0 * get_field(he) / hists->stats.total_period;
-
-       ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
-
-       if (callchain_cb)
-               ret += callchain_cb(arg);
-
-       ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
-       slsmg_printf("%s", hpp->buf);
-
-       if (symbol_conf.event_group) {
-               int prev_idx, idx_delta;
-               struct perf_evsel *evsel = hists_to_evsel(hists);
-               struct hist_entry *pair;
-               int nr_members = evsel->nr_members;
-
-               if (nr_members <= 1)
-                       goto out;
+       if (arg->current_entry && arg->b->navkeypressed)
+               ui_browser__set_color(arg->b, HE_COLORSET_SELECTED);
+       else
+               ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
 
-               prev_idx = perf_evsel__group_idx(evsel);
+       if (front) {
+               if (!symbol_conf.use_callchain)
+                       return 0;
 
-               list_for_each_entry(pair, &he->pairs.head, pairs.node) {
-                       u64 period = get_field(pair);
-                       u64 total = pair->hists->stats.total_period;
+               slsmg_printf("%c ", arg->folded_sign);
+               return 2;
+       }
 
-                       if (!total)
-                               continue;
+       return 0;
+}
 
-                       evsel = hists_to_evsel(pair->hists);
-                       idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
+static int __hpp__color_callback(struct perf_hpp *hpp, bool front __maybe_unused)
+{
+       struct hpp_arg *arg = hpp->ptr;
 
-                       while (idx_delta--) {
-                               /*
-                                * zero-fill group members in the middle which
-                                * have no sample
-                                */
-                               ui_browser__set_percent_color(arg->b, 0.0,
-                                                       arg->current_entry);
-                               ret += scnprintf(hpp->buf, hpp->size,
-                                                " %6.2f%%", 0.0);
-                               slsmg_printf("%s", hpp->buf);
-                       }
+       if (!arg->current_entry || !arg->b->navkeypressed)
+               ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
+       return 0;
+}
 
-                       percent = 100.0 * period / total;
-                       ui_browser__set_percent_color(arg->b, percent,
-                                                     arg->current_entry);
-                       ret += scnprintf(hpp->buf, hpp->size,
-                                        " %6.2f%%", percent);
-                       slsmg_printf("%s", hpp->buf);
+static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
+{
+       struct hpp_arg *arg = hpp->ptr;
+       int ret;
+       va_list args;
+       double percent;
 
-                       prev_idx = perf_evsel__group_idx(evsel);
-               }
+       va_start(args, fmt);
+       percent = va_arg(args, double);
+       va_end(args);
 
-               idx_delta = nr_members - prev_idx - 1;
+       ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
 
-               while (idx_delta--) {
-                       /*
-                        * zero-fill group members at last which have no sample
-                        */
-                       ui_browser__set_percent_color(arg->b, 0.0,
-                                                     arg->current_entry);
-                       ret += scnprintf(hpp->buf, hpp->size,
-                                        " %6.2f%%", 0.0);
-                       slsmg_printf("%s", hpp->buf);
-               }
-       }
-out:
-       if (!arg->current_entry || !arg->b->navkeypressed)
-               ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
+       ret = scnprintf(hpp->buf, hpp->size, fmt, percent);
+       slsmg_printf("%s", hpp->buf);
 
+       advance_hpp(hpp, ret);
        return ret;
 }
 
@@ -690,14 +647,15 @@ hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\
                                struct perf_hpp *hpp,                   \
                                struct hist_entry *he)                  \
 {                                                                      \
-       return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb);      \
+       return __hpp__fmt(hpp, he, __hpp_get_##_field, _cb, " %6.2f%%", \
+                         __hpp__slsmg_color_printf, true);             \
 }
 
-__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain)
-__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL)
-__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL)
-__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL)
-__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)
+__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__overhead_callback)
+__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, __hpp__color_callback)
+__HPP_COLOR_PERCENT_FN(overhead_us, period_us, __hpp__color_callback)
+__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, __hpp__color_callback)
+__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, __hpp__color_callback)
 
 #undef __HPP_COLOR_PERCENT_FN
 
index 5b95c44f34354215ba597c19d736d3e63c559d1e..e395ef9b0ae00494187c089fde8c6b6a7a5259c4 100644 (file)
@@ -8,16 +8,24 @@
 
 #define MAX_COLUMNS                    32
 
-static int __percent_color_snprintf(char *buf, size_t size, double percent)
+static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...)
 {
        int ret = 0;
+       va_list args;
+       double percent;
        const char *markup;
+       char *buf = hpp->buf;
+       size_t size = hpp->size;
+
+       va_start(args, fmt);
+       percent = va_arg(args, double);
+       va_end(args);
 
        markup = perf_gtk__get_percent_color(percent);
        if (markup)
                ret += scnprintf(buf, size, markup);
 
-       ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent);
+       ret += scnprintf(buf + ret, size - ret, fmt, percent);
 
        if (markup)
                ret += scnprintf(buf + ret, size - ret, "</span>");
@@ -25,66 +33,6 @@ static int __percent_color_snprintf(char *buf, size_t size, double percent)
        return ret;
 }
 
-
-static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
-                           u64 (*get_field)(struct hist_entry *))
-{
-       int ret;
-       double percent = 0.0;
-       struct hists *hists = he->hists;
-       struct perf_evsel *evsel = hists_to_evsel(hists);
-
-       if (hists->stats.total_period)
-               percent = 100.0 * get_field(he) / hists->stats.total_period;
-
-       ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);
-
-       if (perf_evsel__is_group_event(evsel)) {
-               int prev_idx, idx_delta;
-               struct hist_entry *pair;
-               int nr_members = evsel->nr_members;
-
-               prev_idx = perf_evsel__group_idx(evsel);
-
-               list_for_each_entry(pair, &he->pairs.head, pairs.node) {
-                       u64 period = get_field(pair);
-                       u64 total = pair->hists->stats.total_period;
-
-                       evsel = hists_to_evsel(pair->hists);
-                       idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
-
-                       while (idx_delta--) {
-                               /*
-                                * zero-fill group members in the middle which
-                                * have no sample
-                                */
-                               ret += __percent_color_snprintf(hpp->buf + ret,
-                                                               hpp->size - ret,
-                                                               0.0);
-                       }
-
-                       percent = 100.0 * period / total;
-                       ret += __percent_color_snprintf(hpp->buf + ret,
-                                                       hpp->size - ret,
-                                                       percent);
-
-                       prev_idx = perf_evsel__group_idx(evsel);
-               }
-
-               idx_delta = nr_members - prev_idx - 1;
-
-               while (idx_delta--) {
-                       /*
-                        * zero-fill group members at last which have no sample
-                        */
-                       ret += __percent_color_snprintf(hpp->buf + ret,
-                                                       hpp->size - ret,
-                                                       0.0);
-               }
-       }
-       return ret;
-}
-
 #define __HPP_COLOR_PERCENT_FN(_type, _field)                                  \
 static u64 he_get_##_field(struct hist_entry *he)                              \
 {                                                                              \
@@ -95,7 +43,8 @@ static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,
                                       struct perf_hpp *hpp,                    \
                                       struct hist_entry *he)                   \
 {                                                                              \
-       return __hpp__color_fmt(hpp, he, he_get_##_field);                      \
+       return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%",           \
+                         __percent_color_snprintf, true);                      \
 }
 
 __HPP_COLOR_PERCENT_FN(overhead, period)
@@ -216,7 +165,6 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
        struct perf_hpp hpp = {
                .buf            = s,
                .size           = sizeof(s),
-               .ptr            = hists_to_evsel(hists),
        };
 
        nr_cols = 0;
@@ -243,7 +191,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
        col_idx = 0;
 
        perf_hpp__for_each_format(fmt) {
-               fmt->header(fmt, &hpp);
+               fmt->header(fmt, &hpp, hists_to_evsel(hists));
 
                gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
                                                            -1, ltrim(s),
index 78f4c92e9b73c1c55e5699bce4b628f551348beb..0f403b83e9d1c1da19b7be850c7c49ffba0b074d 100644 (file)
@@ -8,16 +8,27 @@
 
 /* hist period print (hpp) functions */
 
-typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...);
-
-static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
-                     u64 (*get_field)(struct hist_entry *),
-                     const char *fmt, hpp_snprint_fn print_fn,
-                     bool fmt_percent)
+#define hpp__call_print_fn(hpp, fn, fmt, ...)                  \
+({                                                             \
+       int __ret = fn(hpp, fmt, ##__VA_ARGS__);                \
+       advance_hpp(hpp, __ret);                                \
+       __ret;                                                  \
+})
+
+int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+              hpp_field_fn get_field, hpp_callback_fn callback,
+              const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent)
 {
-       int ret;
+       int ret = 0;
        struct hists *hists = he->hists;
        struct perf_evsel *evsel = hists_to_evsel(hists);
+       char *buf = hpp->buf;
+       size_t size = hpp->size;
+
+       if (callback) {
+               ret = callback(hpp, true);
+               advance_hpp(hpp, ret);
+       }
 
        if (fmt_percent) {
                double percent = 0.0;
@@ -26,9 +37,9 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
                        percent = 100.0 * get_field(he) /
                                  hists->stats.total_period;
 
-               ret = print_fn(hpp->buf, hpp->size, fmt, percent);
+               ret += hpp__call_print_fn(hpp, print_fn, fmt, percent);
        } else
-               ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he));
+               ret += hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));
 
        if (perf_evsel__is_group_event(evsel)) {
                int prev_idx, idx_delta;
@@ -52,16 +63,22 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
                                 * zero-fill group members in the middle which
                                 * have no sample
                                 */
-                               ret += print_fn(hpp->buf + ret, hpp->size - ret,
-                                               fmt, 0);
+                               if (fmt_percent) {
+                                       ret += hpp__call_print_fn(hpp, print_fn,
+                                                                 fmt, 0.0);
+                               } else {
+                                       ret += hpp__call_print_fn(hpp, print_fn,
+                                                                 fmt, 0ULL);
+                               }
                        }
 
-                       if (fmt_percent)
-                               ret += print_fn(hpp->buf + ret, hpp->size - ret,
-                                               fmt, 100.0 * period / total);
-                       else
-                               ret += print_fn(hpp->buf + ret, hpp->size - ret,
-                                               fmt, period);
+                       if (fmt_percent) {
+                               ret += hpp__call_print_fn(hpp, print_fn, fmt,
+                                                         100.0 * period / total);
+                       } else {
+                               ret += hpp__call_print_fn(hpp, print_fn, fmt,
+                                                         period);
+                       }
 
                        prev_idx = perf_evsel__group_idx(evsel);
                }
@@ -72,41 +89,87 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
                        /*
                         * zero-fill group members at last which have no sample
                         */
-                       ret += print_fn(hpp->buf + ret, hpp->size - ret,
-                                       fmt, 0);
+                       if (fmt_percent) {
+                               ret += hpp__call_print_fn(hpp, print_fn,
+                                                         fmt, 0.0);
+                       } else {
+                               ret += hpp__call_print_fn(hpp, print_fn,
+                                                         fmt, 0ULL);
+                       }
                }
        }
+
+       if (callback) {
+               int __ret = callback(hpp, false);
+
+               advance_hpp(hpp, __ret);
+               ret += __ret;
+       }
+
+       /*
+        * Restore original buf and size as it's where caller expects
+        * the result will be saved.
+        */
+       hpp->buf = buf;
+       hpp->size = size;
+
        return ret;
 }
 
 #define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width)          \
 static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused,        \
-                              struct perf_hpp *hpp)                    \
+                              struct perf_hpp *hpp,                    \
+                              struct perf_evsel *evsel)                \
 {                                                                      \
        int len = _min_width;                                           \
                                                                        \
-       if (symbol_conf.event_group) {                                  \
-               struct perf_evsel *evsel = hpp->ptr;                    \
-                                                                       \
+       if (symbol_conf.event_group)                                    \
                len = max(len, evsel->nr_members * _unit_width);        \
-       }                                                               \
+                                                                       \
        return scnprintf(hpp->buf, hpp->size, "%*s", len, _str);        \
 }
 
 #define __HPP_WIDTH_FN(_type, _min_width, _unit_width)                         \
 static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
-                             struct perf_hpp *hpp __maybe_unused)      \
+                             struct perf_hpp *hpp __maybe_unused,      \
+                             struct perf_evsel *evsel)                 \
 {                                                                      \
        int len = _min_width;                                           \
                                                                        \
-       if (symbol_conf.event_group) {                                  \
-               struct perf_evsel *evsel = hpp->ptr;                    \
-                                                                       \
+       if (symbol_conf.event_group)                                    \
                len = max(len, evsel->nr_members * _unit_width);        \
-       }                                                               \
+                                                                       \
        return len;                                                     \
 }
 
+static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
+{
+       va_list args;
+       ssize_t ssize = hpp->size;
+       double percent;
+       int ret;
+
+       va_start(args, fmt);
+       percent = va_arg(args, double);
+       ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
+       va_end(args);
+
+       return (ret >= ssize) ? (ssize - 1) : ret;
+}
+
+static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
+{
+       va_list args;
+       ssize_t ssize = hpp->size;
+       int ret;
+
+       va_start(args, fmt);
+       ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
+       va_end(args);
+
+       return (ret >= ssize) ? (ssize - 1) : ret;
+}
+
 #define __HPP_COLOR_PERCENT_FN(_type, _field)                                  \
 static u64 he_get_##_field(struct hist_entry *he)                              \
 {                                                                              \
@@ -116,8 +179,8 @@ static u64 he_get_##_field(struct hist_entry *he)                           \
 static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,         \
                              struct perf_hpp *hpp, struct hist_entry *he)      \
 {                                                                              \
-       return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%",                 \
-                         percent_color_snprintf, true);                        \
+       return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%",           \
+                         hpp_color_scnprintf, true);                           \
 }
 
 #define __HPP_ENTRY_PERCENT_FN(_type, _field)                                  \
@@ -125,8 +188,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,             \
                              struct perf_hpp *hpp, struct hist_entry *he)      \
 {                                                                              \
        const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%";         \
-       return __hpp__fmt(hpp, he, he_get_##_field, fmt,                        \
-                         scnprintf, true);                                     \
+       return __hpp__fmt(hpp, he, he_get_##_field, NULL, fmt,                  \
+                         hpp_entry_scnprintf, true);                           \
 }
 
 #define __HPP_ENTRY_RAW_FN(_type, _field)                                      \
@@ -139,7 +202,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused,             \
                              struct perf_hpp *hpp, struct hist_entry *he)      \
 {                                                                              \
        const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64;    \
-       return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf, false); \
+       return __hpp__fmt(hpp, he, he_get_raw_##_field, NULL, fmt,              \
+                         hpp_entry_scnprintf, false);                          \
 }
 
 #define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width)  \
@@ -263,15 +327,13 @@ unsigned int hists__sort_list_width(struct hists *hists)
        struct perf_hpp_fmt *fmt;
        struct sort_entry *se;
        int i = 0, ret = 0;
-       struct perf_hpp dummy_hpp = {
-               .ptr    = hists_to_evsel(hists),
-       };
+       struct perf_hpp dummy_hpp;
 
        perf_hpp__for_each_format(fmt) {
                if (i)
                        ret += 2;
 
-               ret += fmt->width(fmt, &dummy_hpp);
+               ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
        }
 
        list_for_each_entry(se, &hist_entry__sort_list, list)
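The refactor above switches the hpp print callbacks to a variadic form (struct perf_hpp *, const char *fmt, ...) and has each callback pull the value back out with va_arg before formatting. A generic, self-contained sketch of that callback shape, with hypothetical names and a plain buffer standing in for struct perf_hpp, purely to illustrate the va_arg handoff:

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical stand-in for a color/entry printf callback: the caller
 * passes the value through varargs and the callback extracts it with
 * va_arg before formatting into the buffer. */
static int percent_snprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	double percent;
	int ret;

	va_start(args, fmt);
	percent = va_arg(args, double);
	va_end(args);

	ret = snprintf(buf, size, fmt, percent);
	return (ret >= (int)size) ? (int)size - 1 : ret;
}

int main(void)
{
	char buf[32];

	percent_snprintf(buf, sizeof(buf), " %6.2f%%", 42.0);
	puts(buf);
	return 0;
}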
index 831fbb77d1ff010ab786ff12ee747aaf8873bcbe..d59893edf03130549079a748ffe17440ab8231a9 100644 (file)
@@ -306,12 +306,6 @@ static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
        return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
 }
 
-static inline void advance_hpp(struct perf_hpp *hpp, int inc)
-{
-       hpp->buf  += inc;
-       hpp->size -= inc;
-}
-
 static int hist_entry__period_snprintf(struct perf_hpp *hpp,
                                       struct hist_entry *he)
 {
@@ -385,7 +379,6 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
        struct perf_hpp dummy_hpp = {
                .buf    = bf,
                .size   = sizeof(bf),
-               .ptr    = hists_to_evsel(hists),
        };
        bool first = true;
        size_t linesz;
@@ -404,7 +397,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                else
                        first = false;
 
-               fmt->header(fmt, &dummy_hpp);
+               fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
                fprintf(fp, "%s", bf);
        }
 
@@ -449,7 +442,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                else
                        first = false;
 
-               width = fmt->width(fmt, &dummy_hpp);
+               width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
                for (i = 0; i < width; i++)
                        fprintf(fp, ".");
        }
index 3aa555ff9d89e5d7ede4c6af067170197b9ce0e8..809b4c50beaed3e3a57845919b3a034fe7ab8c9e 100644 (file)
@@ -1236,6 +1236,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
        struct dso *dso = map->dso;
        char *filename;
        const char *d_filename;
+       const char *evsel_name = perf_evsel__name(evsel);
        struct annotation *notes = symbol__annotation(sym);
        struct disasm_line *pos, *queue = NULL;
        u64 start = map__rip_2objdump(map, sym->start);
@@ -1243,7 +1244,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
        int more = 0;
        u64 len;
        int width = 8;
-       int namelen;
+       int namelen, evsel_name_len, graph_dotted_len;
 
        filename = strdup(dso->long_name);
        if (!filename)
@@ -1256,14 +1257,17 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
 
        len = symbol__size(sym);
        namelen = strlen(d_filename);
+       evsel_name_len = strlen(evsel_name);
 
        if (perf_evsel__is_group_event(evsel))
                width *= evsel->nr_members;
 
-       printf(" %-*.*s|        Source code & Disassembly of %s\n",
-              width, width, "Percent", d_filename);
-       printf("-%-*.*s-------------------------------------\n",
-              width+namelen, width+namelen, graph_dotted_line);
+       printf(" %-*.*s|        Source code & Disassembly of %s for %s\n",
+              width, width, "Percent", d_filename, evsel_name);
+
+       graph_dotted_len = width + namelen + evsel_name_len;
+       printf("-%-*.*s-----------------------------------------\n",
+              graph_dotted_len, graph_dotted_len, graph_dotted_line);
 
        if (verbose)
                symbol__annotate_hits(sym, evsel);
index a9b48c42e81eb36e34cce24e091be66437270d44..7fe4994eeb638a63f7b52105ad326afa448915ab 100644 (file)
@@ -1,5 +1,5 @@
 #include "util.h"
-#include "fs.h"
+#include <api/fs/fs.h>
 #include "../perf.h"
 #include "cpumap.h"
 #include <assert.h>
index 4045d086d9d957823af08d79db4d8407fb9a460f..64453d63b971212ea3c626eb0d57fbefb62ea236 100644 (file)
@@ -45,8 +45,8 @@ int dso__read_binary_type_filename(const struct dso *dso,
                        debuglink--;
                if (*debuglink == '/')
                        debuglink++;
-               filename__read_debuglink(dso->long_name, debuglink,
-                                        size - (debuglink - filename));
+               ret = filename__read_debuglink(dso->long_name, debuglink,
+                                              size - (debuglink - filename));
                }
                break;
        case DSO_BINARY_TYPE__BUILD_ID_CACHE:
index cd7d6f078cddf618e40e709eab6c1d93750e155a..ab06f1c0365542cc35c000ba90d6879b34070ed6 100644 (file)
@@ -102,6 +102,16 @@ struct dso {
        char             name[0];
 };
 
+/* dso__for_each_symbol - iterate over the symbols of a given type
+ *
+ * @dso: the 'struct dso *' whose symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as temporary storage
+ * @type: the 'enum map_type' type of symbols
+ */
+#define dso__for_each_symbol(dso, pos, n, type)        \
+       symbols__for_each_entry(&(dso)->symbols[(type)], pos, n)
+
 static inline void dso__set_loaded(struct dso *dso, enum map_type type)
 {
        dso->loaded |= (1 << type);
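A hedged usage sketch of the new iterator: the helper below is hypothetical and assumes it lives next to the macro above, with the perf dso/symbol headers available; it only shows how the cursor and temporary node are declared and passed in.

/* Hypothetical helper: count the symbols of one map type on a dso. */
static size_t dso__count_symbols(struct dso *dso, enum map_type type)
{
	struct symbol *pos;
	struct rb_node *n;
	size_t nr = 0;

	dso__for_each_symbol(dso, pos, n, type)
		nr++;

	return nr;
}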
index b0f3ca850e9e8ffbf5c1e70b9f04215e545a71ae..9d12aa6dd485336104068aef0f3c40f8436d10b2 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/types.h>
 #include "event.h"
 #include "debug.h"
+#include "hist.h"
 #include "machine.h"
 #include "sort.h"
 #include "string.h"
@@ -94,14 +95,10 @@ static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
 
 static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                         union perf_event *event, pid_t pid,
-                                        int full,
                                         perf_event__handler_t process,
                                         struct machine *machine)
 {
-       char filename[PATH_MAX];
        size_t size;
-       DIR *tasks;
-       struct dirent dirent, *next;
        pid_t tgid;
 
        memset(&event->comm, 0, sizeof(event->comm));
@@ -124,55 +121,35 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
        event->comm.header.size = (sizeof(event->comm) -
                                (sizeof(event->comm.comm) - size) +
                                machine->id_hdr_size);
-       if (!full) {
-               event->comm.tid = pid;
+       event->comm.tid = pid;
 
-               if (process(tool, event, &synth_sample, machine) != 0)
-                       return -1;
-
-               goto out;
-       }
-
-       if (machine__is_default_guest(machine))
-               return 0;
-
-       snprintf(filename, sizeof(filename), "%s/proc/%d/task",
-                machine->root_dir, pid);
-
-       tasks = opendir(filename);
-       if (tasks == NULL) {
-               pr_debug("couldn't open %s\n", filename);
-               return 0;
-       }
+       if (process(tool, event, &synth_sample, machine) != 0)
+               return -1;
 
-       while (!readdir_r(tasks, &dirent, &next) && next) {
-               char *end;
-               pid = strtol(dirent.d_name, &end, 10);
-               if (*end)
-                       continue;
+out:
+       return tgid;
+}
 
-               /* already have tgid; jut want to update the comm */
-               (void) perf_event__get_comm_tgid(pid, event->comm.comm,
-                                        sizeof(event->comm.comm));
+static int perf_event__synthesize_fork(struct perf_tool *tool,
+                                      union perf_event *event, pid_t pid,
+                                      pid_t tgid, perf_event__handler_t process,
+                                      struct machine *machine)
+{
+       memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
 
-               size = strlen(event->comm.comm) + 1;
-               size = PERF_ALIGN(size, sizeof(u64));
-               memset(event->comm.comm + size, 0, machine->id_hdr_size);
-               event->comm.header.size = (sizeof(event->comm) -
-                                         (sizeof(event->comm.comm) - size) +
-                                         machine->id_hdr_size);
+       /* this is really a clone event but we use fork to synthesize it */
+       event->fork.ppid = tgid;
+       event->fork.ptid = tgid;
+       event->fork.pid  = tgid;
+       event->fork.tid  = pid;
+       event->fork.header.type = PERF_RECORD_FORK;
 
-               event->comm.tid = pid;
+       event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
 
-               if (process(tool, event, &synth_sample, machine) != 0) {
-                       tgid = -1;
-                       break;
-               }
-       }
+       if (process(tool, event, &synth_sample, machine) != 0)
+               return -1;
 
-       closedir(tasks);
-out:
-       return tgid;
+       return 0;
 }
 
 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
@@ -324,17 +301,71 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
 
 static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
+                                     union perf_event *fork_event,
                                      pid_t pid, int full,
                                          perf_event__handler_t process,
                                      struct perf_tool *tool,
                                      struct machine *machine, bool mmap_data)
 {
-       pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
+       char filename[PATH_MAX];
+       DIR *tasks;
+       struct dirent dirent, *next;
+       pid_t tgid;
+
+       /* special case: only send one comm event using the passed-in pid */
+       if (!full) {
+               tgid = perf_event__synthesize_comm(tool, comm_event, pid,
+                                                  process, machine);
+
+               if (tgid == -1)
+                       return -1;
+
+               return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+                                                         process, machine, mmap_data);
+       }
+
+       if (machine__is_default_guest(machine))
+               return 0;
+
+       snprintf(filename, sizeof(filename), "%s/proc/%d/task",
+                machine->root_dir, pid);
+
+       tasks = opendir(filename);
+       if (tasks == NULL) {
+               pr_debug("couldn't open %s\n", filename);
+               return 0;
+       }
+
+       while (!readdir_r(tasks, &dirent, &next) && next) {
+               char *end;
+               int rc = 0;
+               pid_t _pid;
+
+               _pid = strtol(dirent.d_name, &end, 10);
+               if (*end)
+                       continue;
+
+               tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
+                                                  process, machine);
+               if (tgid == -1)
+                       return -1;
+
+               if (_pid == pid) {
+                       /* process the parent's maps too */
+                       rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
+                                               process, machine, mmap_data);
+               } else {
+                       /* only fork the tid's map, to save time */
+                       rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                                 process, machine);
-       if (tgid == -1)
-               return -1;
-       return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-                                                 process, machine, mmap_data);
+               }
+
+               if (rc)
+                       return rc;
+       }
+
+       closedir(tasks);
+       return 0;
 }
 
 int perf_event__synthesize_thread_map(struct perf_tool *tool,
@@ -343,7 +374,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct machine *machine,
                                      bool mmap_data)
 {
-       union perf_event *comm_event, *mmap_event;
+       union perf_event *comm_event, *mmap_event, *fork_event;
        int err = -1, thread, j;
 
        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
@@ -354,9 +385,14 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
        if (mmap_event == NULL)
                goto out_free_comm;
 
+       fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+       if (fork_event == NULL)
+               goto out_free_mmap;
+
        err = 0;
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
+                                              fork_event,
                                               threads->map[thread], 0,
                                               process, tool, machine,
                                               mmap_data)) {
@@ -382,6 +418,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event, mmap_event,
+                                                      fork_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine,
                                                       mmap_data)) {
@@ -390,6 +427,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
                        }
                }
        }
+       free(fork_event);
+out_free_mmap:
        free(mmap_event);
 out_free_comm:
        free(comm_event);
@@ -404,9 +443,12 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
        DIR *proc;
        char proc_path[PATH_MAX];
        struct dirent dirent, *next;
-       union perf_event *comm_event, *mmap_event;
+       union perf_event *comm_event, *mmap_event, *fork_event;
        int err = -1;
 
+       if (machine__is_default_guest(machine))
+               return 0;
+
        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;
@@ -415,14 +457,15 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
        if (mmap_event == NULL)
                goto out_free_comm;
 
-       if (machine__is_default_guest(machine))
-               return 0;
+       fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
+       if (fork_event == NULL)
+               goto out_free_mmap;
 
        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
        proc = opendir(proc_path);
 
        if (proc == NULL)
-               goto out_free_mmap;
+               goto out_free_fork;
 
        while (!readdir_r(proc, &dirent, &next) && next) {
                char *end;
@@ -434,12 +477,14 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
                 * We may race with exiting thread, so don't stop just because
                 * one thread couldn't be synthesized.
                 */
-               __event__synthesize_thread(comm_event, mmap_event, pid, 1,
-                                          process, tool, machine, mmap_data);
+               __event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
+                                          1, process, tool, machine, mmap_data);
        }
 
        err = 0;
        closedir(proc);
+out_free_fork:
+       free(fork_event);
 out_free_mmap:
        free(mmap_event);
 out_free_comm:
@@ -661,7 +706,7 @@ void thread__find_addr_map(struct thread *thread,
        al->thread = thread;
        al->addr = addr;
        al->cpumode = cpumode;
-       al->filtered = false;
+       al->filtered = 0;
 
        if (machine == NULL) {
                al->map = NULL;
@@ -687,11 +732,11 @@ void thread__find_addr_map(struct thread *thread,
                if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
                        cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
                        !perf_guest)
-                       al->filtered = true;
+                       al->filtered |= (1 << HIST_FILTER__GUEST);
                if ((cpumode == PERF_RECORD_MISC_USER ||
                        cpumode == PERF_RECORD_MISC_KERNEL) &&
                        !perf_host)
-                       al->filtered = true;
+                       al->filtered |= (1 << HIST_FILTER__HOST);
 
                return;
        }
@@ -748,9 +793,6 @@ int perf_event__preprocess_sample(const union perf_event *event,
        if (thread == NULL)
                return -1;
 
-       if (thread__is_filtered(thread))
-               goto out_filtered;
-
        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
        /*
         * Have we already created the kernel maps for this machine?
@@ -768,6 +810,10 @@ int perf_event__preprocess_sample(const union perf_event *event,
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
+
+       if (thread__is_filtered(thread))
+               al->filtered |= (1 << HIST_FILTER__THREAD);
+
        al->sym = NULL;
        al->cpu = sample->cpu;
 
@@ -779,8 +825,9 @@ int perf_event__preprocess_sample(const union perf_event *event,
                                                  dso->short_name) ||
                               (dso->short_name != dso->long_name &&
                                strlist__has_entry(symbol_conf.dso_list,
-                                                  dso->long_name)))))
-                       goto out_filtered;
+                                                  dso->long_name))))) {
+                       al->filtered |= (1 << HIST_FILTER__DSO);
+               }
 
                al->sym = map__find_symbol(al->map, al->addr,
                                           machine->symbol_filter);
@@ -788,12 +835,9 @@ int perf_event__preprocess_sample(const union perf_event *event,
 
        if (symbol_conf.sym_list &&
                (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
-                                               al->sym->name)))
-               goto out_filtered;
-
-       return 0;
+                                               al->sym->name))) {
+               al->filtered |= (1 << HIST_FILTER__SYMBOL);
+       }
 
-out_filtered:
-       al->filtered = true;
        return 0;
 }
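
The preprocess_sample() and thread__find_addr_map() hunks above stop treating al->filtered as a plain bool: each check now ORs in its own HIST_FILTER__* bit (the enum moves into hists.h later in this diff), so consumers can still ask "filtered at all?" cheaply but also see why. A small self-contained sketch of the pattern; the enum mirrors the one added to hists.h, everything else is invented.

#include <stdio.h>

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
        HIST_FILTER__GUEST,
        HIST_FILTER__HOST,
};

int main(void)
{
        unsigned int filtered = 0;

        /* each stage ORs in its own reason instead of overwriting a bool */
        filtered |= (1 << HIST_FILTER__DSO);
        filtered |= (1 << HIST_FILTER__SYMBOL);

        /* the old "is it filtered?" truth test still works unchanged ... */
        if (filtered)
                printf("entry filtered, reason mask %#x\n", filtered);

        /* ... but the individual reasons can now be told apart */
        if (filtered & (1 << HIST_FILTER__SYMBOL))
                printf("dropped by the symbol filter\n");

        return 0;
}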
index 851fa06f4a427d47b2c2af5ee70e345fda643978..38457d447a13106360fb60d7b93dd3eab5217f50 100644 (file)
@@ -85,6 +85,7 @@ struct sample_event {
 
 struct regs_dump {
        u64 abi;
+       u64 mask;
        u64 *regs;
 };
 
@@ -259,9 +260,9 @@ int perf_event__preprocess_sample(const union perf_event *event,
 const char *perf_event__name(unsigned int id);
 
 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
-                                    u64 sample_regs_user, u64 read_format);
+                                    u64 read_format);
 int perf_event__synthesize_sample(union perf_event *event, u64 type,
-                                 u64 sample_regs_user, u64 read_format,
+                                 u64 read_format,
                                  const struct perf_sample *sample,
                                  bool swapped);
 
index 55407c594b87f1657e8ff06f8e64ee27ba2662c2..5c28d82b76c472d2107e097656a5d45e4f151536 100644 (file)
@@ -500,6 +500,34 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
        return ret;
 }
 
+static void
+perf_evsel__config_callgraph(struct perf_evsel *evsel,
+                            struct record_opts *opts)
+{
+       bool function = perf_evsel__is_function_event(evsel);
+       struct perf_event_attr *attr = &evsel->attr;
+
+       perf_evsel__set_sample_bit(evsel, CALLCHAIN);
+
+       if (opts->call_graph == CALLCHAIN_DWARF) {
+               if (!function) {
+                       perf_evsel__set_sample_bit(evsel, REGS_USER);
+                       perf_evsel__set_sample_bit(evsel, STACK_USER);
+                       attr->sample_regs_user = PERF_REGS_MASK;
+                       attr->sample_stack_user = opts->stack_dump_size;
+                       attr->exclude_callchain_user = 1;
+               } else {
+                       pr_info("Cannot use DWARF unwind for function trace event,"
+                               " falling back to framepointers.\n");
+               }
+       }
+
+       if (function) {
+               pr_info("Disabling user space callchains for function trace event.\n");
+               attr->exclude_callchain_user = 1;
+       }
+}
+
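
For context, the helper added above boils down to programming a few perf_event_attr fields. A minimal standalone sketch of those knobs, without perf's evsel wrappers; the register mask and stack size are placeholders (perf passes PERF_REGS_MASK and opts->stack_dump_size).

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>

static void config_dwarf_callchain(struct perf_event_attr *attr,
                                   unsigned long long regs_mask,
                                   unsigned int stack_bytes)
{
        attr->sample_type |= PERF_SAMPLE_CALLCHAIN;
        /* dump user registers and a slice of the user stack with each sample */
        attr->sample_type |= PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
        attr->sample_regs_user  = regs_mask;
        attr->sample_stack_user = stack_bytes;
        /* perf unwinds user space itself from the dump, so the kernel need not */
        attr->exclude_callchain_user = 1;
}

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        config_dwarf_callchain(&attr, 0xffULL /* placeholder mask */, 8192);
        printf("sample_type %#llx, stack %u bytes\n",
               (unsigned long long)attr.sample_type, attr.sample_stack_user);
        return 0;
}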
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -595,17 +623,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                attr->mmap_data = track;
        }
 
-       if (opts->call_graph) {
-               perf_evsel__set_sample_bit(evsel, CALLCHAIN);
-
-               if (opts->call_graph == CALLCHAIN_DWARF) {
-                       perf_evsel__set_sample_bit(evsel, REGS_USER);
-                       perf_evsel__set_sample_bit(evsel, STACK_USER);
-                       attr->sample_regs_user = PERF_REGS_MASK;
-                       attr->sample_stack_user = opts->stack_dump_size;
-                       attr->exclude_callchain_user = 1;
-               }
-       }
+       if (opts->call_graph_enabled)
+               perf_evsel__config_callgraph(evsel, opts);
 
        if (target__has_cpu(&opts->target))
                perf_evsel__set_sample_bit(evsel, CPU);
@@ -1004,7 +1023,7 @@ retry_sample_id:
 
                        group_fd = get_group_fd(evsel, cpu, thread);
 retry_open:
-                       pr_debug2("perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
+                       pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n",
                                  pid, cpus->map[cpu], group_fd, flags);
 
                        FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
@@ -1013,7 +1032,7 @@ retry_open:
                                                                     group_fd, flags);
                        if (FD(evsel, cpu, thread) < 0) {
                                err = -errno;
-                               pr_debug2("perf_event_open failed, error %d\n",
+                               pr_debug2("sys_perf_event_open failed, error %d\n",
                                          err);
                                goto try_fallback;
                        }
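
The renamed messages above match the actual syscall: there is no glibc wrapper for perf_event_open, so perf (and any other user) goes through syscall(2). A self-contained sketch of such a wrapper; the counter chosen here is arbitrary, just to make the program do something.

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        /* pid 0 = current task, cpu -1 = any cpu, no group, no flags */
        fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
        printf("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx -> fd %d\n",
               0, -1, -1, 0UL, fd);
        if (fd >= 0)
                close(fd);
        return 0;
}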
@@ -1220,7 +1239,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
        memset(data, 0, sizeof(*data));
        data->cpu = data->pid = data->tid = -1;
        data->stream_id = data->id = data->time = -1ULL;
-       data->period = 1;
+       data->period = evsel->attr.sample_period;
        data->weight = 0;
 
        if (event->header.type != PERF_RECORD_SAMPLE) {
@@ -1396,10 +1415,11 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                array++;
 
                if (data->user_regs.abi) {
-                       u64 regs_user = evsel->attr.sample_regs_user;
+                       u64 mask = evsel->attr.sample_regs_user;
 
-                       sz = hweight_long(regs_user) * sizeof(u64);
+                       sz = hweight_long(mask) * sizeof(u64);
                        OVERFLOW_CHECK(array, sz, max_size);
+                       data->user_regs.mask = mask;
                        data->user_regs.regs = (u64 *)array;
                        array = (void *)array + sz;
                }
@@ -1451,7 +1471,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 }
 
 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
-                                    u64 sample_regs_user, u64 read_format)
+                                    u64 read_format)
 {
        size_t sz, result = sizeof(struct sample_event);
 
@@ -1517,7 +1537,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        result += sizeof(u64);
-                       sz = hweight_long(sample_regs_user) * sizeof(u64);
+                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
@@ -1546,7 +1566,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 }
 
 int perf_event__synthesize_sample(union perf_event *event, u64 type,
-                                 u64 sample_regs_user, u64 read_format,
+                                 u64 read_format,
                                  const struct perf_sample *sample,
                                  bool swapped)
 {
@@ -1687,7 +1707,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        *array++ = sample->user_regs.abi;
-                       sz = hweight_long(sample_regs_user) * sizeof(u64);
+                       sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
                        memcpy(array, sample->user_regs.regs, sz);
                        array = (void *)array + sz;
                } else {
index f1b325665aae8201b9696b2c3c5a6a64e009ffb7..0c9926cfb292ba2667de9f270fc8103182d857ab 100644 (file)
@@ -315,6 +315,24 @@ static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
        return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
 }
 
+/**
+ * perf_evsel__is_function_event - Return whether given evsel is a function
+ * trace event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if event is function trace event
+ */
+static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
+{
+#define FUNCTION_EVENT "ftrace:function"
+
+       return evsel->name &&
+              !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
+
+#undef FUNCTION_EVENT
+}
+
 struct perf_attr_details {
        bool freq;
        bool verbose;
diff --git a/tools/perf/util/fs.h b/tools/perf/util/fs.h
deleted file mode 100644 (file)
index 5e09ce1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __PERF_FS
-#define __PERF_FS
-
-const char *sysfs__mountpoint(void);
-const char *procfs__mountpoint(void);
-
-#endif /* __PERF_FS */
index e4e6249b87d4eef1cd89d3df1aa3da022b550f84..f38590d7561b99733c5c32154eb1dee8c9777572 100644 (file)
@@ -13,13 +13,6 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
 static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
 
-enum hist_filter {
-       HIST_FILTER__DSO,
-       HIST_FILTER__THREAD,
-       HIST_FILTER__PARENT,
-       HIST_FILTER__SYMBOL,
-};
-
 struct callchain_param callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
@@ -290,7 +283,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) allocated from
-                        * machine__resolve_bstack() and will be freed after
+                        * sample__resolve_bstack() and will be freed after
                         * adding new entries.  So we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
@@ -369,7 +362,7 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
                        he_stat__add_period(&he->stat, period, weight);
 
                        /*
-                        * This mem info was allocated from machine__resolve_mem
+                        * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);
@@ -429,7 +422,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
                        .weight = weight,
                },
                .parent = sym_parent,
-               .filtered = symbol__parent_filter(sym_parent),
+               .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
index a59743fa3ef73d3aeb8c5c832a32aa2e04773a4c..1f1f513dfe7fb05f9cf0e77caeab43a327182fa1 100644 (file)
@@ -14,6 +14,15 @@ struct hist_entry;
 struct addr_location;
 struct symbol;
 
+enum hist_filter {
+       HIST_FILTER__DSO,
+       HIST_FILTER__THREAD,
+       HIST_FILTER__PARENT,
+       HIST_FILTER__SYMBOL,
+       HIST_FILTER__GUEST,
+       HIST_FILTER__HOST,
+};
+
 /*
  * The kernel collects the number of events it couldn't send in a stretch and
  * when possible sends this number in a PERF_RECORD_LOST event. The number of
@@ -132,8 +141,10 @@ struct perf_hpp {
 };
 
 struct perf_hpp_fmt {
-       int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
-       int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
+       int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+                     struct perf_evsel *evsel);
+       int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+                    struct perf_evsel *evsel);
        int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                     struct hist_entry *he);
        int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -166,6 +177,20 @@ void perf_hpp__init(void);
 void perf_hpp__column_register(struct perf_hpp_fmt *format);
 void perf_hpp__column_enable(unsigned col);
 
+typedef u64 (*hpp_field_fn)(struct hist_entry *he);
+typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
+typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);
+
+int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
+              hpp_field_fn get_field, hpp_callback_fn callback,
+              const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent);
+
+static inline void advance_hpp(struct perf_hpp *hpp, int inc)
+{
+       hpp->buf  += inc;
+       hpp->size -= inc;
+}
+
 static inline size_t perf_hpp__use_color(void)
 {
        return !symbol_conf.field_sep;
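
advance_hpp(), added above, captures a recurring pattern in the hist column code: each formatter prints into the remaining (buf, size) window and then shrinks the window by what it wrote. A standalone illustration; the struct and values here are invented (the real callers pass a struct perf_hpp, and are trusted not to overrun the window).

#include <stdio.h>

struct window {
        char    *buf;
        size_t   size;
};

static void advance(struct window *w, int inc)
{
        w->buf  += inc;
        w->size -= inc;
}

int main(void)
{
        char line[64];
        struct window w = { .buf = line, .size = sizeof(line) };

        /* each "column" appends to what is left of the buffer */
        advance(&w, snprintf(w.buf, w.size, "%7.2f%%", 42.0));
        advance(&w, snprintf(w.buf, w.size, "  %-20s", "some_function"));
        printf("%s\n", line);
        return 0;
}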
diff --git a/tools/perf/util/include/linux/hash.h b/tools/perf/util/include/linux/hash.h
deleted file mode 100644 (file)
index 201f573..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "../../../../include/linux/hash.h"
-
-#ifndef PERF_HASH_H
-#define PERF_HASH_H
-#endif
index d8c927c868eee79e9dc82d6c24a66f689f75288d..9844c31b7c2bb26c25a71e5a0203800e3a2daa26 100644 (file)
@@ -94,12 +94,6 @@ static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
        return (i >= ssize) ? (ssize - 1) : i;
 }
 
-static inline unsigned long
-simple_strtoul(const char *nptr, char **endptr, int base)
-{
-       return strtoul(nptr, endptr, base);
-}
-
 int eprintf(int level,
            const char *fmt, ...) __attribute__((format(printf, 2, 3)));
 
index 1d928a0ce9972014f435d93646c576bad56ad84b..bfe0a2afd0d28bc736b8ab8ae9c063ca0248a158 100644 (file)
@@ -1,5 +1,4 @@
 #include <linux/kernel.h>
-#include <linux/prefetch.h>
 
 #include "../../../../include/linux/list.h"
 
diff --git a/tools/perf/util/include/linux/prefetch.h b/tools/perf/util/include/linux/prefetch.h
deleted file mode 100644 (file)
index 7841e48..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef PERF_LINUX_PREFETCH_H
-#define PERF_LINUX_PREFETCH_H
-
-static inline void prefetch(void *a __attribute__((unused))) { }
-
-#endif
index 620a1983b76b9921157c78f9ad3888851db1eb11..a53cd0b8c151cdb898d3711c36e5081846813a15 100644 (file)
@@ -327,9 +327,10 @@ struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
        return __machine__findnew_thread(machine, pid, tid, true);
 }
 
-struct thread *machine__find_thread(struct machine *machine, pid_t tid)
+struct thread *machine__find_thread(struct machine *machine, pid_t pid,
+                                   pid_t tid)
 {
-       return __machine__findnew_thread(machine, 0, tid, false);
+       return __machine__findnew_thread(machine, pid, tid, false);
 }
 
 int machine__process_comm_event(struct machine *machine, union perf_event *event,
@@ -1026,7 +1027,7 @@ int machine__process_mmap2_event(struct machine *machine,
        }
 
        thread = machine__findnew_thread(machine, event->mmap2.pid,
-                                       event->mmap2.pid);
+                                       event->mmap2.tid);
        if (thread == NULL)
                goto out_problem;
 
@@ -1074,7 +1075,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
        }
 
        thread = machine__findnew_thread(machine, event->mmap.pid,
-                                        event->mmap.pid);
+                                        event->mmap.tid);
        if (thread == NULL)
                goto out_problem;
 
@@ -1114,7 +1115,9 @@ static void machine__remove_thread(struct machine *machine, struct thread *th)
 int machine__process_fork_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample)
 {
-       struct thread *thread = machine__find_thread(machine, event->fork.tid);
+       struct thread *thread = machine__find_thread(machine,
+                                                    event->fork.pid,
+                                                    event->fork.tid);
        struct thread *parent = machine__findnew_thread(machine,
                                                        event->fork.ppid,
                                                        event->fork.ptid);
@@ -1140,7 +1143,9 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
 int machine__process_exit_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample __maybe_unused)
 {
-       struct thread *thread = machine__find_thread(machine, event->fork.tid);
+       struct thread *thread = machine__find_thread(machine,
+                                                    event->fork.pid,
+                                                    event->fork.tid);
 
        if (dump_trace)
                perf_event__fprintf_task(event, stdout);
@@ -1184,39 +1189,22 @@ static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
        return 0;
 }
 
-static const u8 cpumodes[] = {
-       PERF_RECORD_MISC_USER,
-       PERF_RECORD_MISC_KERNEL,
-       PERF_RECORD_MISC_GUEST_USER,
-       PERF_RECORD_MISC_GUEST_KERNEL
-};
-#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
-
 static void ip__resolve_ams(struct machine *machine, struct thread *thread,
                            struct addr_map_symbol *ams,
                            u64 ip)
 {
        struct addr_location al;
-       size_t i;
-       u8 m;
 
        memset(&al, 0, sizeof(al));
+       /*
+        * We cannot use the header.misc hint to determine whether a
+        * branch stack address is user, kernel, guest, hypervisor.
+        * Branches may straddle the kernel/user/hypervisor boundaries.
+        * Thus, we have to try consecutively until we find a match
+        * or else, the symbol is unknown
+        */
+       thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);
 
-       for (i = 0; i < NCPUMODES; i++) {
-               m = cpumodes[i];
-               /*
-                * We cannot use the header.misc hint to determine whether a
-                * branch stack address is user, kernel, guest, hypervisor.
-                * Branches may straddle the kernel/user/hypervisor boundaries.
-                * Thus, we have to try consecutively until we find a match
-                * or else, the symbol is unknown
-                */
-               thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
-                               ip, &al);
-               if (al.map)
-                       goto found;
-       }
-found:
        ams->addr = ip;
        ams->al_addr = al.addr;
        ams->sym = al.sym;
@@ -1238,37 +1226,35 @@ static void ip__resolve_data(struct machine *machine, struct thread *thread,
        ams->map = al.map;
 }
 
-struct mem_info *machine__resolve_mem(struct machine *machine,
-                                     struct thread *thr,
-                                     struct perf_sample *sample,
-                                     u8 cpumode)
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+                                    struct addr_location *al)
 {
        struct mem_info *mi = zalloc(sizeof(*mi));
 
        if (!mi)
                return NULL;
 
-       ip__resolve_ams(machine, thr, &mi->iaddr, sample->ip);
-       ip__resolve_data(machine, thr, cpumode, &mi->daddr, sample->addr);
+       ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
+       ip__resolve_data(al->machine, al->thread, al->cpumode,
+                        &mi->daddr, sample->addr);
        mi->data_src.val = sample->data_src;
 
        return mi;
 }
 
-struct branch_info *machine__resolve_bstack(struct machine *machine,
-                                           struct thread *thr,
-                                           struct branch_stack *bs)
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+                                          struct addr_location *al)
 {
-       struct branch_info *bi;
        unsigned int i;
+       const struct branch_stack *bs = sample->branch_stack;
+       struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
 
-       bi = calloc(bs->nr, sizeof(struct branch_info));
        if (!bi)
                return NULL;
 
        for (i = 0; i < bs->nr; i++) {
-               ip__resolve_ams(machine, thr, &bi[i].to, bs->entries[i].to);
-               ip__resolve_ams(machine, thr, &bi[i].from, bs->entries[i].from);
+               ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
+               ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
                bi[i].flags = bs->entries[i].flags;
        }
        return bi;
@@ -1326,7 +1312,7 @@ static int machine__resolve_callchain_sample(struct machine *machine,
                        continue;
                }
 
-               al.filtered = false;
+               al.filtered = 0;
                thread__find_addr_location(thread, machine, cpumode,
                                           MAP__FUNCTION, ip, &al);
                if (al.sym != NULL) {
@@ -1385,8 +1371,7 @@ int machine__resolve_callchain(struct machine *machine,
                return 0;
 
        return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
-                                  thread, evsel->attr.sample_regs_user,
-                                  sample, max_stack);
+                                  thread, sample, max_stack);
 
 }
 
index f77e91e483dcf769597bfe6c18c7c106436398bf..c8c74a1193983ab6dbc66db56dacdb157a0ba315 100644 (file)
@@ -41,7 +41,8 @@ struct map *machine__kernel_map(struct machine *machine, enum map_type type)
        return machine->vmlinux_maps[type];
 }
 
-struct thread *machine__find_thread(struct machine *machine, pid_t tid);
+struct thread *machine__find_thread(struct machine *machine, pid_t pid,
+                                   pid_t tid);
 
 int machine__process_comm_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample);
@@ -91,12 +92,10 @@ void machine__delete_dead_threads(struct machine *machine);
 void machine__delete_threads(struct machine *machine);
 void machine__delete(struct machine *machine);
 
-struct branch_info *machine__resolve_bstack(struct machine *machine,
-                                           struct thread *thread,
-                                           struct branch_stack *bs);
-struct mem_info *machine__resolve_mem(struct machine *machine,
-                                     struct thread *thread,
-                                     struct perf_sample *sample, u8 cpumode);
+struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
+                                          struct addr_location *al);
+struct mem_info *sample__resolve_mem(struct perf_sample *sample,
+                                    struct addr_location *al);
 int machine__resolve_callchain(struct machine *machine,
                               struct perf_evsel *evsel,
                               struct thread *thread,
index 257e513205ceb850dbcaf9d6414254ba95a671e5..f00f058afb3b6b510da22c7c04d2f2c4aee3b0e9 100644 (file)
@@ -90,6 +90,16 @@ u64 map__objdump_2mem(struct map *map, u64 ip);
 
 struct symbol;
 
+/* map__for_each_symbol - iterate over the symbols in the given map
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @n: the 'struct rb_node *' to use as temporary storage
+ * Note: caller must ensure map->dso is not NULL (map is loaded).
+ */
+#define map__for_each_symbol(map, pos, n)      \
+       dso__for_each_symbol(map->dso, pos, n, map->type)
+
 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
 void map__init(struct map *map, enum map_type type,
index d22e3f8017dc429a67458bc18457a6de7cd9f778..bf48092983c65fe8cec8bb3ff4662e1b1803b18e 100644 (file)
@@ -407,7 +407,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
                if (internal_help && !strcmp(arg + 2, "help"))
                        return usage_with_options_internal(usagestr, options, 0);
                if (!strcmp(arg + 2, "list-opts"))
-                       return PARSE_OPT_LIST;
+                       return PARSE_OPT_LIST_OPTS;
+               if (!strcmp(arg + 2, "list-cmds"))
+                       return PARSE_OPT_LIST_SUBCMDS;
                switch (parse_long_opt(ctx, arg + 2, options)) {
                case -1:
                        return parse_options_usage(usagestr, options, arg + 2, 0);
@@ -433,25 +435,45 @@ int parse_options_end(struct parse_opt_ctx_t *ctx)
        return ctx->cpidx + ctx->argc;
 }
 
-int parse_options(int argc, const char **argv, const struct option *options,
-                 const char * const usagestr[], int flags)
+int parse_options_subcommand(int argc, const char **argv, const struct option *options,
+                       const char *const subcommands[], const char *usagestr[], int flags)
 {
        struct parse_opt_ctx_t ctx;
 
        perf_header__set_cmdline(argc, argv);
 
+       /* build usage string if it's not provided */
+       if (subcommands && !usagestr[0]) {
+               struct strbuf buf = STRBUF_INIT;
+
+               strbuf_addf(&buf, "perf %s [<options>] {", argv[0]);
+               for (int i = 0; subcommands[i]; i++) {
+                       if (i)
+                               strbuf_addstr(&buf, "|");
+                       strbuf_addstr(&buf, subcommands[i]);
+               }
+               strbuf_addstr(&buf, "}");
+
+               usagestr[0] = strdup(buf.buf);
+               strbuf_release(&buf);
+       }
+
        parse_options_start(&ctx, argc, argv, flags);
        switch (parse_options_step(&ctx, options, usagestr)) {
        case PARSE_OPT_HELP:
                exit(129);
        case PARSE_OPT_DONE:
                break;
-       case PARSE_OPT_LIST:
+       case PARSE_OPT_LIST_OPTS:
                while (options->type != OPTION_END) {
                        printf("--%s ", options->long_name);
                        options++;
                }
                exit(130);
+       case PARSE_OPT_LIST_SUBCMDS:
+               for (int i = 0; subcommands[i]; i++)
+                       printf("%s ", subcommands[i]);
+               exit(130);
        default: /* PARSE_OPT_UNKNOWN */
                if (ctx.argv[0][1] == '-') {
                        error("unknown option `%s'", ctx.argv[0] + 2);
@@ -464,6 +486,13 @@ int parse_options(int argc, const char **argv, const struct option *options,
        return parse_options_end(&ctx);
 }
 
+int parse_options(int argc, const char **argv, const struct option *options,
+                 const char * const usagestr[], int flags)
+{
+       return parse_options_subcommand(argc, argv, options, NULL,
+                                       (const char **) usagestr, flags);
+}
+
 #define USAGE_OPTS_WIDTH 24
 #define USAGE_GAP         2
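
With parse_options_subcommand() above, a builtin that leaves usagestr[0] NULL gets a usage line generated from its subcommand table. A standalone sketch of just that string-building step; "perf kvm" and its subcommands are used purely as an example.

#include <stdio.h>
#include <string.h>

int main(void)
{
        static const char * const subcommands[] = { "top", "record", "report", NULL };
        char usage[128] = "perf kvm [<options>] {";
        int i;

        for (i = 0; subcommands[i]; i++) {
                if (i)
                        strcat(usage, "|");
                strcat(usage, subcommands[i]);
        }
        strcat(usage, "}");

        puts(usage);    /* perf kvm [<options>] {top|record|report} */
        return 0;
}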
 
index cbf0149cf221783aea2aeafd8643900b80e6765d..d8dac8ac5f371403cf23f0ea0953d5db767540d4 100644 (file)
@@ -140,6 +140,11 @@ extern int parse_options(int argc, const char **argv,
                          const struct option *options,
                          const char * const usagestr[], int flags);
 
+extern int parse_options_subcommand(int argc, const char **argv,
+                               const struct option *options,
+                               const char *const subcommands[],
+                               const char *usagestr[], int flags);
+
 extern NORETURN void usage_with_options(const char * const *usagestr,
                                         const struct option *options);
 
@@ -148,7 +153,8 @@ extern NORETURN void usage_with_options(const char * const *usagestr,
 enum {
        PARSE_OPT_HELP = -1,
        PARSE_OPT_DONE,
-       PARSE_OPT_LIST,
+       PARSE_OPT_LIST_OPTS,
+       PARSE_OPT_LIST_SUBCMDS,
        PARSE_OPT_UNKNOWN,
 };
 
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
new file mode 100644 (file)
index 0000000..a3539ef
--- /dev/null
@@ -0,0 +1,19 @@
+#include <errno.h>
+#include "perf_regs.h"
+
+int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
+{
+       int i, idx = 0;
+       u64 mask = regs->mask;
+
+       if (!(mask & (1 << id)))
+               return -EINVAL;
+
+       for (i = 0; i < id; i++) {
+               if (mask & (1 << i))
+                       idx++;
+       }
+
+       *valp = regs->regs[idx];
+       return 0;
+}
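
perf_reg_value() above relies on the sampled registers being stored packed, in ascending bit order of the mask, so a register's slot is simply the number of mask bits set below its id. A standalone illustration of that indexing; the mask and register ids are invented.

#include <stdint.h>
#include <stdio.h>

static int reg_index(uint64_t mask, int id)
{
        if (!(mask & (1ULL << id)))
                return -1;      /* this register was not sampled */

        /* count the sampled registers that come before 'id' */
        return __builtin_popcountll(mask & ((1ULL << id) - 1));
}

int main(void)
{
        uint64_t mask = 0x0b;   /* registers 0, 1 and 3 were sampled */

        printf("register 3 lives in slot %d\n", reg_index(mask, 3));    /* 2 */
        printf("register 2 sampled? slot %d\n", reg_index(mask, 2));    /* -1 */
        return 0;
}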
index a3d42cd749196b83c4c8226bfd069a68cd3a6917..d6e8b6a8d7f38f03632921789281569433826816 100644 (file)
@@ -1,8 +1,14 @@
 #ifndef __PERF_REGS_H
 #define __PERF_REGS_H
 
+#include "types.h"
+#include "event.h"
+
 #ifdef HAVE_PERF_REGS_SUPPORT
 #include <perf_regs.h>
+
+int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
+
 #else
 #define PERF_REGS_MASK 0
 
@@ -10,5 +16,12 @@ static inline const char *perf_reg_name(int id __maybe_unused)
 {
        return NULL;
 }
+
+static inline int perf_reg_value(u64 *valp __maybe_unused,
+                                struct regs_dump *regs __maybe_unused,
+                                int id __maybe_unused)
+{
+       return 0;
+}
 #endif /* HAVE_PERF_REGS_SUPPORT */
 #endif /* __PERF_REGS_H */
index b752ecb40d86af6245b7f8646bb3607992bcd9f6..00a7dcb2f55cb0d7734e7eb331384d00aa42809c 100644 (file)
@@ -3,7 +3,7 @@
 #include <unistd.h>
 #include <stdio.h>
 #include <dirent.h>
-#include "fs.h"
+#include <api/fs/fs.h>
 #include <locale.h>
 #include "util.h"
 #include "pmu.h"
index d8b048c20cdee51ac894b5394b15c70a1897c106..0d1542f33d879a6f761fe6f1fbd61b6299635237 100644 (file)
@@ -70,34 +70,32 @@ static int e_snprintf(char *str, size_t size, const char *format, ...)
 }
 
 static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
-static int convert_name_to_addr(struct perf_probe_event *pev,
-                               const char *exec);
 static void clear_probe_trace_event(struct probe_trace_event *tev);
-static struct machine machine;
+static struct machine *host_machine;
 
 /* Initialize symbol maps and path of vmlinux/modules */
-static int init_vmlinux(void)
+static int init_symbol_maps(bool user_only)
 {
        int ret;
 
        symbol_conf.sort_by_name = true;
-       if (symbol_conf.vmlinux_name == NULL)
-               symbol_conf.try_vmlinux_path = true;
-       else
-               pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
        ret = symbol__init();
        if (ret < 0) {
                pr_debug("Failed to init symbol map.\n");
                goto out;
        }
 
-       ret = machine__init(&machine, "", HOST_KERNEL_ID);
-       if (ret < 0)
-               goto out;
+       if (host_machine || user_only)  /* already initialized */
+               return 0;
 
-       if (machine__create_kernel_maps(&machine) < 0) {
-               pr_debug("machine__create_kernel_maps() failed.\n");
-               goto out;
+       if (symbol_conf.vmlinux_name)
+               pr_debug("Use vmlinux: %s\n", symbol_conf.vmlinux_name);
+
+       host_machine = machine__new_host();
+       if (!host_machine) {
+               pr_debug("machine__new_host() failed.\n");
+               symbol__exit();
+               ret = -1;
        }
 out:
        if (ret < 0)
@@ -105,21 +103,66 @@ out:
        return ret;
 }
 
+static void exit_symbol_maps(void)
+{
+       if (host_machine) {
+               machine__delete(host_machine);
+               host_machine = NULL;
+       }
+       symbol__exit();
+}
+
 static struct symbol *__find_kernel_function_by_name(const char *name,
                                                     struct map **mapp)
 {
-       return machine__find_kernel_function_by_name(&machine, name, mapp,
+       return machine__find_kernel_function_by_name(host_machine, name, mapp,
                                                     NULL);
 }
 
+static struct symbol *__find_kernel_function(u64 addr, struct map **mapp)
+{
+       return machine__find_kernel_function(host_machine, addr, mapp, NULL);
+}
+
+static struct ref_reloc_sym *kernel_get_ref_reloc_sym(void)
+{
+       /* kmap->ref_reloc_sym should be set if host_machine is initialized */
+       struct kmap *kmap;
+
+       if (map__load(host_machine->vmlinux_maps[MAP__FUNCTION], NULL) < 0)
+               return NULL;
+
+       kmap = map__kmap(host_machine->vmlinux_maps[MAP__FUNCTION]);
+       return kmap->ref_reloc_sym;
+}
+
+static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
+{
+       struct ref_reloc_sym *reloc_sym;
+       struct symbol *sym;
+       struct map *map;
+
+       /* ref_reloc_sym is just a label. Need a special fix */
+       reloc_sym = kernel_get_ref_reloc_sym();
+       if (reloc_sym && strcmp(name, reloc_sym->name) == 0)
+               return (reloc) ? reloc_sym->addr : reloc_sym->unrelocated_addr;
+       else {
+               sym = __find_kernel_function_by_name(name, &map);
+               if (sym)
+                       return map->unmap_ip(map, sym->start) -
+                               ((reloc) ? 0 : map->reloc);
+       }
+       return 0;
+}
+
 static struct map *kernel_get_module_map(const char *module)
 {
        struct rb_node *nd;
-       struct map_groups *grp = &machine.kmaps;
+       struct map_groups *grp = &host_machine->kmaps;
 
        /* A file path -- this is an offline module */
        if (module && strchr(module, '/'))
-               return machine__new_module(&machine, 0, module);
+               return machine__new_module(host_machine, 0, module);
 
        if (!module)
                module = "kernel";
@@ -141,7 +184,7 @@ static struct dso *kernel_get_module_dso(const char *module)
        const char *vmlinux_name;
 
        if (module) {
-               list_for_each_entry(dso, &machine.kernel_dsos, node) {
+               list_for_each_entry(dso, &host_machine->kernel_dsos, node) {
                        if (strncmp(dso->short_name + 1, module,
                                    dso->short_name_len - 2) == 0)
                                goto found;
@@ -150,7 +193,7 @@ static struct dso *kernel_get_module_dso(const char *module)
                return NULL;
        }
 
-       map = machine.vmlinux_maps[MAP__FUNCTION];
+       map = host_machine->vmlinux_maps[MAP__FUNCTION];
        dso = map->dso;
 
        vmlinux_name = symbol_conf.vmlinux_name;
@@ -173,20 +216,6 @@ const char *kernel_get_module_path(const char *module)
        return (dso) ? dso->long_name : NULL;
 }
 
-static int init_user_exec(void)
-{
-       int ret = 0;
-
-       symbol_conf.try_vmlinux_path = false;
-       symbol_conf.sort_by_name = true;
-       ret = symbol__init();
-
-       if (ret < 0)
-               pr_debug("Failed to init symbol map.\n");
-
-       return ret;
-}
-
 static int convert_exec_to_group(const char *exec, char **result)
 {
        char *ptr1, *ptr2, *exec_copy;
@@ -218,32 +247,23 @@ out:
        return ret;
 }
 
-static int convert_to_perf_probe_point(struct probe_trace_point *tp,
-                                       struct perf_probe_point *pp)
+static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
 {
-       pp->function = strdup(tp->symbol);
-
-       if (pp->function == NULL)
-               return -ENOMEM;
-
-       pp->offset = tp->offset;
-       pp->retprobe = tp->retprobe;
+       int i;
 
-       return 0;
+       for (i = 0; i < ntevs; i++)
+               clear_probe_trace_event(tevs + i);
 }
 
 #ifdef HAVE_DWARF_SUPPORT
+
 /* Open new debuginfo of given module */
 static struct debuginfo *open_debuginfo(const char *module)
 {
-       const char *path;
+       const char *path = module;
 
-       /* A file path -- this is an offline module */
-       if (module && strchr(module, '/'))
-               path = module;
-       else {
+       if (!module || !strchr(module, '/')) {
                path = kernel_get_module_path(module);
-
                if (!path) {
                        pr_err("Failed to find path of %s module.\n",
                               module ?: "kernel");
@@ -253,46 +273,6 @@ static struct debuginfo *open_debuginfo(const char *module)
        return debuginfo__new(path);
 }
 
-/*
- * Convert trace point to probe point with debuginfo
- * Currently only handles kprobes.
- */
-static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
-                                       struct perf_probe_point *pp)
-{
-       struct symbol *sym;
-       struct map *map;
-       u64 addr;
-       int ret = -ENOENT;
-       struct debuginfo *dinfo;
-
-       sym = __find_kernel_function_by_name(tp->symbol, &map);
-       if (sym) {
-               addr = map->unmap_ip(map, sym->start + tp->offset);
-               pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol,
-                        tp->offset, addr);
-
-               dinfo = debuginfo__new_online_kernel(addr);
-               if (dinfo) {
-                       ret = debuginfo__find_probe_point(dinfo,
-                                                (unsigned long)addr, pp);
-                       debuginfo__delete(dinfo);
-               } else {
-                       pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n",
-                                addr);
-                       ret = -ENOENT;
-               }
-       }
-       if (ret <= 0) {
-               pr_debug("Failed to find corresponding probes from "
-                        "debuginfo. Use kprobe event information.\n");
-               return convert_to_perf_probe_point(tp, pp);
-       }
-       pp->retprobe = tp->retprobe;
-
-       return 0;
-}
-
 static int get_text_start_address(const char *exec, unsigned long *address)
 {
        Elf *elf;
@@ -321,12 +301,62 @@ out:
        return ret;
 }
 
+/*
+ * Convert trace point to probe point with debuginfo
+ */
+static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
+                                           struct perf_probe_point *pp,
+                                           bool is_kprobe)
+{
+       struct debuginfo *dinfo = NULL;
+       unsigned long stext = 0;
+       u64 addr = tp->address;
+       int ret = -ENOENT;
+
+       /* convert the address to dwarf address */
+       if (!is_kprobe) {
+               if (!addr) {
+                       ret = -EINVAL;
+                       goto error;
+               }
+               ret = get_text_start_address(tp->module, &stext);
+               if (ret < 0)
+                       goto error;
+               addr += stext;
+       } else {
+               addr = kernel_get_symbol_address_by_name(tp->symbol, false);
+               if (addr == 0)
+                       goto error;
+               addr += tp->offset;
+       }
+
+       pr_debug("try to find information at %" PRIx64 " in %s\n", addr,
+                tp->module ? : "kernel");
+
+       dinfo = open_debuginfo(tp->module);
+       if (dinfo) {
+               ret = debuginfo__find_probe_point(dinfo,
+                                                (unsigned long)addr, pp);
+               debuginfo__delete(dinfo);
+       } else {
+               pr_debug("Failed to open debuginfo at 0x%" PRIx64 "\n", addr);
+               ret = -ENOENT;
+       }
+
+       if (ret > 0) {
+               pp->retprobe = tp->retprobe;
+               return 0;
+       }
+error:
+       pr_debug("Failed to find corresponding probes from debuginfo.\n");
+       return ret ? : -ENOENT;
+}
+
 static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
                                          int ntevs, const char *exec)
 {
        int i, ret = 0;
-       unsigned long offset, stext = 0;
-       char buf[32];
+       unsigned long stext = 0;
 
        if (!exec)
                return 0;
@@ -337,15 +367,9 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
 
        for (i = 0; i < ntevs && ret >= 0; i++) {
                /* point.address is the address of point.symbol + point.offset */
-               offset = tevs[i].point.address - stext;
-               tevs[i].point.offset = 0;
-               zfree(&tevs[i].point.symbol);
-               ret = e_snprintf(buf, 32, "0x%lx", offset);
-               if (ret < 0)
-                       break;
+               tevs[i].point.address -= stext;
                tevs[i].point.module = strdup(exec);
-               tevs[i].point.symbol = strdup(buf);
-               if (!tevs[i].point.symbol || !tevs[i].point.module) {
+               if (!tevs[i].point.module) {
                        ret = -ENOMEM;
                        break;
                }
@@ -388,12 +412,40 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
        return ret;
 }
 
-static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
+/* Post processing the probe events */
+static int post_process_probe_trace_events(struct probe_trace_event *tevs,
+                                          int ntevs, const char *module,
+                                          bool uprobe)
 {
+       struct ref_reloc_sym *reloc_sym;
+       char *tmp;
        int i;
 
-       for (i = 0; i < ntevs; i++)
-               clear_probe_trace_event(tevs + i);
+       if (uprobe)
+               return add_exec_to_probe_trace_events(tevs, ntevs, module);
+
+       /* Note that currently ref_reloc_sym based probe is not for drivers */
+       if (module)
+               return add_module_to_probe_trace_events(tevs, ntevs, module);
+
+       reloc_sym = kernel_get_ref_reloc_sym();
+       if (!reloc_sym) {
+               pr_warning("Relocated base symbol is not found!\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ntevs; i++) {
+               if (tevs[i].point.address) {
+                       tmp = strdup(reloc_sym->name);
+                       if (!tmp)
+                               return -ENOMEM;
+                       free(tevs[i].point.symbol);
+                       tevs[i].point.symbol = tmp;
+                       tevs[i].point.offset = tevs[i].point.address -
+                                              reloc_sym->unrelocated_addr;
+               }
+       }
+       return 0;
 }
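
post_process_probe_trace_events() above rewrites an absolute kernel address as "<ref_reloc_sym> + offset", with the reference symbol typically being _stext. A tiny numeric illustration; both addresses below are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t probe_addr    = 0xffffffff812a4c30ULL; /* resolved via debuginfo */
        uint64_t stext_unreloc = 0xffffffff81000000ULL; /* reloc_sym->unrelocated_addr */

        /* what ends up in the probe definition: symbol + offset */
        printf("p:probe/example _stext+%#llx\n",
               (unsigned long long)(probe_addr - stext_unreloc));
        return 0;
}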
 
 /* Try to find perf_probe_event with debuginfo */
@@ -416,21 +468,16 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                return 0;
        }
 
+       pr_debug("Try to find probe point from debuginfo.\n");
        /* Searching trace events corresponding to a probe event */
        ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs);
 
        debuginfo__delete(dinfo);
 
        if (ntevs > 0) {        /* Succeeded to find trace events */
-               pr_debug("find %d probe_trace_events.\n", ntevs);
-               if (target) {
-                       if (pev->uprobes)
-                               ret = add_exec_to_probe_trace_events(*tevs,
-                                                ntevs, target);
-                       else
-                               ret = add_module_to_probe_trace_events(*tevs,
-                                                ntevs, target);
-               }
+               pr_debug("Found %d probe_trace_events.\n", ntevs);
+               ret = post_process_probe_trace_events(*tevs, ntevs,
+                                                       target, pev->uprobes);
                if (ret < 0) {
                        clear_probe_trace_events(*tevs, ntevs);
                        zfree(tevs);
@@ -563,20 +610,16 @@ static int _show_one_line(FILE *fp, int l, bool skip, bool show_num)
  * Show line-range always requires debuginfo to find source file and
  * line number.
  */
-int show_line_range(struct line_range *lr, const char *module)
+static int __show_line_range(struct line_range *lr, const char *module)
 {
        int l = 1;
-       struct line_node *ln;
+       struct int_node *ln;
        struct debuginfo *dinfo;
        FILE *fp;
        int ret;
        char *tmp;
 
        /* Search a line range */
-       ret = init_vmlinux();
-       if (ret < 0)
-               return ret;
-
        dinfo = open_debuginfo(module);
        if (!dinfo) {
                pr_warning("Failed to open debuginfo file.\n");
@@ -623,8 +666,8 @@ int show_line_range(struct line_range *lr, const char *module)
                        goto end;
        }
 
-       list_for_each_entry(ln, &lr->line_list, list) {
-               for (; ln->line > l; l++) {
+       intlist__for_each(ln, lr->line_list) {
+               for (; ln->i > l; l++) {
                        ret = show_one_line(fp, l - lr->offset);
                        if (ret < 0)
                                goto end;
@@ -646,6 +689,19 @@ end:
        return ret;
 }
 
+int show_line_range(struct line_range *lr, const char *module)
+{
+       int ret;
+
+       ret = init_symbol_maps(false);
+       if (ret < 0)
+               return ret;
+       ret = __show_line_range(lr, module);
+       exit_symbol_maps();
+
+       return ret;
+}
+
 static int show_available_vars_at(struct debuginfo *dinfo,
                                  struct perf_probe_event *pev,
                                  int max_vls, struct strfilter *_filter,
@@ -707,14 +763,15 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
        int i, ret = 0;
        struct debuginfo *dinfo;
 
-       ret = init_vmlinux();
+       ret = init_symbol_maps(false);
        if (ret < 0)
                return ret;
 
        dinfo = open_debuginfo(module);
        if (!dinfo) {
                pr_warning("Failed to open debuginfo file.\n");
-               return -ENOENT;
+               ret = -ENOENT;
+               goto out;
        }
 
        setup_pager();
@@ -724,23 +781,19 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
                                             externs);
 
        debuginfo__delete(dinfo);
+out:
+       exit_symbol_maps();
        return ret;
 }
 
 #else  /* !HAVE_DWARF_SUPPORT */
 
-static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp,
-                                       struct perf_probe_point *pp)
+static int
+find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
+                                struct perf_probe_point *pp __maybe_unused,
+                                bool is_kprobe __maybe_unused)
 {
-       struct symbol *sym;
-
-       sym = __find_kernel_function_by_name(tp->symbol, NULL);
-       if (!sym) {
-               pr_err("Failed to find symbol %s in kernel.\n", tp->symbol);
-               return -ENOENT;
-       }
-
-       return convert_to_perf_probe_point(tp, pp);
+       return -ENOSYS;
 }
 
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
@@ -776,24 +829,22 @@ int show_available_vars(struct perf_probe_event *pevs __maybe_unused,
 
 void line_range__clear(struct line_range *lr)
 {
-       struct line_node *ln;
-
        free(lr->function);
        free(lr->file);
        free(lr->path);
        free(lr->comp_dir);
-       while (!list_empty(&lr->line_list)) {
-               ln = list_first_entry(&lr->line_list, struct line_node, list);
-               list_del(&ln->list);
-               free(ln);
-       }
+       intlist__delete(lr->line_list);
        memset(lr, 0, sizeof(*lr));
 }
 
-void line_range__init(struct line_range *lr)
+int line_range__init(struct line_range *lr)
 {
        memset(lr, 0, sizeof(*lr));
-       INIT_LIST_HEAD(&lr->line_list);
+       lr->line_list = intlist__new(NULL);
+       if (!lr->line_list)
+               return -ENOMEM;
+       else
+               return 0;
 }
 
 static int parse_line_num(char **ptr, int *val, const char *what)
@@ -1267,16 +1318,21 @@ static int parse_probe_trace_command(const char *cmd,
        } else
                p = argv[1];
        fmt1_str = strtok_r(p, "+", &fmt);
-       tp->symbol = strdup(fmt1_str);
-       if (tp->symbol == NULL) {
-               ret = -ENOMEM;
-               goto out;
+       if (fmt1_str[0] == '0') /* only an address starts with '0' (0x...) */
+               tp->address = strtoul(fmt1_str, NULL, 0);
+       else {
+               /* Only the symbol-based probe has offset */
+               tp->symbol = strdup(fmt1_str);
+               if (tp->symbol == NULL) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               fmt2_str = strtok_r(NULL, "", &fmt);
+               if (fmt2_str == NULL)
+                       tp->offset = 0;
+               else
+                       tp->offset = strtoul(fmt2_str, NULL, 10);
        }
-       fmt2_str = strtok_r(NULL, "", &fmt);
-       if (fmt2_str == NULL)
-               tp->offset = 0;
-       else
-               tp->offset = strtoul(fmt2_str, NULL, 10);
 
        tev->nargs = argc - 2;
        tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
@@ -1518,20 +1574,27 @@ char *synthesize_probe_trace_command(struct probe_trace_event *tev)
        if (buf == NULL)
                return NULL;
 
+       len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s ", tp->retprobe ? 'r' : 'p',
+                        tev->group, tev->event);
+       if (len <= 0)
+               goto error;
+
+       /* Uprobes must have tp->address and tp->module */
+       if (tev->uprobes && (!tp->address || !tp->module))
+               goto error;
+
+       /* Use the tp->address for uprobes */
        if (tev->uprobes)
-               len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s:%s",
-                                tp->retprobe ? 'r' : 'p',
-                                tev->group, tev->event,
-                                tp->module, tp->symbol);
+               ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx",
+                                tp->module, tp->address);
        else
-               len = e_snprintf(buf, MAX_CMDLEN, "%c:%s/%s %s%s%s+%lu",
-                                tp->retprobe ? 'r' : 'p',
-                                tev->group, tev->event,
+               ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu",
                                 tp->module ?: "", tp->module ? ":" : "",
                                 tp->symbol, tp->offset);
 
-       if (len <= 0)
+       if (ret <= 0)
                goto error;
+       len += ret;
 
        for (i = 0; i < tev->nargs; i++) {
                ret = synthesize_probe_trace_arg(&tev->args[i], buf + len,
@@ -1547,6 +1610,79 @@ error:
        return NULL;
 }
 
+static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
+                                         struct perf_probe_point *pp,
+                                         bool is_kprobe)
+{
+       struct symbol *sym = NULL;
+       struct map *map;
+       u64 addr;
+       int ret = -ENOENT;
+
+       if (!is_kprobe) {
+               map = dso__new_map(tp->module);
+               if (!map)
+                       goto out;
+               addr = tp->address;
+               sym = map__find_symbol(map, addr, NULL);
+       } else {
+               addr = kernel_get_symbol_address_by_name(tp->symbol, true);
+               if (addr) {
+                       addr += tp->offset;
+                       sym = __find_kernel_function(addr, &map);
+               }
+       }
+       if (!sym)
+               goto out;
+
+       pp->retprobe = tp->retprobe;
+       pp->offset = addr - map->unmap_ip(map, sym->start);
+       pp->function = strdup(sym->name);
+       ret = pp->function ? 0 : -ENOMEM;
+
+out:
+       if (map && !is_kprobe) {
+               dso__delete(map->dso);
+               map__delete(map);
+       }
+
+       return ret;
+}
+
+static int convert_to_perf_probe_point(struct probe_trace_point *tp,
+                                       struct perf_probe_point *pp,
+                                       bool is_kprobe)
+{
+       char buf[128];
+       int ret;
+
+       ret = find_perf_probe_point_from_dwarf(tp, pp, is_kprobe);
+       if (!ret)
+               return 0;
+       ret = find_perf_probe_point_from_map(tp, pp, is_kprobe);
+       if (!ret)
+               return 0;
+
+       pr_debug("Failed to find probe point from both of dwarf and map.\n");
+
+       if (tp->symbol) {
+               pp->function = strdup(tp->symbol);
+               pp->offset = tp->offset;
+       } else if (!tp->module && !is_kprobe) {
+               ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
+               if (ret < 0)
+                       return ret;
+               pp->function = strdup(buf);
+               pp->offset = 0;
+       }
+       if (pp->function == NULL)
+               return -ENOMEM;
+
+       pp->retprobe = tp->retprobe;
+
+       return 0;
+}
+
 static int convert_to_perf_probe_event(struct probe_trace_event *tev,
                               struct perf_probe_event *pev, bool is_kprobe)
 {
@@ -1560,11 +1696,7 @@ static int convert_to_perf_probe_event(struct probe_trace_event *tev,
                return -ENOMEM;
 
        /* Convert trace_point to probe_point */
-       if (is_kprobe)
-               ret = kprobe_convert_to_perf_probe(&tev->point, &pev->point);
-       else
-               ret = convert_to_perf_probe_point(&tev->point, &pev->point);
-
+       ret = convert_to_perf_probe_point(&tev->point, &pev->point, is_kprobe);
        if (ret < 0)
                return ret;
 
@@ -1731,7 +1863,8 @@ static struct strlist *get_probe_trace_command_rawlist(int fd)
 }
 
 /* Show an event */
-static int show_perf_probe_event(struct perf_probe_event *pev)
+static int show_perf_probe_event(struct perf_probe_event *pev,
+                                const char *module)
 {
        int i, ret;
        char buf[128];
@@ -1747,6 +1880,8 @@ static int show_perf_probe_event(struct perf_probe_event *pev)
                return ret;
 
        printf("  %-20s (on %s", buf, place);
+       if (module)
+               printf(" in %s", module);
 
        if (pev->nargs > 0) {
                printf(" with");
@@ -1784,7 +1919,8 @@ static int __show_perf_probe_events(int fd, bool is_kprobe)
                        ret = convert_to_perf_probe_event(&tev, &pev,
                                                                is_kprobe);
                        if (ret >= 0)
-                               ret = show_perf_probe_event(&pev);
+                               ret = show_perf_probe_event(&pev,
+                                                           tev.point.module);
                }
                clear_perf_probe_event(&pev);
                clear_probe_trace_event(&tev);
@@ -1807,7 +1943,7 @@ int show_perf_probe_events(void)
        if (fd < 0)
                return fd;
 
-       ret = init_vmlinux();
+       ret = init_symbol_maps(false);
        if (ret < 0)
                return ret;
 
@@ -1820,6 +1956,7 @@ int show_perf_probe_events(void)
                close(fd);
        }
 
+       exit_symbol_maps();
        return ret;
 }
 
@@ -1982,7 +2119,7 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
                group = pev->group;
                pev->event = tev->event;
                pev->group = tev->group;
-               show_perf_probe_event(pev);
+               show_perf_probe_event(pev, tev->point.module);
                /* Trick here - restore current event/group */
                pev->event = (char *)event;
                pev->group = (char *)group;
@@ -2008,113 +2145,175 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        return ret;
 }
 
-static int convert_to_probe_trace_events(struct perf_probe_event *pev,
-                                         struct probe_trace_event **tevs,
-                                         int max_tevs, const char *target)
+static char *looking_function_name;
+static int num_matched_functions;
+
+static int probe_function_filter(struct map *map __maybe_unused,
+                                     struct symbol *sym)
 {
+       if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
+           strcmp(looking_function_name, sym->name) == 0) {
+               num_matched_functions++;
+               return 0;
+       }
+       return 1;
+}
+
+#define strdup_or_goto(str, label)     \
+       ({ char *__p = strdup(str); if (!__p) goto label; __p; })
+
+/*
+ * Find probe function addresses from map.
+ * Return an error or the number of found probe_trace_event
+ */
+static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
+                                           struct probe_trace_event **tevs,
+                                           int max_tevs, const char *target)
+{
+       struct map *map = NULL;
+       struct kmap *kmap = NULL;
+       struct ref_reloc_sym *reloc_sym = NULL;
        struct symbol *sym;
-       int ret, i;
+       struct rb_node *nd;
        struct probe_trace_event *tev;
+       struct perf_probe_point *pp = &pev->point;
+       struct probe_trace_point *tp;
+       int ret, i;
 
-       if (pev->uprobes && !pev->group) {
-               /* Replace group name if not given */
-               ret = convert_exec_to_group(target, &pev->group);
-               if (ret != 0) {
-                       pr_warning("Failed to make a group name.\n");
-                       return ret;
-               }
+       /* Init maps of given executable or kernel */
+       if (pev->uprobes)
+               map = dso__new_map(target);
+       else
+               map = kernel_get_module_map(target);
+       if (!map) {
+               ret = -EINVAL;
+               goto out;
        }
 
-       /* Convert perf_probe_event with debuginfo */
-       ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
-       if (ret != 0)
-               return ret;     /* Found in debuginfo or got an error */
-
-       if (pev->uprobes) {
-               ret = convert_name_to_addr(pev, target);
-               if (ret < 0)
-                       return ret;
+       /*
+        * Load matched symbols: Since the different local symbols may have
+        * same name but different addresses, this lists all the symbols.
+        */
+       num_matched_functions = 0;
+       looking_function_name = pp->function;
+       ret = map__load(map, probe_function_filter);
+       if (ret || num_matched_functions == 0) {
+               pr_err("Failed to find symbol %s in %s\n", pp->function,
+                       target ? : "kernel");
+               ret = -ENOENT;
+               goto out;
+       } else if (num_matched_functions > max_tevs) {
+               pr_err("Too many functions matched in %s\n",
+                       target ? : "kernel");
+               ret = -E2BIG;
+               goto out;
        }
 
-       /* Allocate trace event buffer */
-       tev = *tevs = zalloc(sizeof(struct probe_trace_event));
-       if (tev == NULL)
-               return -ENOMEM;
+       if (!pev->uprobes) {
+               kmap = map__kmap(map);
+               reloc_sym = kmap->ref_reloc_sym;
+               if (!reloc_sym) {
+                       pr_warning("Relocated base symbol is not found!\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
 
-       /* Copy parameters */
-       tev->point.symbol = strdup(pev->point.function);
-       if (tev->point.symbol == NULL) {
+       /* Setup result trace-probe-events */
+       *tevs = zalloc(sizeof(*tev) * num_matched_functions);
+       if (!*tevs) {
                ret = -ENOMEM;
-               goto error;
+               goto out;
        }
 
-       if (target) {
-               tev->point.module = strdup(target);
-               if (tev->point.module == NULL) {
-                       ret = -ENOMEM;
-                       goto error;
+       ret = 0;
+       map__for_each_symbol(map, sym, nd) {
+               tev = (*tevs) + ret;
+               tp = &tev->point;
+               if (ret == num_matched_functions) {
+                       pr_warning("Too many symbols are listed. Skip it.\n");
+                       break;
                }
-       }
-
-       tev->point.offset = pev->point.offset;
-       tev->point.retprobe = pev->point.retprobe;
-       tev->nargs = pev->nargs;
-       tev->uprobes = pev->uprobes;
+               ret++;
 
-       if (tev->nargs) {
-               tev->args = zalloc(sizeof(struct probe_trace_arg)
-                                  * tev->nargs);
-               if (tev->args == NULL) {
-                       ret = -ENOMEM;
-                       goto error;
+               if (pp->offset > sym->end - sym->start) {
+                       pr_warning("Offset %ld is bigger than the size of %s\n",
+                                  pp->offset, sym->name);
+                       ret = -ENOENT;
+                       goto err_out;
+               }
+               /* Add one probe point */
+               tp->address = map->unmap_ip(map, sym->start) + pp->offset;
+               if (reloc_sym) {
+                       tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out);
+                       tp->offset = tp->address - reloc_sym->addr;
+               } else {
+                       tp->symbol = strdup_or_goto(sym->name, nomem_out);
+                       tp->offset = pp->offset;
+               }
+               tp->retprobe = pp->retprobe;
+               if (target)
+                       tev->point.module = strdup_or_goto(target, nomem_out);
+               tev->uprobes = pev->uprobes;
+               tev->nargs = pev->nargs;
+               if (tev->nargs) {
+                       tev->args = zalloc(sizeof(struct probe_trace_arg) *
+                                          tev->nargs);
+                       if (tev->args == NULL)
+                               goto nomem_out;
                }
                for (i = 0; i < tev->nargs; i++) {
-                       if (pev->args[i].name) {
-                               tev->args[i].name = strdup(pev->args[i].name);
-                               if (tev->args[i].name == NULL) {
-                                       ret = -ENOMEM;
-                                       goto error;
-                               }
-                       }
-                       tev->args[i].value = strdup(pev->args[i].var);
-                       if (tev->args[i].value == NULL) {
-                               ret = -ENOMEM;
-                               goto error;
-                       }
-                       if (pev->args[i].type) {
-                               tev->args[i].type = strdup(pev->args[i].type);
-                               if (tev->args[i].type == NULL) {
-                                       ret = -ENOMEM;
-                                       goto error;
-                               }
-                       }
+                       if (pev->args[i].name)
+                               tev->args[i].name =
+                                       strdup_or_goto(pev->args[i].name,
+                                                       nomem_out);
+
+                       tev->args[i].value = strdup_or_goto(pev->args[i].var,
+                                                           nomem_out);
+                       if (pev->args[i].type)
+                               tev->args[i].type =
+                                       strdup_or_goto(pev->args[i].type,
+                                                       nomem_out);
                }
        }
 
-       if (pev->uprobes)
-               return 1;
+out:
+       if (map && pev->uprobes) {
+               /* Only when using uprobe(exec) map needs to be released */
+               dso__delete(map->dso);
+               map__delete(map);
+       }
+       return ret;
 
-       /* Currently just checking function name from symbol map */
-       sym = __find_kernel_function_by_name(tev->point.symbol, NULL);
-       if (!sym) {
-               pr_warning("Kernel symbol \'%s\' not found.\n",
-                          tev->point.symbol);
-               ret = -ENOENT;
-               goto error;
-       } else if (tev->point.offset > sym->end - sym->start) {
-               pr_warning("Offset specified is greater than size of %s\n",
-                          tev->point.symbol);
-               ret = -ENOENT;
-               goto error;
+nomem_out:
+       ret = -ENOMEM;
+err_out:
+       clear_probe_trace_events(*tevs, num_matched_functions);
+       zfree(tevs);
+       goto out;
+}
+
+static int convert_to_probe_trace_events(struct perf_probe_event *pev,
+                                         struct probe_trace_event **tevs,
+                                         int max_tevs, const char *target)
+{
+       int ret;
 
+       if (pev->uprobes && !pev->group) {
+               /* Replace group name if not given */
+               ret = convert_exec_to_group(target, &pev->group);
+               if (ret != 0) {
+                       pr_warning("Failed to make a group name.\n");
+                       return ret;
+               }
        }
 
-       return 1;
-error:
-       clear_probe_trace_event(tev);
-       free(tev);
-       *tevs = NULL;
-       return ret;
+       /* Convert perf_probe_event with debuginfo */
+       ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
+       if (ret != 0)
+               return ret;     /* Found in debuginfo or got an error */
+
+       return find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
 }
 
 struct __event_package {
@@ -2135,12 +2334,7 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
        if (pkgs == NULL)
                return -ENOMEM;
 
-       if (!pevs->uprobes)
-               /* Init vmlinux path */
-               ret = init_vmlinux();
-       else
-               ret = init_user_exec();
-
+       ret = init_symbol_maps(pevs->uprobes);
        if (ret < 0) {
                free(pkgs);
                return ret;
@@ -2174,6 +2368,7 @@ end:
                zfree(&pkgs[i].tevs);
        }
        free(pkgs);
+       exit_symbol_maps();
 
        return ret;
 }
@@ -2323,159 +2518,51 @@ static struct strfilter *available_func_filter;
 static int filter_available_functions(struct map *map __maybe_unused,
                                      struct symbol *sym)
 {
-       if (sym->binding == STB_GLOBAL &&
+       if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
            strfilter__compare(available_func_filter, sym->name))
                return 0;
        return 1;
 }
 
-static int __show_available_funcs(struct map *map)
-{
-       if (map__load(map, filter_available_functions)) {
-               pr_err("Failed to load map.\n");
-               return -EINVAL;
-       }
-       if (!dso__sorted_by_name(map->dso, map->type))
-               dso__sort_by_name(map->dso, map->type);
-
-       dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
-       return 0;
-}
-
-static int available_kernel_funcs(const char *module)
+int show_available_funcs(const char *target, struct strfilter *_filter,
+                                       bool user)
 {
        struct map *map;
        int ret;
 
-       ret = init_vmlinux();
+       ret = init_symbol_maps(user);
        if (ret < 0)
                return ret;
 
-       map = kernel_get_module_map(module);
+       /* Get a symbol map */
+       if (user)
+               map = dso__new_map(target);
+       else
+               map = kernel_get_module_map(target);
        if (!map) {
-               pr_err("Failed to find %s map.\n", (module) ? : "kernel");
+               pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
                return -EINVAL;
        }
-       return __show_available_funcs(map);
-}
-
-static int available_user_funcs(const char *target)
-{
-       struct map *map;
-       int ret;
-
-       ret = init_user_exec();
-       if (ret < 0)
-               return ret;
-
-       map = dso__new_map(target);
-       ret = __show_available_funcs(map);
-       dso__delete(map->dso);
-       map__delete(map);
-       return ret;
-}
 
-int show_available_funcs(const char *target, struct strfilter *_filter,
-                                       bool user)
-{
-       setup_pager();
+       /* Load symbols with given filter */
        available_func_filter = _filter;
-
-       if (!user)
-               return available_kernel_funcs(target);
-
-       return available_user_funcs(target);
-}
-
-/*
- * uprobe_events only accepts address:
- * Convert function and any offset to address
- */
-static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec)
-{
-       struct perf_probe_point *pp = &pev->point;
-       struct symbol *sym;
-       struct map *map = NULL;
-       char *function = NULL;
-       int ret = -EINVAL;
-       unsigned long long vaddr = 0;
-
-       if (!pp->function) {
-               pr_warning("No function specified for uprobes");
-               goto out;
-       }
-
-       function = strdup(pp->function);
-       if (!function) {
-               pr_warning("Failed to allocate memory by strdup.\n");
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       map = dso__new_map(exec);
-       if (!map) {
-               pr_warning("Cannot find appropriate DSO for %s.\n", exec);
-               goto out;
-       }
-       available_func_filter = strfilter__new(function, NULL);
        if (map__load(map, filter_available_functions)) {
-               pr_err("Failed to load map.\n");
-               goto out;
-       }
-
-       sym = map__find_symbol_by_name(map, function, NULL);
-       if (!sym) {
-               pr_warning("Cannot find %s in DSO %s\n", function, exec);
-               goto out;
-       }
-
-       if (map->start > sym->start)
-               vaddr = map->start;
-       vaddr += sym->start + pp->offset + map->pgoff;
-       pp->offset = 0;
-
-       if (!pev->event) {
-               pev->event = function;
-               function = NULL;
-       }
-       if (!pev->group) {
-               char *ptr1, *ptr2, *exec_copy;
-
-               pev->group = zalloc(sizeof(char *) * 64);
-               exec_copy = strdup(exec);
-               if (!exec_copy) {
-                       ret = -ENOMEM;
-                       pr_warning("Failed to copy exec string.\n");
-                       goto out;
-               }
-
-               ptr1 = strdup(basename(exec_copy));
-               if (ptr1) {
-                       ptr2 = strpbrk(ptr1, "-._");
-                       if (ptr2)
-                               *ptr2 = '\0';
-                       e_snprintf(pev->group, 64, "%s_%s", PERFPROBE_GROUP,
-                                       ptr1);
-                       free(ptr1);
-               }
-               free(exec_copy);
-       }
-       free(pp->function);
-       pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS);
-       if (!pp->function) {
-               ret = -ENOMEM;
-               pr_warning("Failed to allocate memory by zalloc.\n");
-               goto out;
+               pr_err("Failed to load symbols in %s\n", (target) ? : "kernel");
+               goto end;
        }
-       e_snprintf(pp->function, MAX_PROBE_ARGS, "0x%llx", vaddr);
-       ret = 0;
+       if (!dso__sorted_by_name(map->dso, map->type))
+               dso__sort_by_name(map->dso, map->type);
 
-out:
-       if (map) {
+       /* Show all (filtered) symbols */
+       setup_pager();
+       dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
+end:
+       if (user) {
                dso__delete(map->dso);
                map__delete(map);
        }
-       if (function)
-               free(function);
+       exit_symbol_maps();
+
        return ret;
 }
+
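
Note: with this change a uprobe event is written as "p:group/event /path/to/bin:0xADDRESS" instead of the old "path:symbol" form, and synthesize_probe_trace_command() now emits the common "p:group/event " prefix once and appends the location and arguments at buf + len. A minimal sketch of that accumulation pattern follows; it is not the perf implementation, and plain snprintf() with a caller-supplied size stands in for perf's internal e_snprintf()/MAX_CMDLEN.

    /*
     * Sketch only: write the "p:group/event " prefix once, then keep
     * appending at buf + len.  Arguments would continue from buf + len + ret.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static int build_probe_cmd(char *buf, size_t sz, bool retprobe, bool uprobe,
                               const char *group, const char *event,
                               const char *module, const char *symbol,
                               unsigned long address, unsigned long offset)
    {
            int len, ret;

            len = snprintf(buf, sz, "%c:%s/%s ", retprobe ? 'r' : 'p', group, event);
            if (len <= 0 || (size_t)len >= sz)
                    return -1;

            if (uprobe)             /* uprobes: executable path plus raw address */
                    ret = snprintf(buf + len, sz - len, "%s:0x%lx", module, address);
            else                    /* kprobes: symbol plus offset */
                    ret = snprintf(buf + len, sz - len, "%s+%lu", symbol, offset);

            if (ret <= 0 || (size_t)ret >= sz - len)
                    return -1;

            return len + ret;
    }
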
index fcaf7273e85a35f41ac465c2f343ec07cbd0f1a4..776c9347a3b64252e2f4c9b30b4710b580923b72 100644 (file)
@@ -2,6 +2,7 @@
 #define _PROBE_EVENT_H
 
 #include <stdbool.h>
+#include "intlist.h"
 #include "strlist.h"
 #include "strfilter.h"
 
@@ -76,13 +77,6 @@ struct perf_probe_event {
        struct perf_probe_arg   *args;  /* Arguments */
 };
 
-
-/* Line number container */
-struct line_node {
-       struct list_head        list;
-       int                     line;
-};
-
 /* Line range */
 struct line_range {
        char                    *file;          /* File name */
@@ -92,7 +86,7 @@ struct line_range {
        int                     offset;         /* Start line offset */
        char                    *path;          /* Real path name */
        char                    *comp_dir;      /* Compile directory */
-       struct list_head        line_list;      /* Visible lines */
+       struct intlist          *line_list;     /* Visible lines */
 };
 
 /* List of variables */
@@ -124,7 +118,7 @@ extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
 extern void line_range__clear(struct line_range *lr);
 
 /* Initialize line range */
-extern void line_range__init(struct line_range *lr);
+extern int line_range__init(struct line_range *lr);
 
 /* Internal use: Return kernel/module path */
 extern const char *kernel_get_module_path(const char *module);
index 061edb162b5ba03f9b4f883543faa5c0f27e371d..df0238654698d8967120f538a68b6aec5da18426 100644 (file)
@@ -34,7 +34,9 @@
 
 #include <linux/bitops.h>
 #include "event.h"
+#include "dso.h"
 #include "debug.h"
+#include "intlist.h"
 #include "util.h"
 #include "symbol.h"
 #include "probe-finder.h"
 /* Kprobe tracer basic type is up to u64 */
 #define MAX_BASIC_TYPE_BITS    64
 
-/* Line number list operations */
-
-/* Add a line to line number list */
-static int line_list__add_line(struct list_head *head, int line)
-{
-       struct line_node *ln;
-       struct list_head *p;
-
-       /* Reverse search, because new line will be the last one */
-       list_for_each_entry_reverse(ln, head, list) {
-               if (ln->line < line) {
-                       p = &ln->list;
-                       goto found;
-               } else if (ln->line == line)    /* Already exist */
-                       return 1;
-       }
-       /* List is empty, or the smallest entry */
-       p = head;
-found:
-       pr_debug("line list: add a line %u\n", line);
-       ln = zalloc(sizeof(struct line_node));
-       if (ln == NULL)
-               return -ENOMEM;
-       ln->line = line;
-       INIT_LIST_HEAD(&ln->list);
-       list_add(&ln->list, p);
-       return 0;
-}
-
-/* Check if the line in line number list */
-static int line_list__has_line(struct list_head *head, int line)
-{
-       struct line_node *ln;
-
-       /* Reverse search, because new line will be the last one */
-       list_for_each_entry(ln, head, list)
-               if (ln->line == line)
-                       return 1;
-
-       return 0;
-}
-
-/* Init line number list */
-static void line_list__init(struct list_head *head)
-{
-       INIT_LIST_HEAD(head);
-}
-
-/* Free line number list */
-static void line_list__free(struct list_head *head)
-{
-       struct line_node *ln;
-       while (!list_empty(head)) {
-               ln = list_first_entry(head, struct line_node, list);
-               list_del(&ln->list);
-               free(ln);
-       }
-}
-
 /* Dwarf FL wrappers */
 static char *debuginfo_path;   /* Currently dummy */
 
@@ -147,80 +90,7 @@ error:
        return -ENOENT;
 }
 
-#if _ELFUTILS_PREREQ(0, 148)
-/* This method is buggy if elfutils is older than 0.148 */
-static int __linux_kernel_find_elf(Dwfl_Module *mod,
-                                  void **userdata,
-                                  const char *module_name,
-                                  Dwarf_Addr base,
-                                  char **file_name, Elf **elfp)
-{
-       int fd;
-       const char *path = kernel_get_module_path(module_name);
-
-       pr_debug2("Use file %s for %s\n", path, module_name);
-       if (path) {
-               fd = open(path, O_RDONLY);
-               if (fd >= 0) {
-                       *file_name = strdup(path);
-                       return fd;
-               }
-       }
-       /* If failed, try to call standard method */
-       return dwfl_linux_kernel_find_elf(mod, userdata, module_name, base,
-                                         file_name, elfp);
-}
-
-static const Dwfl_Callbacks kernel_callbacks = {
-       .find_debuginfo = dwfl_standard_find_debuginfo,
-       .debuginfo_path = &debuginfo_path,
-
-       .find_elf = __linux_kernel_find_elf,
-       .section_address = dwfl_linux_kernel_module_section_address,
-};
-
-/* Get a Dwarf from live kernel image */
-static int debuginfo__init_online_kernel_dwarf(struct debuginfo *dbg,
-                                              Dwarf_Addr addr)
-{
-       dbg->dwfl = dwfl_begin(&kernel_callbacks);
-       if (!dbg->dwfl)
-               return -EINVAL;
-
-       /* Load the kernel dwarves: Don't care the result here */
-       dwfl_linux_kernel_report_kernel(dbg->dwfl);
-       dwfl_linux_kernel_report_modules(dbg->dwfl);
-
-       dbg->dbg = dwfl_addrdwarf(dbg->dwfl, addr, &dbg->bias);
-       /* Here, check whether we could get a real dwarf */
-       if (!dbg->dbg) {
-               pr_debug("Failed to find kernel dwarf at %lx\n",
-                        (unsigned long)addr);
-               dwfl_end(dbg->dwfl);
-               memset(dbg, 0, sizeof(*dbg));
-               return -ENOENT;
-       }
-
-       return 0;
-}
-#else
-/* With older elfutils, this just support kernel module... */
-static int debuginfo__init_online_kernel_dwarf(struct debuginfo *dbg,
-                                              Dwarf_Addr addr __maybe_unused)
-{
-       const char *path = kernel_get_module_path("kernel");
-
-       if (!path) {
-               pr_err("Failed to find vmlinux path\n");
-               return -ENOENT;
-       }
-
-       pr_debug2("Use file %s for debuginfo\n", path);
-       return debuginfo__init_offline_dwarf(dbg, path);
-}
-#endif
-
-struct debuginfo *debuginfo__new(const char *path)
+static struct debuginfo *__debuginfo__new(const char *path)
 {
        struct debuginfo *dbg = zalloc(sizeof(*dbg));
        if (!dbg)
@@ -228,21 +98,44 @@ struct debuginfo *debuginfo__new(const char *path)
 
        if (debuginfo__init_offline_dwarf(dbg, path) < 0)
                zfree(&dbg);
-
+       if (dbg)
+               pr_debug("Open Debuginfo file: %s\n", path);
        return dbg;
 }
 
-struct debuginfo *debuginfo__new_online_kernel(unsigned long addr)
-{
-       struct debuginfo *dbg = zalloc(sizeof(*dbg));
+enum dso_binary_type distro_dwarf_types[] = {
+       DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
+       DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
+       DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
+       DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
+       DSO_BINARY_TYPE__NOT_FOUND,
+};
 
-       if (!dbg)
-               return NULL;
+struct debuginfo *debuginfo__new(const char *path)
+{
+       enum dso_binary_type *type;
+       char buf[PATH_MAX], nil = '\0';
+       struct dso *dso;
+       struct debuginfo *dinfo = NULL;
+
+       /* Try to open distro debuginfo files */
+       dso = dso__new(path);
+       if (!dso)
+               goto out;
 
-       if (debuginfo__init_online_kernel_dwarf(dbg, (Dwarf_Addr)addr) < 0)
-               zfree(&dbg);
+       for (type = distro_dwarf_types;
+            !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND;
+            type++) {
+               if (dso__read_binary_type_filename(dso, *type, &nil,
+                                                  buf, PATH_MAX) < 0)
+                       continue;
+               dinfo = __debuginfo__new(buf);
+       }
+       dso__delete(dso);
 
-       return dbg;
+out:
+       /* if failed to open all distro debuginfo, open given binary */
+       return dinfo ? : __debuginfo__new(path);
 }
 
 void debuginfo__delete(struct debuginfo *dbg)
@@ -880,7 +773,7 @@ static int find_probe_point_by_line(struct probe_finder *pf)
 }
 
 /* Find lines which match lazy pattern */
-static int find_lazy_match_lines(struct list_head *head,
+static int find_lazy_match_lines(struct intlist *list,
                                 const char *fname, const char *pat)
 {
        FILE *fp;
@@ -901,7 +794,7 @@ static int find_lazy_match_lines(struct list_head *head,
                        line[len - 1] = '\0';
 
                if (strlazymatch(line, pat)) {
-                       line_list__add_line(head, linenum);
+                       intlist__add(list, linenum);
                        count++;
                }
                linenum++;
@@ -924,7 +817,7 @@ static int probe_point_lazy_walker(const char *fname, int lineno,
        Dwarf_Die *sc_die, die_mem;
        int ret;
 
-       if (!line_list__has_line(&pf->lcache, lineno) ||
+       if (!intlist__has_entry(pf->lcache, lineno) ||
            strtailcmp(fname, pf->fname) != 0)
                return 0;
 
@@ -952,9 +845,9 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf)
 {
        int ret = 0;
 
-       if (list_empty(&pf->lcache)) {
+       if (intlist__empty(pf->lcache)) {
                /* Matching lazy line pattern */
-               ret = find_lazy_match_lines(&pf->lcache, pf->fname,
+               ret = find_lazy_match_lines(pf->lcache, pf->fname,
                                            pf->pev->point.lazy_line);
                if (ret <= 0)
                        return ret;
@@ -1096,7 +989,9 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
 #endif
 
        off = 0;
-       line_list__init(&pf->lcache);
+       pf->lcache = intlist__new(NULL);
+       if (!pf->lcache)
+               return -ENOMEM;
 
        /* Fastpath: lookup by function name from .debug_pubnames section */
        if (pp->function) {
@@ -1149,7 +1044,8 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
        }
 
 found:
-       line_list__free(&pf->lcache);
+       intlist__delete(pf->lcache);
+       pf->lcache = NULL;
 
        return ret;
 }
@@ -1537,7 +1433,7 @@ static int line_range_add_line(const char *src, unsigned int lineno,
                if (lr->path == NULL)
                        return -ENOMEM;
        }
-       return line_list__add_line(&lr->line_list, lineno);
+       return intlist__add(lr->line_list, lineno);
 }
 
 static int line_range_walk_cb(const char *fname, int lineno,
@@ -1565,7 +1461,7 @@ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf)
 
        /* Update status */
        if (ret >= 0)
-               if (!list_empty(&lf->lr->line_list))
+               if (!intlist__empty(lf->lr->line_list))
                        ret = lf->found = 1;
                else
                        ret = 0;        /* Lines are not found */
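
Note: the hand-rolled line_node list (line_list__add_line(), line_list__has_line(), line_list__free()) is gone; the lazy-line cache and line_range now use the generic intlist from tools/perf. A minimal sketch of the lifecycle, limited to the intlist calls that appear in the hunks above; the surrounding probe_finder state is omitted.

    /*
     * Sketch of the lazy-line cache after the conversion.  Only intlist
     * calls visible in this diff are used; pr_debug() and the error value
     * come from perf's own headers.
     */
    #include <errno.h>
    #include "intlist.h"
    #include "debug.h"

    static int cache_lazy_line(int lineno)
    {
            struct intlist *lcache;

            lcache = intlist__new(NULL);            /* replaces line_list__init() */
            if (!lcache)
                    return -ENOMEM;

            intlist__add(lcache, lineno);           /* replaces line_list__add_line() */

            if (!intlist__empty(lcache) && intlist__has_entry(lcache, lineno))
                    pr_debug("line %d is cached\n", lineno);

            intlist__delete(lcache);                /* replaces line_list__free() */
            return 0;
    }
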
index ffc33cdd25cc343816fd9e05586832bf48b82205..92590b2c7e1ce650080652839e5506bf98d9093c 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <stdbool.h>
 #include "util.h"
+#include "intlist.h"
 #include "probe-event.h"
 
 #define MAX_PROBE_BUFFER       1024
@@ -29,8 +30,8 @@ struct debuginfo {
        Dwarf_Addr      bias;
 };
 
+/* This also tries to open distro debuginfo */
 extern struct debuginfo *debuginfo__new(const char *path);
-extern struct debuginfo *debuginfo__new_online_kernel(unsigned long addr);
 extern void debuginfo__delete(struct debuginfo *dbg);
 
 /* Find probe_trace_events specified by perf_probe_event from debuginfo */
@@ -66,7 +67,7 @@ struct probe_finder {
        const char              *fname;         /* Real file name */
        Dwarf_Die               cu_die;         /* Current CU */
        Dwarf_Die               sp_die;
-       struct list_head        lcache;         /* Line cache for lazy match */
+       struct intlist          *lcache;        /* Line cache for lazy match */
 
        /* For variable searching */
 #if _ELFUTILS_PREREQ(0, 142)
index 595bfc73d2ed28cf14feee7daf73c76d92527223..16a475a7d492177623062143434488116cdf2a38 100644 (file)
@@ -17,6 +17,6 @@ util/xyarray.c
 util/cgroup.c
 util/rblist.c
 util/strlist.c
-util/fs.c
+../lib/api/fs/fs.c
 util/trace-event.c
 ../../lib/rbtree.c
index 373762501dadced6c476235ad8170a5241e72801..049e0a09ccd362b6f79192fe769f75648f3730b1 100644 (file)
@@ -2,7 +2,7 @@
 #include "evsel.h"
 #include "cpumap.h"
 #include "parse-events.h"
-#include "fs.h"
+#include <api/fs/fs.h>
 #include "util.h"
 
 typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
index 5da6ce74c676722ff13ae858c1bb579ba353edbf..55960f22233c4c994feff352d2e458113a4f1db4 100644 (file)
@@ -702,11 +702,12 @@ static void regs_dump__printf(u64 mask, u64 *regs)
        }
 }
 
-static void regs_user__printf(struct perf_sample *sample, u64 mask)
+static void regs_user__printf(struct perf_sample *sample)
 {
        struct regs_dump *user_regs = &sample->user_regs;
 
        if (user_regs->regs) {
+               u64 mask = user_regs->mask;
                printf("... user regs: mask 0x%" PRIx64 "\n", mask);
                regs_dump__printf(mask, user_regs->regs);
        }
@@ -793,7 +794,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
        if (!dump_trace)
                return;
 
-       printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
+       printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
               event->header.misc, sample->pid, sample->tid, sample->ip,
               sample->period, sample->addr);
 
@@ -806,7 +807,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
                branch_stack__printf(sample);
 
        if (sample_type & PERF_SAMPLE_REGS_USER)
-               regs_user__printf(sample, evsel->attr.sample_regs_user);
+               regs_user__printf(sample);
 
        if (sample_type & PERF_SAMPLE_STACK_USER)
                stack_user__printf(&sample->user_stack);
index 516d19fb999bcfaddb6511e0292ea341a0e74aa6..3b7dbf51d4a93bd425fc41363d98a47acd7288ff 100644 (file)
@@ -506,6 +506,8 @@ int filename__read_debuglink(const char *filename, char *debuglink,
        /* the start of this section is a zero-terminated string */
        strncpy(debuglink, data->d_buf, size);
 
+       err = 0;
+
 out_elf_end:
        elf_end(elf);
 out_close:
index e89afc097d8ad2d4b818f07e02321084110e6ddc..95e249779931216f5cbeb5b09c26ea0655a0fcf7 100644 (file)
@@ -410,7 +410,7 @@ struct symbol *dso__find_symbol(struct dso *dso,
        return symbols__find(&dso->symbols[type], addr);
 }
 
-struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
+static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
 {
        return symbols__first(&dso->symbols[type]);
 }
@@ -1251,6 +1251,46 @@ out_failure:
        return -1;
 }
 
+static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
+                                          enum dso_binary_type type)
+{
+       switch (type) {
+       case DSO_BINARY_TYPE__JAVA_JIT:
+       case DSO_BINARY_TYPE__DEBUGLINK:
+       case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
+       case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
+       case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
+       case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
+       case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
+               return !kmod && dso->kernel == DSO_TYPE_USER;
+
+       case DSO_BINARY_TYPE__KALLSYMS:
+       case DSO_BINARY_TYPE__VMLINUX:
+       case DSO_BINARY_TYPE__KCORE:
+               return dso->kernel == DSO_TYPE_KERNEL;
+
+       case DSO_BINARY_TYPE__GUEST_KALLSYMS:
+       case DSO_BINARY_TYPE__GUEST_VMLINUX:
+       case DSO_BINARY_TYPE__GUEST_KCORE:
+               return dso->kernel == DSO_TYPE_GUEST_KERNEL;
+
+       case DSO_BINARY_TYPE__GUEST_KMODULE:
+       case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
+               /*
+                * kernel modules know their symtab type - it's set when
+                * creating a module dso in machine__new_module().
+                */
+               return kmod && dso->symtab_type == type;
+
+       case DSO_BINARY_TYPE__BUILD_ID_CACHE:
+               return true;
+
+       case DSO_BINARY_TYPE__NOT_FOUND:
+       default:
+               return false;
+       }
+}
+
 int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
 {
        char *name;
@@ -1261,6 +1301,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
        int ss_pos = 0;
        struct symsrc ss_[2];
        struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
+       bool kmod;
 
        dso__set_loaded(dso, map->type);
 
@@ -1301,7 +1342,11 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
        if (!name)
                return -1;
 
-       /* Iterate over candidate debug images.
+       kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+
+       /*
+        * Iterate over candidate debug images.
         * Keep track of "interesting" ones (those which have a symtab, dynsym,
         * and/or opd section) for processing.
         */
@@ -1311,6 +1356,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
 
                enum dso_binary_type symtab_type = binary_type_symtab[i];
 
+               if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
+                       continue;
+
                if (dso__read_binary_type_filename(dso, symtab_type,
                                                   root_dir, name, PATH_MAX))
                        continue;
@@ -1353,15 +1401,10 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
        if (!runtime_ss && syms_ss)
                runtime_ss = syms_ss;
 
-       if (syms_ss) {
-               int km;
-
-               km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
-                    dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
-               ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km);
-       } else {
+       if (syms_ss)
+               ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, kmod);
+       else
                ret = -1;
-       }
 
        if (ret > 0) {
                int nr_plt;
index fffe2888a1c7c123d6e7de3f11eb74dfd4f4ebf8..501e4e722e8e853258e3e06b8448cc5792005971 100644 (file)
@@ -79,6 +79,17 @@ struct symbol {
 void symbol__delete(struct symbol *sym);
 void symbols__delete(struct rb_root *symbols);
 
+/* symbols__for_each_entry - iterate over symbols (rb_root)
+ *
+ * @symbols: the rb_root of symbols
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @nd: the 'struct rb_node *' to use as a temporary storage
+ */
+#define symbols__for_each_entry(symbols, pos, nd)                      \
+       for (nd = rb_first(symbols);                                    \
+            nd && (pos = rb_entry(nd, struct symbol, rb_node));        \
+            nd = rb_next(nd))
+
 static inline size_t symbol__size(const struct symbol *sym)
 {
        return sym->end - sym->start + 1;
@@ -175,7 +186,7 @@ struct addr_location {
        struct symbol *sym;
        u64           addr;
        char          level;
-       bool          filtered;
+       u8            filtered;
        u8            cpumode;
        s32           cpu;
 };
@@ -223,7 +234,6 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
                                u64 addr);
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
                                        const char *name);
-struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
 
 int filename__read_build_id(const char *filename, void *bf, size_t size);
 int sysfs__read_build_id(const char *filename, void *bf, size_t size);
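
Note: symbols__for_each_entry() gives callers a direct rb-tree walk over a symbol tree; map__for_each_symbol() used in the probe-event.c changes above builds on the same idea. A hedged usage sketch follows; the helper name and the FILE * plumbing are made up for illustration, and it assumes perf's struct dso keeps one tree per map type in dso->symbols[type].

    /*
     * Hypothetical helper, for illustration only: walk every function
     * symbol of a dso with the new symbols__for_each_entry() iterator.
     */
    #include <inttypes.h>
    #include <stdio.h>
    #include "symbol.h"

    static size_t dso__fprintf_function_syms(struct dso *dso, FILE *fp)
    {
            struct symbol *pos;
            struct rb_node *nd;
            size_t printed = 0;

            symbols__for_each_entry(&dso->symbols[MAP__FUNCTION], pos, nd)
                    printed += fprintf(fp, "%#" PRIx64 " %s\n", pos->start, pos->name);

            return printed;
    }
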
index 0358882c89108723616c4eca0431fed0aa6be200..3ce0498bdae60bb24fe558006e1e02b4ed9ba2b9 100644 (file)
@@ -142,3 +142,24 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
 
        return 0;
 }
+
+void thread__find_cpumode_addr_location(struct thread *thread,
+                                       struct machine *machine,
+                                       enum map_type type, u64 addr,
+                                       struct addr_location *al)
+{
+       size_t i;
+       const u8 cpumodes[] = {
+               PERF_RECORD_MISC_USER,
+               PERF_RECORD_MISC_KERNEL,
+               PERF_RECORD_MISC_GUEST_USER,
+               PERF_RECORD_MISC_GUEST_KERNEL
+       };
+
+       for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
+               thread__find_addr_location(thread, machine, cpumodes[i], type,
+                                          addr, al);
+               if (al->map)
+                       break;
+       }
+}
index 5b856bf942e11fa9691c28551a88b53b4ac3d5f2..9b29f085aedeaa637da7bb25e571c07a19697cd0 100644 (file)
@@ -44,12 +44,6 @@ void thread__insert_map(struct thread *thread, struct map *map);
 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
 size_t thread__fprintf(struct thread *thread, FILE *fp);
 
-static inline struct map *thread__find_map(struct thread *thread,
-                                          enum map_type type, u64 addr)
-{
-       return thread ? map_groups__find(&thread->mg, type, addr) : NULL;
-}
-
 void thread__find_addr_map(struct thread *thread, struct machine *machine,
                           u8 cpumode, enum map_type type, u64 addr,
                           struct addr_location *al);
@@ -58,6 +52,11 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine,
                                u8 cpumode, enum map_type type, u64 addr,
                                struct addr_location *al);
 
+void thread__find_cpumode_addr_location(struct thread *thread,
+                                       struct machine *machine,
+                                       enum map_type type, u64 addr,
+                                       struct addr_location *al);
+
 static inline void *thread__priv(struct thread *thread)
 {
        return thread->priv;
index e0d6d07f68485167f2b05260fd932a288b42eec9..c36636fd825b46481dfcadbc745014ed94f38650 100644 (file)
@@ -126,6 +126,7 @@ void event_format__print(struct event_format *event,
        trace_seq_init(&s);
        pevent_event_info(&s, event, &record);
        trace_seq_do_printf(&s);
+       trace_seq_destroy(&s);
 }
 
 void parse_proc_kallsyms(struct pevent *pevent,
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
new file mode 100644 (file)
index 0000000..67db73e
--- /dev/null
@@ -0,0 +1,210 @@
+#include <linux/compiler.h>
+#include <elfutils/libdw.h>
+#include <elfutils/libdwfl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include "unwind.h"
+#include "unwind-libdw.h"
+#include "machine.h"
+#include "thread.h"
+#include "types.h"
+#include "event.h"
+#include "perf_regs.h"
+
+static char *debuginfo_path;
+
+static const Dwfl_Callbacks offline_callbacks = {
+       .find_debuginfo         = dwfl_standard_find_debuginfo,
+       .debuginfo_path         = &debuginfo_path,
+       .section_address        = dwfl_offline_section_address,
+};
+
+static int __report_module(struct addr_location *al, u64 ip,
+                           struct unwind_info *ui)
+{
+       Dwfl_Module *mod;
+       struct dso *dso = NULL;
+
+       thread__find_addr_location(ui->thread, ui->machine,
+                                  PERF_RECORD_MISC_USER,
+                                  MAP__FUNCTION, ip, al);
+
+       if (al->map)
+               dso = al->map->dso;
+
+       if (!dso)
+               return 0;
+
+       mod = dwfl_addrmodule(ui->dwfl, ip);
+       if (!mod)
+               mod = dwfl_report_elf(ui->dwfl, dso->short_name,
+                                     dso->long_name, -1, al->map->start,
+                                     false);
+
+       return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
+}
+
+static int report_module(u64 ip, struct unwind_info *ui)
+{
+       struct addr_location al;
+
+       return __report_module(&al, ip, ui);
+}
+
+static int entry(u64 ip, struct unwind_info *ui)
+
+{
+       struct unwind_entry e;
+       struct addr_location al;
+
+       if (__report_module(&al, ip, ui))
+               return -1;
+
+       e.ip  = ip;
+       e.map = al.map;
+       e.sym = al.sym;
+
+       pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
+                al.sym ? al.sym->name : "''",
+                ip,
+                al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
+
+       return ui->cb(&e, ui->arg);
+}
+
+static pid_t next_thread(Dwfl *dwfl, void *arg, void **thread_argp)
+{
+       /* We want only single thread to be processed. */
+       if (*thread_argp != NULL)
+               return 0;
+
+       *thread_argp = arg;
+       return dwfl_pid(dwfl);
+}
+
+static int access_dso_mem(struct unwind_info *ui, Dwarf_Addr addr,
+                         Dwarf_Word *data)
+{
+       struct addr_location al;
+       ssize_t size;
+
+       thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER,
+                             MAP__FUNCTION, addr, &al);
+       if (!al.map) {
+               pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
+               return -1;
+       }
+
+       if (!al.map->dso)
+               return -1;
+
+       size = dso__data_read_addr(al.map->dso, al.map, ui->machine,
+                                  addr, (u8 *) data, sizeof(*data));
+
+       return !(size == sizeof(*data));
+}
+
+static bool memory_read(Dwfl *dwfl __maybe_unused, Dwarf_Addr addr, Dwarf_Word *result,
+                       void *arg)
+{
+       struct unwind_info *ui = arg;
+       struct stack_dump *stack = &ui->sample->user_stack;
+       u64 start, end;
+       int offset;
+       int ret;
+
+       ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
+       if (ret)
+               return false;
+
+       end = start + stack->size;
+
+       /* Check overflow. */
+       if (addr + sizeof(Dwarf_Word) < addr)
+               return false;
+
+       if (addr < start || addr + sizeof(Dwarf_Word) > end) {
+               ret = access_dso_mem(ui, addr, result);
+               if (ret) {
+                       pr_debug("unwind: access_mem 0x%" PRIx64 " not inside range"
+                                " 0x%" PRIx64 "-0x%" PRIx64 "\n",
+                               addr, start, end);
+                       return false;
+               }
+               return true;
+       }
+
+       offset  = addr - start;
+       *result = *(Dwarf_Word *)&stack->data[offset];
+       pr_debug("unwind: access_mem addr 0x%" PRIx64 ", val %lx, offset %d\n",
+                addr, (unsigned long)*result, offset);
+       return true;
+}
+
+static const Dwfl_Thread_Callbacks callbacks = {
+       .next_thread            = next_thread,
+       .memory_read            = memory_read,
+       .set_initial_registers  = libdw__arch_set_initial_registers,
+};
+
+static int
+frame_callback(Dwfl_Frame *state, void *arg)
+{
+       struct unwind_info *ui = arg;
+       Dwarf_Addr pc;
+
+       if (!dwfl_frame_pc(state, &pc, NULL)) {
+               pr_err("%s", dwfl_errmsg(-1));
+               return DWARF_CB_ABORT;
+       }
+
+       return entry(pc, ui) || !(--ui->max_stack) ?
+              DWARF_CB_ABORT : DWARF_CB_OK;
+}
+
+int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+                       struct machine *machine, struct thread *thread,
+                       struct perf_sample *data,
+                       int max_stack)
+{
+       struct unwind_info ui = {
+               .sample         = data,
+               .thread         = thread,
+               .machine        = machine,
+               .cb             = cb,
+               .arg            = arg,
+               .max_stack      = max_stack,
+       };
+       Dwarf_Word ip;
+       int err = -EINVAL;
+
+       if (!data->user_regs.regs)
+               return -EINVAL;
+
+       ui.dwfl = dwfl_begin(&offline_callbacks);
+       if (!ui.dwfl)
+               goto out;
+
+       err = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
+       if (err)
+               goto out;
+
+       err = report_module(ip, &ui);
+       if (err)
+               goto out;
+
+       if (!dwfl_attach_state(ui.dwfl, EM_NONE, thread->tid, &callbacks, &ui))
+               goto out;
+
+       err = dwfl_getthread_frames(ui.dwfl, thread->tid, frame_callback, &ui);
+
+       if (err && !ui.max_stack)
+               err = 0;
+
+ out:
+       if (err)
+               pr_debug("unwind: failed with '%s'\n", dwfl_errmsg(-1));
+
+       dwfl_end(ui.dwfl);
+       return 0;
+}
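
Note: the new unwinder hands the whole frame walk to elfutils: the perf sample supplies register and stack state through the three callbacks above (next_thread, memory_read, libdw__arch_set_initial_registers), and dwfl_getthread_frames() invokes frame_callback() once per frame. A condensed sketch of the call sequence, reusing the offline_callbacks and frame_callback defined in this file; arg is expected to be the struct unwind_info those callbacks use, and module reporting plus error strings are dropped.

    /*
     * Condensed flow only.  unwind__get_entries() above also reports the
     * DSO of the starting IP as a Dwfl module before walking frames.
     */
    static int unwind_flow_sketch(pid_t tid, const Dwfl_Thread_Callbacks *cbs,
                                  void *arg)
    {
            Dwfl *dwfl = dwfl_begin(&offline_callbacks);
            int err = -EINVAL;

            if (!dwfl)
                    return err;

            /* dwfl_attach_state() returns true on success */
            if (dwfl_attach_state(dwfl, EM_NONE, tid, cbs, arg))
                    err = dwfl_getthread_frames(dwfl, tid, frame_callback, arg);

            dwfl_end(dwfl);
            return err;
    }
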
diff --git a/tools/perf/util/unwind-libdw.h b/tools/perf/util/unwind-libdw.h
new file mode 100644 (file)
index 0000000..417a142
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef __PERF_UNWIND_LIBDW_H
+#define __PERF_UNWIND_LIBDW_H
+
+#include <elfutils/libdwfl.h>
+#include "event.h"
+#include "thread.h"
+#include "unwind.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg);
+
+struct unwind_info {
+       Dwfl                    *dwfl;
+       struct perf_sample      *sample;
+       struct machine          *machine;
+       struct thread           *thread;
+       unwind_entry_cb_t       cb;
+       void                    *arg;
+       int                     max_stack;
+};
+
+#endif /* __PERF_UNWIND_LIBDW_H */
similarity index 92%
rename from tools/perf/util/unwind.c
rename to tools/perf/util/unwind-libunwind.c
index 742f23bf35ff7fabaae020100ebe3d359191a3da..bd5768d74f0182e8c60c00e94ef19de2e4fbe02f 100644 (file)
@@ -86,7 +86,6 @@ struct unwind_info {
        struct perf_sample      *sample;
        struct machine          *machine;
        struct thread           *thread;
-       u64                     sample_uregs;
 };
 
 #define dw_read(ptr, type, end) ({     \
@@ -391,30 +390,13 @@ static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
        return !(size == sizeof(*data));
 }
 
-static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id,
-                    u64 sample_regs)
-{
-       int i, idx = 0;
-
-       if (!(sample_regs & (1 << id)))
-               return -EINVAL;
-
-       for (i = 0; i < id; i++) {
-               if (sample_regs & (1 << i))
-                       idx++;
-       }
-
-       *valp = regs->regs[idx];
-       return 0;
-}
-
 static int access_mem(unw_addr_space_t __maybe_unused as,
                      unw_word_t addr, unw_word_t *valp,
                      int __write, void *arg)
 {
        struct unwind_info *ui = arg;
        struct stack_dump *stack = &ui->sample->user_stack;
-       unw_word_t start, end;
+       u64 start, end;
        int offset;
        int ret;
 
@@ -424,8 +406,7 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
                return 0;
        }
 
-       ret = reg_value(&start, &ui->sample->user_regs, PERF_REG_SP,
-                       ui->sample_uregs);
+       ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
        if (ret)
                return ret;
 
@@ -438,8 +419,9 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
        if (addr < start || addr + sizeof(unw_word_t) >= end) {
                ret = access_dso_mem(ui, addr, valp);
                if (ret) {
-                       pr_debug("unwind: access_mem %p not inside range %p-%p\n",
-                               (void *)addr, (void *)start, (void *)end);
+                       pr_debug("unwind: access_mem %p not inside range"
+                                " 0x%" PRIx64 "-0x%" PRIx64 "\n",
+                                (void *) addr, start, end);
                        *valp = 0;
                        return ret;
                }
@@ -448,8 +430,8 @@ static int access_mem(unw_addr_space_t __maybe_unused as,
 
        offset = addr - start;
        *valp  = *(unw_word_t *)&stack->data[offset];
-       pr_debug("unwind: access_mem addr %p, val %lx, offset %d\n",
-                (void *)addr, (unsigned long)*valp, offset);
+       pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
+                (void *) addr, (unsigned long)*valp, offset);
        return 0;
 }
 
@@ -459,6 +441,7 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
 {
        struct unwind_info *ui = arg;
        int id, ret;
+       u64 val;
 
        /* Don't support write, I suspect we don't need it. */
        if (__write) {
@@ -471,16 +454,17 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
                return 0;
        }
 
-       id = unwind__arch_reg_id(regnum);
+       id = libunwind__arch_reg_id(regnum);
        if (id < 0)
                return -EINVAL;
 
-       ret = reg_value(valp, &ui->sample->user_regs, id, ui->sample_uregs);
+       ret = perf_reg_value(&val, &ui->sample->user_regs, id);
        if (ret) {
                pr_err("unwind: can't read reg %d\n", regnum);
                return ret;
        }
 
+       *valp = (unw_word_t) val;
        pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp);
        return 0;
 }
@@ -563,7 +547,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
                unw_word_t ip;
 
                unw_get_reg(&c, UNW_REG_IP, &ip);
-               ret = entry(ip, ui->thread, ui->machine, cb, arg);
+               ret = ip ? entry(ip, ui->thread, ui->machine, cb, arg) : 0;
        }
 
        unw_destroy_addr_space(addr_space);
@@ -572,13 +556,11 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct machine *machine, struct thread *thread,
-                       u64 sample_uregs, struct perf_sample *data,
-                       int max_stack)
+                       struct perf_sample *data, int max_stack)
 {
-       unw_word_t ip;
+       u64 ip;
        struct unwind_info ui = {
                .sample       = data,
-               .sample_uregs = sample_uregs,
                .thread       = thread,
                .machine      = machine,
        };
@@ -587,7 +569,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
        if (!data->user_regs.regs)
                return -EINVAL;
 
-       ret = reg_value(&ip, &data->user_regs, PERF_REG_IP, sample_uregs);
+       ret = perf_reg_value(&ip, &data->user_regs, PERF_REG_IP);
        if (ret)
                return ret;
 
@@ -595,5 +577,5 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
        if (ret)
                return -ENOMEM;
 
-       return get_entries(&ui, cb, arg, max_stack);
+       return --max_stack > 0 ? get_entries(&ui, cb, arg, max_stack) : 0;
 }
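
Note: the per-file reg_value() helper and the sample_uregs plumbing disappear because perf_reg_value() now performs the same lookup in one shared place: the kernel dumps only the registers selected by the sample mask, densely packed, so a register's value sits at the index given by the count of lower mask bits that are set (the mask now travels in user_regs->mask, as the session.c hunk above shows). A re-statement of that lookup with simplified types; u64 is perf's fixed-width typedef.

    /* Sketch of the packed-register lookup that perf_reg_value() centralizes. */
    #include <errno.h>

    static int sampled_reg_value(u64 *valp, const u64 *regs, u64 mask, int id)
    {
            int i, idx = 0;

            if (!(mask & (1ULL << id)))
                    return -EINVAL;         /* this register was not sampled */

            for (i = 0; i < id; i++)        /* count selected registers below id */
                    if (mask & (1ULL << i))
                            idx++;

            *valp = regs[idx];
            return 0;
    }
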
index d5966f49e22cc24fb91e8f53307504cfed7e290d..b031316f221a7de76c1930d8289ba03300748672 100644 (file)
@@ -13,24 +13,25 @@ struct unwind_entry {
 
 typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
 
-#ifdef HAVE_LIBUNWIND_SUPPORT
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct machine *machine,
                        struct thread *thread,
-                       u64 sample_uregs,
                        struct perf_sample *data, int max_stack);
-int unwind__arch_reg_id(int regnum);
+/* libunwind specific */
+#ifdef HAVE_LIBUNWIND_SUPPORT
+int libunwind__arch_reg_id(int regnum);
+#endif
 #else
 static inline int
 unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
                    void *arg __maybe_unused,
                    struct machine *machine __maybe_unused,
                    struct thread *thread __maybe_unused,
-                   u64 sample_uregs __maybe_unused,
                    struct perf_sample *data __maybe_unused,
                    int max_stack __maybe_unused)
 {
        return 0;
 }
-#endif /* HAVE_LIBUNWIND_SUPPORT */
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
 #endif /* __UNWIND_H */
index 42ad667bb317570e6f22216598181d1e2a8c9798..9f66549562bd6959907323573edcb91badd97380 100644 (file)
@@ -1,6 +1,6 @@
 #include "../perf.h"
 #include "util.h"
-#include "fs.h"
+#include <api/fs/fs.h>
 #include <sys/mman.h>
 #ifdef HAVE_BACKTRACE_SUPPORT
 #include <execinfo.h>
index 587561d7c035a053f590fe8d33a591f8ee78c477..9b17e810ddc39c96f4b7f7e2fead951455bb9c8e 100644 (file)
@@ -96,6 +96,7 @@ identify_qemu () {
                echo qemu-system-ppc64
        else
                echo Cannot figure out what qemu command to use! 1>&2
+               echo file $1 output: $u
                # Usually this will be one of /usr/bin/qemu-system-*
                # Use RCU_QEMU_CMD environment variable or appropriate
                # argument to top-level script.
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh
new file mode 100755 (executable)
index 0000000..829186e
--- /dev/null
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Analyze a given results directory for locktorture progress.
+#
+# Usage: sh kvm-recheck-lock.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2014
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+if test -d $i
+then
+       :
+else
+       echo Unreadable results directory: $i
+       exit 1
+fi
+
+configfile=`echo $i | sed -e 's/^.*\///'`
+ncs=`grep "Writes:  Total:" $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* Total: //' -e 's/ .*$//'`
+if test -z "$ncs"
+then
+       echo $configfile
+else
+       title="$configfile ------- $ncs acquisitions/releases"
+       dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+       if test -z "$dur"
+       then
+               :
+       else
+               ncsps=`awk -v ncs=$ncs -v dur=$dur '
+                       BEGIN { print ncs / dur }' < /dev/null`
+               title="$title ($ncsps per second)"
+       fi
+       echo $title
+fi
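
For illustration, the extraction this new kvm-recheck-lock.sh performs can be exercised on fabricated data. The console.log and qemu-cmd contents below are made up and only mimic the formats the script greps for, so this is a sketch rather than output from a real locktorture run:

#!/bin/bash
# Sketch of the kvm-recheck-lock.sh extraction, run against fabricated data.
d=`mktemp -d`
echo "locktorture: Writes:  Total: 4213 Max/Min: 0/0 Fail: 0" > $d/console.log
echo "qemu-system-x86_64 -append locktorture.shutdown_secs=1800 foo=bar" > $d/qemu-cmd
ncs=`grep "Writes:  Total:" $d/console.log | tail -1 | sed -e 's/^.* Total: //' -e 's/ .*$//'`
dur=`sed -e 's/^.* locktorture.shutdown_secs=//' -e 's/ .*$//' < $d/qemu-cmd`
awk -v ncs=$ncs -v dur=$dur 'BEGIN { print ncs " acquisitions/releases (" ncs / dur " per second)" }' < /dev/null
rm -rf $d
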
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
new file mode 100755 (executable)
index 0000000..d75b1dc
--- /dev/null
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcutorture progress.
+#
+# Usage: sh kvm-recheck-rcu.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2014
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+if test -d $i
+then
+       :
+else
+       echo Unreadable results directory: $i
+       exit 1
+fi
+
+configfile=`echo $i | sed -e 's/^.*\///'`
+ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
+if test -z "$ngps"
+then
+       echo $configfile
+else
+       title="$configfile ------- $ngps grace periods"
+       dur=`sed -e 's/^.* rcutorture.shutdown_secs=//' -e 's/ .*$//' < $i/qemu-cmd 2> /dev/null`
+       if test -z "$dur"
+       then
+               :
+       else
+               ngpsps=`awk -v ngps=$ngps -v dur=$dur '
+                       BEGIN { print ngps / dur }' < /dev/null`
+               title="$title ($ngpsps per second)"
+       fi
+       echo $title
+fi
index baef09f3469b1dbb914ad0459076adbd027a1cd0..a44daaa259a912e91837e102e6d35c290a17d21d 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Given the results directories for previous KVM runs of rcutorture,
+# Given the results directories for previous KVM-based torture runs,
 # check the build and console output for errors.  Given a directory
 # containing results directories, this recursively checks them all.
 #
 PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
 for rd in "$@"
 do
+       firsttime=1
        dirs=`find $rd -name Make.defconfig.out -print | sort | sed -e 's,/[^/]*$,,' | sort -u`
        for i in $dirs
        do
-               configfile=`echo $i | sed -e 's/^.*\///'`
-               echo $configfile
+               if test -n "$firsttime"
+               then
+                       firsttime=""
+                       resdir=`echo $i | sed -e 's,/$,,' -e 's,/[^/]*$,,'`
+                       head -1 $resdir/log
+               fi
+               TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+               kvm-recheck-${TORTURE_SUITE}.sh $i
                configcheck.sh $i/.config $i/ConfigFragment
                parse-build.sh $i/Make.out $configfile
                parse-rcutorture.sh $i/console.log $configfile
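
kvm-recheck.sh now dispatches to a per-suite script named kvm-recheck-${TORTURE_SUITE}.sh, keyed off a TORTURE_SUITE file that kvm.sh writes into the results directory. A self-contained sketch of that convention, using a fabricated results layout (the date stamp and configuration name are hypothetical):

#!/bin/bash
# Sketch of the per-suite dispatch in kvm-recheck.sh, on a fabricated layout.
root=`mktemp -d`
res=$root/2014.04.01-13:23:53
mkdir -p $res/LOCK01
echo lock > $res/TORTURE_SUITE
i=$res/LOCK01
TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
echo "would run: kvm-recheck-${TORTURE_SUITE}.sh $i"
rm -rf $root
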
similarity index 79%
rename from tools/testing/selftests/rcutorture/bin/kvm-test-1-rcu.sh
rename to tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
index 151b2378893584e028798e0a4aa82f115c8e67ce..94b28bb37d36f9d29ddc8edc104feeaac7d3be91 100755 (executable)
@@ -6,15 +6,15 @@
 # Execute this in the source tree.  Do not run it as a background task
 # because qemu does not seem to like that much.
 #
-# Usage: sh kvm-test-1-rcu.sh config builddir resdir minutes qemu-args bootargs
+# Usage: sh kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
 #
-# qemu-args defaults to "" -- you will want "-nographic" if running headless.
-# bootargs defaults to "root=/dev/sda noapic selinux=0 console=ttyS0"
-#                      "initcall_debug debug rcutorture.stat_interval=15"
-#                      "rcutorture.shutdown_secs=$((minutes * 60))"
-#                      "rcutorture.rcutorture_runnable=1"
+# qemu-args defaults to "-nographic", along with arguments specifying the
+#                      number of CPUs and other options generated from
+#                      the underlying CPU architecture.
+# boot_args defaults to value returned by the per_version_boot_params
+#                      shell function.
 #
-# Anything you specify for either qemu-args or bootargs is appended to
+# Anything you specify for either qemu-args or boot_args is appended to
 # the default values.  The "-smp" value is deduced from the contents of
 # the config fragment.
 #
 
 grace=120
 
-T=/tmp/kvm-test-1-rcu.sh.$$
+T=/tmp/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
 
 . $KVM/bin/functions.sh
 . $KVPATH/ver_functions.sh
 
 config_template=${1}
+config_dir=`echo $config_template | sed -e 's,/[^/]*$,,'`
 title=`echo $config_template | sed -e 's/^.*\///'`
 builddir=${2}
 if test -z "$builddir" -o ! -d "$builddir" -o ! -w "$builddir"
 then
-       echo "kvm-test-1-rcu.sh :$builddir: Not a writable directory, cannot build into it"
+       echo "kvm-test-1-run.sh :$builddir: Not a writable directory, cannot build into it"
        exit 1
 fi
 resdir=${3}
 if test -z "$resdir" -o ! -d "$resdir" -o ! -w "$resdir"
 then
-       echo "kvm-test-1-rcu.sh :$resdir: Not a writable directory, cannot build into it"
+       echo "kvm-test-1-run.sh :$resdir: Not a writable directory, cannot store results into it"
        exit 1
 fi
 cp $config_template $resdir/ConfigFragment
 echo ' ---' `date`: Starting build
 echo ' ---' Kconfig fragment at: $config_template >> $resdir/log
-cat << '___EOF___' >> $T
-CONFIG_RCU_TORTURE_TEST=y
-___EOF___
+if test -r "$config_dir/CFcommon"
+then
+       cat < $config_dir/CFcommon >> $T
+fi
 # Optimizations below this point
 # CONFIG_USB=n
 # CONFIG_SECURITY=n
@@ -96,11 +98,23 @@ then
        cp $builddir/.config $resdir
        cp $builddir/arch/x86/boot/bzImage $resdir
        parse-build.sh $resdir/Make.out $title
+       if test -f $builddir.wait
+       then
+               mv $builddir.wait $builddir.ready
+       fi
 else
        cp $builddir/Make*.out $resdir
        echo Build failed, not running KVM, see $resdir.
+       if test -f $builddir.wait
+       then
+               mv $builddir.wait $builddir.ready
+       fi
        exit 1
 fi
+while test -f $builddir.ready
+do
+       sleep 1
+done
 minutes=$4
 seconds=$(($minutes * 60))
 qemu_args=$5
@@ -111,9 +125,10 @@ kstarttime=`awk 'BEGIN { print systime() }' < /dev/null`
 echo ' ---' `date`: Starting kernel
 
 # Determine the appropriate flavor of qemu command.
-QEMU="`identify_qemu $builddir/vmlinux.o`"
+QEMU="`identify_qemu $builddir/vmlinux`"
 
 # Generate -smp qemu argument.
+qemu_args="-nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $config_template`
 vcpus=`identify_qemu_vcpus`
 if test $cpu_count -gt $vcpus
@@ -133,12 +148,8 @@ qemu_append="`identify_qemu_append "$QEMU"`"
 
 # Pull in Kconfig-fragment boot parameters
 boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
-# Generate CPU-hotplug boot parameters
-boot_args="`rcutorture_param_onoff "$boot_args" $builddir/.config`"
-# Generate rcu_barrier() boot parameter
-boot_args="`rcutorture_param_n_barrier_cbs "$boot_args"`"
-# Pull in standard rcutorture boot arguments
-boot_args="$boot_args rcutorture.stat_interval=15 rcutorture.shutdown_secs=$seconds rcutorture.rcutorture_runnable=1"
+# Generate kernel-version-specific boot parameters
+boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`"
 
 echo $QEMU $qemu_args -m 512 -kernel $builddir/arch/x86/boot/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 if test -n "$RCU_BUILDONLY"
@@ -188,5 +199,5 @@ then
 fi
 
 cp $builddir/console.log $resdir
-parse-rcutorture.sh $resdir/console.log $title
+parse-${TORTURE_SUITE}torture.sh $resdir/console.log $title
 parse-console.sh $resdir/console.log $title
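
The new $builddir.wait/$builddir.ready files implement a simple handshake: kvm-test-1-run.sh flips .wait to .ready when its build finishes, then holds off booting until kvm.sh (which created the .wait flag) removes .ready. A self-contained toy version of that protocol, with sleeps standing in for the build and the qemu run (paths and delays are illustrative only):

#!/bin/bash
# Toy .wait/.ready handshake between kvm.sh and kvm-test-1-run.sh.
b=`mktemp -u`                        # stands in for $builddir

# kvm.sh side: create the .wait flag before launching the per-run script.
touch $b.wait
(
        sleep 2                      # stands in for the kernel build
        mv $b.wait $b.ready          # build done, tell kvm.sh
        while test -f $b.ready       # wait for permission to "boot"
        do
                sleep 1
        done
        echo "starting kernel"       # stands in for launching qemu
) &

# kvm.sh side: wait for the build to finish, then release the run.
while test -f $b.wait
do
        sleep 1
done
rm -f $b.ready
wait
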
index 1b7923bf6a702a921040c86970cbe8d9dc47a2a6..5a78cbf55f066dd3a657e9bfd210d4129e002f83 100644 (file)
 scriptname=$0
 args="$*"
 
+T=/tmp/kvm.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
+
 dur=30
+dryrun=""
 KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
 PATH=${KVM}/bin:$PATH; export PATH
 builddir="${KVM}/b1"
 RCU_INITRD="$KVM/initrd"; export RCU_INITRD
 RCU_KMAKE_ARG=""; export RCU_KMAKE_ARG
+TORTURE_SUITE=rcu
 resdir=""
 configs=""
+cpus=0
 ds=`date +%Y.%m.%d-%H:%M:%S`
 kversion=""
 
@@ -49,7 +56,9 @@ usage () {
        echo "       --builddir absolute-pathname"
        echo "       --buildonly"
        echo "       --configs \"config-file list\""
+       echo "       --cpus N"
        echo "       --datestamp string"
+       echo "       --dryrun sched|script"
        echo "       --duration minutes"
        echo "       --interactive"
        echo "       --kmake-arg kernel-make-arguments"
@@ -58,8 +67,9 @@ usage () {
        echo "       --no-initrd"
        echo "       --qemu-args qemu-system-..."
        echo "       --qemu-cmd qemu-system-..."
-       echo "       --results absolute-pathname"
        echo "       --relbuilddir relative-pathname"
+       echo "       --results absolute-pathname"
+       echo "       --torture rcu"
        exit 1
 }
 
@@ -85,11 +95,21 @@ do
                configs="$2"
                shift
                ;;
+       --cpus)
+               checkarg --cpus "(number)" "$#" "$2" '^[0-9]*$' '^--'
+               cpus=$2
+               shift
+               ;;
        --datestamp)
                checkarg --datestamp "(relative pathname)" "$#" "$2" '^[^/]*$' '^--'
                ds=$2
                shift
                ;;
+       --dryrun)
+               checkarg --dryrun "sched|script" $# "$2" 'sched\|script' '^--'
+               dryrun=$2
+               shift
+               ;;
        --duration)
                checkarg --duration "(minutes)" $# "$2" '^[0-9]*$' '^error'
                dur=$2
@@ -138,6 +158,11 @@ do
                resdir=$2
                shift
                ;;
+       --torture)
+               checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
+               TORTURE_SUITE=$2
+               shift
+               ;;
        *)
                echo Unknown argument $1
                usage
@@ -146,7 +171,7 @@ do
        shift
 done
 
-CONFIGFRAG=${KVM}/configs; export CONFIGFRAG
+CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
 KVPATH=${CONFIGFRAG}/$kversion; export KVPATH
 
 if test -z "$configs"
@@ -157,54 +182,231 @@ fi
 if test -z "$resdir"
 then
        resdir=$KVM/res
-       if ! test -e $resdir
-       then
-               mkdir $resdir || :
-       fi
-else
+fi
+
+if test "$dryrun" = ""
+then
        if ! test -e $resdir
        then
                mkdir -p "$resdir" || :
        fi
-fi
-mkdir $resdir/$ds
-touch $resdir/$ds/log
-echo $scriptname $args >> $resdir/$ds/log
+       mkdir $resdir/$ds
 
-pwd > $resdir/$ds/testid.txt
-if test -d .git
-then
-       git status >> $resdir/$ds/testid.txt
-       git rev-parse HEAD >> $resdir/$ds/testid.txt
-fi
-builddir=$KVM/b1
-if ! test -e $builddir
-then
-       mkdir $builddir || :
+       # Be noisy only if running the script.
+       echo Results directory: $resdir/$ds
+       echo $scriptname $args
+
+       touch $resdir/$ds/log
+       echo $scriptname $args >> $resdir/$ds/log
+       echo ${TORTURE_SUITE} > $resdir/$ds/TORTURE_SUITE
+
+       pwd > $resdir/$ds/testid.txt
+       if test -d .git
+       then
+               git status >> $resdir/$ds/testid.txt
+               git rev-parse HEAD >> $resdir/$ds/testid.txt
+       fi
 fi
 
+# Create a file of test-name/#cpus pairs, sorted by decreasing #cpus.
+touch $T/cfgcpu
 for CF in $configs
 do
-       # Running TREE01 multiple times creates TREE01, TREE01.2, TREE01.3, ...
-       rd=$resdir/$ds/$CF
-       if test -d "${rd}"
+       if test -f "$CONFIGFRAG/$kversion/$CF"
        then
-               n="`ls -d "${rd}"* | grep '\.[0-9]\+$' |
-                       sed -e 's/^.*\.\([0-9]\+\)/\1/' |
-                       sort -k1n | tail -1`"
-               if test -z "$n"
-               then
-                       rd="${rd}.2"
-               else
-                       n="`expr $n + 1`"
-                       rd="${rd}.${n}"
-               fi
+               echo $CF `configNR_CPUS.sh $CONFIGFRAG/$kversion/$CF` >> $T/cfgcpu
+       else
+               echo "The --configs file $CF does not exist, terminating."
+               exit 1
        fi
-       mkdir "${rd}"
-       echo Results directory: $rd
-       kvm-test-1-rcu.sh $CONFIGFRAG/$kversion/$CF $builddir $rd $dur "-nographic $RCU_QEMU_ARG" "rcutorture.test_no_idle_hz=1 rcutorture.verbose=1 $RCU_BOOTARGS"
 done
+sort -k2nr $T/cfgcpu > $T/cfgcpu.sort
+
+# Use a greedy bin-packing algorithm, sorting the list accordingly.
+awk < $T/cfgcpu.sort > $T/cfgcpu.pack -v ncpus=$cpus '
+BEGIN {
+       njobs = 0;
+}
+
+{
+       # Read file of tests and corresponding required numbers of CPUs.
+       cf[njobs] = $1;
+       cpus[njobs] = $2;
+       njobs++;
+}
+
+END {
+       alldone = 0;
+       batch = 0;
+       nc = -1;
+
+       # Each pass through the following loop creates one test batch
+       # that can be executed concurrently given ncpus.  Note that a
+       # given test that requires more than the available CPUs will run in
+       # its own batch.  Such tests just have to make do with what
+       # is available.
+       while (nc != ncpus) {
+               batch++;
+               nc = ncpus;
+
+               # Each pass through the following loop considers one
+               # test for inclusion in the current batch.
+               for (i = 0; i < njobs; i++) {
+                       if (done[i])
+                               continue; # Already part of a batch.
+                       if (nc >= cpus[i] || nc == ncpus) {
+
+                               # This test fits into the current batch.
+                               done[i] = batch;
+                               nc -= cpus[i];
+                               if (nc <= 0)
+                                       break; # Too-big test in its own batch.
+                       }
+               }
+       }
+
+       # Dump out the tests in batch order.
+       for (b = 1; b <= batch; b++)
+               for (i = 0; i < njobs; i++)
+                       if (done[i] == b)
+                               print cf[i], cpus[i];
+}'
+
+# Generate a script to execute the tests in appropriate batches.
+cat << ___EOF___ > $T/script
+TORTURE_SUITE="$TORTURE_SUITE"; export TORTURE_SUITE
+___EOF___
+awk < $T/cfgcpu.pack \
+       -v CONFIGDIR="$CONFIGFRAG/$kversion/" \
+       -v KVM="$KVM" \
+       -v ncpus=$cpus \
+       -v rd=$resdir/$ds/ \
+       -v dur=$dur \
+       -v RCU_QEMU_ARG=$RCU_QEMU_ARG \
+       -v RCU_BOOTARGS=$RCU_BOOTARGS \
+'BEGIN {
+       i = 0;
+}
+
+{
+       cf[i] = $1;
+       cpus[i] = $2;
+       i++;
+}
+
+# Dump out the scripting required to run one test batch.
+function dump(first, pastlast)
+{
+       print "echo ----Start batch: `date`";
+       print "echo ----Start batch: `date` >> " rd "/log";
+       jn=1
+       for (j = first; j < pastlast; j++) {
+               builddir=KVM "/b" jn
+               cpusr[jn] = cpus[j];
+               if (cfrep[cf[j]] == "") {
+                       cfr[jn] = cf[j];
+                       cfrep[cf[j]] = 1;
+               } else {
+                       cfrep[cf[j]]++;
+                       cfr[jn] = cf[j] "." cfrep[cf[j]];
+               }
+               if (cpusr[jn] > ncpus && ncpus != 0)
+                       ovf = "(!)";
+               else
+                       ovf = "";
+               print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date`";
+               print "echo ", cfr[jn], cpusr[jn] ovf ": Starting build. `date` >> " rd "/log";
+               print "rm -f " builddir ".*";
+               print "touch " builddir ".wait";
+               print "mkdir " builddir " > /dev/null 2>&1 || :";
+               print "mkdir " rd cfr[jn] " || :";
+               print "kvm-test-1-run.sh " CONFIGDIR cf[j], builddir, rd cfr[jn], dur " \"" RCU_QEMU_ARG "\" \"" RCU_BOOTARGS "\" > " rd cfr[jn]  "/kvm-test-1-run.sh.out 2>&1 &"
+               print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date`";
+               print "echo ", cfr[jn], cpusr[jn] ovf ": Waiting for build to complete. `date` >> " rd "/log";
+               print "while test -f " builddir ".wait"
+               print "do"
+               print "\tsleep 1"
+               print "done"
+               print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date`";
+               print "echo ", cfr[jn], cpusr[jn] ovf ": Build complete. `date` >> " rd "/log";
+               jn++;
+       }
+       for (j = 1; j < jn; j++) {
+               builddir=KVM "/b" j
+               print "rm -f " builddir ".ready"
+               print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
+               print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
+       }
+       print "wait"
+       print "echo ---- All kernel runs complete. `date`";
+       print "echo ---- All kernel runs complete. `date` >> " rd "/log";
+       for (j = 1; j < jn; j++) {
+               builddir=KVM "/b" j
+               print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
+               print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results: >> " rd "/log";
+               print "cat " rd cfr[j]  "/kvm-test-1-run.sh.out";
+               print "cat " rd cfr[j]  "/kvm-test-1-run.sh.out >> " rd "/log";
+       }
+}
+
+END {
+       njobs = i;
+       nc = ncpus;
+       first = 0;
+
+       # Each pass through the following loop considers one test.
+       for (i = 0; i < njobs; i++) {
+               if (ncpus == 0) {
+                       # Sequential run specified, so each test gets its own batch.
+                       dump(i, i + 1);
+                       first = i;
+               } else if (nc < cpus[i] && i != 0) {
+                       # Out of CPUs, dump out a batch.
+                       dump(first, i);
+                       first = i;
+                       nc = ncpus;
+               }
+               # Account for the CPUs needed by the current test.
+               nc -= cpus[i];
+       }
+       # Dump the last batch.
+       if (ncpus != 0)
+               dump(first, i);
+}' >> $T/script
+
+if test "$dryrun" = script
+then
+       # Dump out the script, but define the environment variables that
+       # it needs to run standalone.
+       echo CONFIGFRAG="$CONFIGFRAG; export CONFIGFRAG"
+       echo KVM="$KVM; export KVM"
+       echo KVPATH="$KVPATH; export KVPATH"
+       echo PATH="$PATH; export PATH"
+       echo RCU_BUILDONLY="$RCU_BUILDONLY; export RCU_BUILDONLY"
+       echo RCU_INITRD="$RCU_INITRD; export RCU_INITRD"
+       echo RCU_KMAKE_ARG="$RCU_KMAKE_ARG; export RCU_KMAKE_ARG"
+       echo RCU_QEMU_CMD="$RCU_QEMU_CMD; export RCU_QEMU_CMD"
+       echo RCU_QEMU_INTERACTIVE="$RCU_QEMU_INTERACTIVE; export RCU_QEMU_INTERACTIVE"
+       echo RCU_QEMU_MAC="$RCU_QEMU_MAC; export RCU_QEMU_MAC"
+       echo "mkdir -p "$resdir" || :"
+       echo "mkdir $resdir/$ds"
+       cat $T/script
+       exit 0
+elif test "$dryrun" = sched
+then
+       # Extract the test run schedule from the script.
+       egrep 'start batch|Starting build\.' $T/script |
+               sed -e 's/:.*$//' -e 's/^echo //'
+       exit 0
+else
+       # Not a dry run, so run the script.
+       sh $T/script
+fi
+
 # Tracing: trace_event=rcu:rcu_grace_period,rcu:rcu_future_grace_period,rcu:rcu_grace_period_init,rcu:rcu_nocb_wake,rcu:rcu_preempt_task,rcu:rcu_unlock_preempted_task,rcu:rcu_quiescent_state_report,rcu:rcu_fqs,rcu:rcu_callback,rcu:rcu_kfree_callback,rcu:rcu_batch_start,rcu:rcu_invoke_callback,rcu:rcu_invoke_kfree_callback,rcu:rcu_batch_end,rcu:rcu_torture_read,rcu:rcu_barrier
 
+echo
+echo
 echo " --- `date` Test summary:"
+echo Results directory: $resdir/$ds
 kvm-recheck.sh $resdir/$ds
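
The batching logic added to kvm.sh is a greedy first-fit pack over the CPU-sorted configuration list. The following toy run reproduces just that packing step on a fabricated configuration list (names and CPU counts are made up, already sorted by decreasing CPU count as sort -k2nr would leave them):

#!/bin/bash
# Toy run of the greedy bin-packing used by kvm.sh; input data is fabricated.
ncpus=16
printf '%s\n' "TREE07 16" "TREE03 8" "TREE01 6" "TINY01 1" |
awk -v ncpus=$ncpus '
{
        cf[njobs] = $1; cpus[njobs] = $2; njobs++;
}
END {
        batch = 0; nc = -1;
        while (nc != ncpus) {
                batch++;
                nc = ncpus;
                for (i = 0; i < njobs; i++) {
                        if (done[i])
                                continue;
                        if (nc >= cpus[i] || nc == ncpus) {
                                done[i] = batch;
                                nc -= cpus[i];
                                if (nc <= 0)
                                        break;
                        }
                }
        }
        for (b = 1; b <= batch; b++)
                for (i = 0; i < njobs; i++)
                        if (done[i] == b)
                                print "batch " b ": " cf[i], cpus[i];
}'
# Expected result: TREE07 fills batch 1 alone; the other three share batch 2.

In the real script, the new --dryrun sched option prints the batch schedule derived this way without building or running anything.
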
diff --git a/tools/testing/selftests/rcutorture/configs/lock/BUSTED b/tools/testing/selftests/rcutorture/configs/lock/BUSTED
new file mode 100644 (file)
index 0000000..1d1da14
--- /dev/null
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot b/tools/testing/selftests/rcutorture/configs/lock/BUSTED.boot
new file mode 100644 (file)
index 0000000..6386c15
--- /dev/null
@@ -0,0 +1 @@
+locktorture.torture_type=lock_busted
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFLIST b/tools/testing/selftests/rcutorture/configs/lock/CFLIST
new file mode 100644 (file)
index 0000000..a061b22
--- /dev/null
@@ -0,0 +1 @@
+LOCK01
diff --git a/tools/testing/selftests/rcutorture/configs/lock/CFcommon b/tools/testing/selftests/rcutorture/configs/lock/CFcommon
new file mode 100644 (file)
index 0000000..e372dc2
--- /dev/null
@@ -0,0 +1,2 @@
+CONFIG_LOCK_TORTURE_TEST=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/LOCK01 b/tools/testing/selftests/rcutorture/configs/lock/LOCK01
new file mode 100644 (file)
index 0000000..a9625e3
--- /dev/null
@@ -0,0 +1,6 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/lock/ver_functions.sh
new file mode 100644 (file)
index 0000000..9746ea1
--- /dev/null
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Kernel-version-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2014
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# locktorture_param_onoff bootparam-string config-file
+#
+# Adds onoff locktorture module parameters to kernels having it.
+locktorture_param_onoff () {
+       if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
+       then
+               echo CPU-hotplug kernel, adding locktorture onoff. 1>&2
+               echo locktorture.onoff_interval=3 locktorture.onoff_holdoff=30
+       fi
+}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+       echo $1 `locktorture_param_onoff "$1" "$2"` \
+               locktorture.stat_interval=15 \
+               locktorture.shutdown_secs=$3 \
+               locktorture.locktorture_runnable=1 \
+               locktorture.verbose=1
+}
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED
new file mode 100644 (file)
index 0000000..48d8a24
--- /dev/null
@@ -0,0 +1,7 @@
+CONFIG_RCU_TRACE=n
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_HOTPLUG_CPU=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot b/tools/testing/selftests/rcutorture/configs/rcu/BUSTED.boot
new file mode 100644 (file)
index 0000000..6804f9d
--- /dev/null
@@ -0,0 +1 @@
+rcutorture.torture_type=rcu_busted
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/CFcommon b/tools/testing/selftests/rcutorture/configs/rcu/CFcommon
new file mode 100644 (file)
index 0000000..d2d2a86
--- /dev/null
@@ -0,0 +1,2 @@
+CONFIG_RCU_TORTURE_TEST=y
+CONFIG_PRINTK_TIME=y
similarity index 75%
rename from tools/testing/selftests/rcutorture/configs/SRCU-N
rename to tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
index 10a0e27f4c75b095834f173a25fc0967a0e1b971..9fbb41b9b3149ed62cb0e97aabe6731d7bdc3c6c 100644 (file)
@@ -1,8 +1,7 @@
 CONFIG_RCU_TRACE=n
 CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=4
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=n
-CONFIG_PRINTK_TIME=y
similarity index 86%
rename from tools/testing/selftests/rcutorture/configs/SRCU-P
rename to tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
index 6650e00c6d91b2c4cc11bc91f127ecb8d300684f..4b6f272dba27f8483f45c99a4a28f419e1b9d69a 100644 (file)
@@ -5,4 +5,3 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
-CONFIG_PRINTK_TIME=y
similarity index 92%
rename from tools/testing/selftests/rcutorture/configs/TINY01
rename to tools/testing/selftests/rcutorture/configs/rcu/TINY01
index 0c2823f2171246722a34260fb09ceea7dcb7401f..0a63e073a00c7a3765bc3732b453bf8c0a19bf09 100644 (file)
@@ -10,4 +10,3 @@ CONFIG_RCU_TRACE=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_PREEMPT_COUNT=n
-CONFIG_PRINTK_TIME=y
similarity index 92%
rename from tools/testing/selftests/rcutorture/configs/TINY02
rename to tools/testing/selftests/rcutorture/configs/rcu/TINY02
index e5072d7528b6726a2ae831707aebdc4cb5db7ac9..f4feaee4077662e588772d62801fc93c79043b5e 100644 (file)
@@ -10,4 +10,3 @@ CONFIG_RCU_TRACE=y
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_PREEMPT_COUNT=y
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE01
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 141119a00044fac5ae32b97e331e304970ee7e42..9c827ec59a97dd43a1267e0e6e058e88e971ed6e 100644 (file)
@@ -20,4 +20,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 92%
rename from tools/testing/selftests/rcutorture/configs/TREE02
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE02
index 2d4d0960852806bae43dc79fc642baf58d227034..1a777b5f68b57be4724aa3c7f9a73d31753e67e4 100644 (file)
@@ -7,7 +7,7 @@ CONFIG_PREEMPT=y
 CONFIG_HZ_PERIODIC=n
 CONFIG_NO_HZ_IDLE=y
 CONFIG_NO_HZ_FULL=n
-CONFIG_RCU_FAST_NO_HZ=n 
+CONFIG_RCU_FAST_NO_HZ=n
 CONFIG_RCU_TRACE=n
 CONFIG_HOTPLUG_CPU=n
 CONFIG_SUSPEND=n
@@ -23,4 +23,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE03
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE03
index a47de5be8a0465ad7ad4918af317e1135d66605e..c1f111c1561b67b96c784330d333a828292106d5 100644 (file)
@@ -20,4 +20,3 @@ CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=y
 CONFIG_RCU_BOOST_PRIO=2
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE04
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 8d839b86a1d5db66000e302d1cd46d21e6257d4a..7dbd27ce17a4f82a2833804a8b9aa4395e59bc89 100644 (file)
@@ -22,4 +22,3 @@ CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=y
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE05
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE05
index b5ba72ea25cbe84814f1bf30424549e748371d1c..d0f32e57474315f7fc7141ea40763a403a9d364f 100644 (file)
@@ -22,4 +22,3 @@ CONFIG_PROVE_RCU_DELAY=y
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE06
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE06
index 7c95ab48d29f90e339fc84de2e8bc75c2ee122bd..2e477dfb9c57a66445552dc2375be2d1b3c1c03f 100644 (file)
@@ -23,4 +23,3 @@ CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE07
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE07
index 1467404bdec17b097b1183bfcc2505da157aa71e..042f86ef362a4337e04c0392e621a1ee103fd15c 100644 (file)
@@ -21,4 +21,3 @@ CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=y
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE08
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE08
index 7d097a61ac2a621f67e4fffd6de5f9b9e1d285ab..3438cee1e3c5c544d5de5db4e6572a6e540ee0a6 100644 (file)
@@ -23,4 +23,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 96%
rename from tools/testing/selftests/rcutorture/configs/TREE08-T
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
index 442c4e450ab38049584986bf2476bdb3f32392f1..bf4523d3e44c41e13170a3bceb0e1da2816774d7 100644 (file)
@@ -23,4 +23,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 95%
rename from tools/testing/selftests/rcutorture/configs/TREE09
rename to tools/testing/selftests/rcutorture/configs/rcu/TREE09
index 0d1ec0d3dfeee47a045e540228aefc4919db8d7c..81e4f7c0bf0bd6ba21c4a733dfed897320d11500 100644 (file)
@@ -18,4 +18,3 @@ CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
-CONFIG_PRINTK_TIME=y
similarity index 70%
rename from tools/testing/selftests/rcutorture/configs/v0.0/ver_functions.sh
rename to tools/testing/selftests/rcutorture/configs/rcu/v0.0/ver_functions.sh
index e8052539af54c9048584f490b5baf8ddf1dda180..5ace37a89780416fd2b8f27fc7d76dd5316e79ff 100644 (file)
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-# rcutorture_param_n_barrier_cbs bootparam-string
-#
-# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
-rcutorture_param_n_barrier_cbs () {
-       echo $1
-}
-
-# rcutorture_param_onoff bootparam-string config-file
-#
-# Adds onoff rcutorture module parameters to kernels having it.
-rcutorture_param_onoff () {
-       echo $1
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them,
+# which old kernels do not.
+per_version_boot_params () {
+       echo    rcutorture.stat_interval=15 \
+               rcutorture.shutdown_secs=$3 \
+               rcutorture.rcutorture_runnable=1 \
+               rcutorture.test_no_idle_hz=1 \
+               rcutorture.verbose=1
 }
similarity index 72%
rename from tools/testing/selftests/rcutorture/configs/ver_functions.sh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.3/ver_functions.sh
index 5e40eadea77734d4497f711ca1f5d72aa03aa7bb..bae55692ce6e36138dfddd9d5b13dddeac956508 100644 (file)
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-# rcutorture_param_n_barrier_cbs bootparam-string
-#
-# Adds n_barrier_cbs rcutorture module parameter to kernels having it.
-rcutorture_param_n_barrier_cbs () {
-       if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
-       then
-               echo $1
-       else
-               echo $1 rcutorture.n_barrier_cbs=4
-       fi
-}
-
 # rcutorture_param_onoff bootparam-string config-file
 #
 # Adds onoff rcutorture module parameters to kernels having it.
@@ -39,8 +27,18 @@ rcutorture_param_onoff () {
        if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
        then
                echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
-               echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
-       else
-               echo $1
+               echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
        fi
 }
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+       echo $1 `rcutorture_param_onoff "$1" "$2"` \
+               rcutorture.stat_interval=15 \
+               rcutorture.shutdown_secs=$3 \
+               rcutorture.rcutorture_runnable=1 \
+               rcutorture.test_no_idle_hz=1 \
+               rcutorture.verbose=1
+}
similarity index 69%
rename from tools/testing/selftests/rcutorture/configs/v3.5/ver_functions.sh
rename to tools/testing/selftests/rcutorture/configs/rcu/v3.5/ver_functions.sh
index 6a5f13aab44d7d5863a04b601c7bb021072ea66b..8977d8d31b19f5215b9a9922f2b9c1c4b1e63fcd 100644 (file)
@@ -26,9 +26,9 @@
 rcutorture_param_n_barrier_cbs () {
        if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
        then
-               echo $1
+               :
        else
-               echo $1 rcutorture.n_barrier_cbs=4
+               echo rcutorture.n_barrier_cbs=4
        fi
 }
 
@@ -38,9 +38,20 @@ rcutorture_param_n_barrier_cbs () {
 rcutorture_param_onoff () {
        if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
        then
-               echo CPU-hotplug kernel, adding rcutorture onoff.
-               echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
-       else
-               echo $1
+               echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
+               echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
        fi
 }
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+       echo $1 `rcutorture_param_onoff "$1" "$2"` \
+               `rcutorture_param_n_barrier_cbs "$1"` \
+               rcutorture.stat_interval=15 \
+               rcutorture.shutdown_secs=$3 \
+               rcutorture.rcutorture_runnable=1 \
+               rcutorture.test_no_idle_hz=1 \
+               rcutorture.verbose=1
+}
similarity index 66%
rename from tools/testing/selftests/rcutorture/configs/v3.3/ver_functions.sh
rename to tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh
index c37432f3572c42a270a1d49b134644157ef5ce4b..8977d8d31b19f5215b9a9922f2b9c1c4b1e63fcd 100644 (file)
 #
 # Adds n_barrier_cbs rcutorture module parameter to kernels having it.
 rcutorture_param_n_barrier_cbs () {
-       echo $1
+       if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
+       then
+               :
+       else
+               echo rcutorture.n_barrier_cbs=4
+       fi
 }
 
 # rcutorture_param_onoff bootparam-string config-file
@@ -33,9 +38,20 @@ rcutorture_param_n_barrier_cbs () {
 rcutorture_param_onoff () {
        if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
        then
-               echo CPU-hotplug kernel, adding rcutorture onoff.
-               echo $1 rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
-       else
-               echo $1
+               echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
+               echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
        fi
 }
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+       echo $1 `rcutorture_param_onoff "$1" "$2"` \
+               `rcutorture_param_n_barrier_cbs "$1"` \
+               rcutorture.stat_interval=15 \
+               rcutorture.shutdown_secs=$3 \
+               rcutorture.rcutorture_runnable=1 \
+               rcutorture.test_no_idle_hz=1 \
+               rcutorture.verbose=1
+}
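
To see what the current-kernel per_version_boot_params composes, here is a self-contained sketch with the two hotplug helpers stubbed out; the real bootparam_hotplug_cpu and configfrag_hotplug_cpu live in bin/functions.sh, and the boot string, configuration name, and duration below are hypothetical:

#!/bin/bash
# Sketch of the boot-parameter composition above, with stubbed helpers.
bootparam_hotplug_cpu () { echo "$1" | grep -q "rcutorture\.onoff_"; }
configfrag_hotplug_cpu () { return 0; }   # pretend CONFIG_HOTPLUG_CPU=y

rcutorture_param_onoff () {
        if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
        then
                echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
        fi
}
rcutorture_param_n_barrier_cbs () {
        if echo $1 | grep -q "rcutorture\.n_barrier_cbs"
        then
                :
        else
                echo rcutorture.n_barrier_cbs=4
        fi
}
per_version_boot_params () {
        echo $1 `rcutorture_param_onoff "$1" "$2"` \
                `rcutorture_param_n_barrier_cbs "$1"` \
                rcutorture.stat_interval=15 \
                rcutorture.shutdown_secs=$3 \
                rcutorture.rcutorture_runnable=1 \
                rcutorture.test_no_idle_hz=1 \
                rcutorture.verbose=1
}

per_version_boot_params "console=ttyS0" TREE01 1800
# Prints the original boot string followed by the onoff, n_barrier_cbs, and
# standard rcutorture parameters, with shutdown_secs set to 1800.
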
index 03a0381b1cb79a2b09a565d43883bb89c5855950..b5ec7fb986f6a560a258c99b8af3977aff936c2d 100644 (file)
@@ -102,7 +102,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn);
 static void mark_page_dirty_in_slot(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot, gfn_t gfn);
 
-bool kvm_rebooting;
+__visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;